├── Makefile ├── server.go ├── middleware ├── middleware.go ├── recoverer.go ├── timeout.go ├── request_id.go └── logger.go ├── scripts ├── test.sh └── lint.sh ├── .gitignore ├── request_test.go ├── tracer_test.go ├── serializer.go ├── examples ├── crawler │ ├── task │ │ └── crawl.go │ ├── README.md │ ├── parser │ │ └── parser.go │ ├── main.go │ └── handler │ │ └── crawl.go ├── producer │ └── producer.go ├── custom-broker │ └── main.go ├── worker │ └── worker.go ├── sentry │ └── sentry.go ├── rpc │ └── main.go └── main.go ├── serializer_json.go ├── utils_test.go ├── errors.go ├── contrib ├── sentry │ └── sentry.go └── rpc │ ├── Makefile │ ├── helpers.go │ ├── server.go │ ├── handler.go │ ├── proto │ ├── bokchoy.proto │ └── bokchoy.pb.go │ └── client.go ├── bokchoy_test.go ├── constants.go ├── .github └── workflows │ └── go.yml ├── broker_test.go ├── handler.go ├── tracer.go ├── request.go ├── task_test.go ├── LICENSE ├── config.go ├── suite_test.go ├── go.mod ├── broker.go ├── tests └── lua │ └── main.go ├── context.go ├── terminal.go ├── utils.go ├── queue_test.go ├── logging └── logging.go ├── consumer_test.go ├── options.go ├── consumer.go ├── bokchoy.go ├── task.go ├── queue.go ├── README.md ├── broker_redis.go └── docs └── a-tour-of-bokchoy.md /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | scripts/test.sh 3 | 4 | lint: 5 | scripts/lint.sh 6 | 7 | run-cover: 8 | go tool cover -html=coverage.out 9 | -------------------------------------------------------------------------------- /server.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import "context" 4 | 5 | type Server interface { 6 | Start(context.Context) error 7 | Stop(context.Context) 8 | } 9 | -------------------------------------------------------------------------------- /middleware/middleware.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | type contextKey struct { 4 | name string 5 | } 6 | 7 | func (k *contextKey) String() string { 8 | return "bokchoy/middleware context value " + k.name 9 | } 10 | -------------------------------------------------------------------------------- /scripts/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | export GO111MODULE=on 4 | export REDIS_PORT=${REDIS_PORT:-"6379"} 5 | export REDIS_HOST=${REDIS_HOST:-"localhost"} 6 | 7 | 8 | time go test -cover -coverprofile=coverage.out -v -p 1 9 | 10 | exit $? 
11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | -------------------------------------------------------------------------------- /request_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/thoas/bokchoy" 9 | ) 10 | 11 | func TestRequest_String(t *testing.T) { 12 | is := assert.New(t) 13 | 14 | req := &bokchoy.Request{Task: &bokchoy.Task{}} 15 | is.NotZero(req.String()) 16 | } 17 | -------------------------------------------------------------------------------- /tracer_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | 8 | "github.com/thoas/bokchoy" 9 | ) 10 | 11 | func TestTracer_Error(t *testing.T) { 12 | var ( 13 | ctx = context.Background() 14 | err = fmt.Errorf("Unexpected error") 15 | ) 16 | 17 | bokchoy.DefaultTracer.Log(ctx, "An error has occurred", err) 18 | } 19 | -------------------------------------------------------------------------------- /serializer.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | // Serializer defines an interface to implement a serializer. 4 | type Serializer interface { 5 | Dumps(interface{}) ([]byte, error) 6 | Loads([]byte, interface{}) error 7 | } 8 | 9 | // newSerializer initializes a new Serializer. 10 | func newSerializer(cfg SerializerConfig) Serializer { 11 | switch cfg.Type { 12 | default: 13 | return JSONSerializer{} 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /examples/crawler/task/crawl.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import "fmt" 4 | 5 | // Crawl defines a crawl. 6 | type Crawl struct { 7 | BaseURL string `json:"base_url"` 8 | URL string `json:"url"` 9 | Depth int `json:"depth"` 10 | } 11 | 12 | // String returns the string representation of a crawl. 13 | func (c Crawl) String() string { 14 | return fmt.Sprintf( 15 | "<Crawl url=%s, depth=%d>", 16 | c.URL, c.Depth) 17 | } 18 | -------------------------------------------------------------------------------- /scripts/lint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export GO111MODULE=on 4 | 5 | set -eo pipefail 6 | 7 | linter_path="${GOPATH}/bin/golangci-lint" 8 | 9 | if [[ ! -x "${linter_path}" ]]; then 10 | go get -u github.com/golangci/golangci-lint/cmd/golangci-lint 11 | fi 12 | 13 | SOURCE_DIRECTORY=$(dirname "${BASH_SOURCE[0]}") 14 | cd "${SOURCE_DIRECTORY}/.." 15 | 16 | if [[ -n $1 ]]; then 17 | golangci-lint run "$1" 18 | else 19 | golangci-lint run ./...
20 | fi 21 | 22 | -------------------------------------------------------------------------------- /serializer_json.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import "encoding/json" 4 | 5 | type JSONSerializer struct { 6 | } 7 | 8 | func (s JSONSerializer) Dumps(v interface{}) ([]byte, error) { 9 | return json.Marshal(v) 10 | } 11 | 12 | func (s JSONSerializer) Loads(data []byte, v interface{}) error { 13 | return json.Unmarshal(data, v) 14 | } 15 | 16 | func (s JSONSerializer) String() string { 17 | return "json" 18 | } 19 | 20 | var _ Serializer = (*JSONSerializer)(nil) 21 | -------------------------------------------------------------------------------- /utils_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/thoas/bokchoy" 8 | "github.com/thoas/go-funk" 9 | ) 10 | 11 | func TestUtils_ID(t *testing.T) { 12 | is := assert.New(t) 13 | 14 | iteration := 1000 15 | 16 | ids := make([]string, iteration) 17 | 18 | for i := 0; i < iteration; i++ { 19 | ids[i] = bokchoy.ID() 20 | } 21 | 22 | is.Len(funk.UniqString(ids), iteration) 23 | } 24 | -------------------------------------------------------------------------------- /examples/crawler/README.md: -------------------------------------------------------------------------------- 1 | # crawler 2 | 3 | This package contains a basic crawler built on top of Bokchoy. 4 | 5 | ## Usage 6 | 7 | ### Producer 8 | 9 | ```console 10 | go run main.go -run producer -url https://golang.org/ 11 | ``` 12 | 13 | By default the depth is `1`, you can override it with `-depth` flag. 14 | 15 | ### Worker 16 | 17 | ```console 18 | go run main.go -run worker 19 | ``` 20 | 21 | By default the concurrency is `1`, you can override it with `-concurrency` flag. 22 | -------------------------------------------------------------------------------- /errors.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import "fmt" 4 | 5 | var ( 6 | // ErrAttributeError is returned when an attribute is not found. 7 | ErrAttributeError = fmt.Errorf("Attribute error") 8 | 9 | // ErrTaskCanceled is returned when a task is canceled. 10 | ErrTaskCanceled = fmt.Errorf("Task canceled") 11 | 12 | // ErrTaskNotFound is returned when a task is not found. 13 | ErrTaskNotFound = fmt.Errorf("Task not found") 14 | 15 | // ErrNoQueueToRun is returned when no queue has been found to run. 
16 | ErrNoQueueToRun = fmt.Errorf("No queue to run") 17 | ) 18 | -------------------------------------------------------------------------------- /contrib/sentry/sentry.go: -------------------------------------------------------------------------------- 1 | package sentry 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/getsentry/sentry-go" 7 | 8 | "github.com/thoas/bokchoy" 9 | ) 10 | 11 | type SentryTracer struct { 12 | } 13 | 14 | func (s SentryTracer) String() string { 15 | return "sentry" 16 | } 17 | 18 | func (s *SentryTracer) Log(ctx context.Context, msg string, err error) { 19 | sentry.WithScope(func(scope *sentry.Scope) { 20 | task := bokchoy.GetContextTask(ctx) 21 | 22 | scope.SetContext("msg", msg) 23 | scope.SetContext("task", task) 24 | 25 | sentry.CaptureException(err) 26 | }) 27 | } 28 | -------------------------------------------------------------------------------- /bokchoy_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestBokchoy_Queue(t *testing.T) { 11 | run(t, func(t *testing.T, s *suite) { 12 | is := assert.New(t) 13 | queue := s.bokchoy.Queue("tests.task.message") 14 | is.NotZero(queue) 15 | is.Equal(queue.Name(), "tests.task.message") 16 | }) 17 | } 18 | 19 | func TestBokchoy_Flush(t *testing.T) { 20 | run(t, func(t *testing.T, s *suite) { 21 | is := assert.New(t) 22 | err := s.bokchoy.Flush(context.Background()) 23 | is.NoError(err) 24 | }) 25 | } 26 | -------------------------------------------------------------------------------- /contrib/rpc/Makefile: -------------------------------------------------------------------------------- 1 | install: 2 | go get -u google.golang.org/grpc 3 | go get -u github.com/golang/protobuf/protoc-gen-go 4 | go get github.com/gogo/protobuf/protoc-gen-gofast 5 | 6 | generate: 7 | rm -rf proto/*.go 8 | protoc \ 9 | -I proto \ 10 | -I=$$GOPATH/src \ 11 | -I=$$GOPATH/src/github.com/gogo/protobuf/protobuf \ 12 | --gogo_out=\ 13 | Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,\ 14 | Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,\ 15 | Mgoogle/protobuf/wrappers.proto=github.com/golang/protobuf/ptypes/wrappers,\ 16 | plugins=grpc:proto \ 17 | proto/bokchoy.proto 18 | -------------------------------------------------------------------------------- /constants.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import "time" 4 | 5 | const ( 6 | logo = ` 7 | _ _ _ 8 | | |__ ___ | | _____| |__ ___ _ _ 9 | | '_ \ / _ \| |/ / __| '_ \ / _ \| | | | 10 | | |_) | (_) | < (__| | | | (_) | |_| | 11 | |_.__/ \___/|_|\_\___|_| |_|\___/ \__, | 12 | |___/ ` 13 | defaultTimeout = 180 * time.Second 14 | defaultConcurrency = 1 15 | defaultMaxRetries = 3 16 | defaultTTL = 180 * time.Second 17 | 18 | Version = "v0.2.0" 19 | ) 20 | 21 | var defaultRetryIntervals = []time.Duration{ 22 | 60 * time.Second, 23 | 120 * time.Second, 24 | 180 * time.Second, 25 | } 26 | -------------------------------------------------------------------------------- /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | 11 | build: 12 | runs-on: ubuntu-latest 13 | container: golang:1.21 14 | services: 15 | # Label used 
to access the service container 16 | redis: 17 | # Docker Hub image 18 | image: redis 19 | # Set health checks to wait until redis has started 20 | options: >- 21 | --health-cmd "redis-cli ping" 22 | --health-interval 10s 23 | --health-timeout 5s 24 | --health-retries 5 25 | steps: 26 | - uses: actions/checkout@v2 27 | 28 | - name: Test 29 | run: make test 30 | env: 31 | REDIS_HOST: redis 32 | REDIS_PORT: 6379 33 | -------------------------------------------------------------------------------- /broker_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "github.com/thoas/bokchoy" 9 | ) 10 | 11 | func TestBroker_Redis(t *testing.T) { 12 | is := assert.New(t) 13 | ctx := context.Background() 14 | 15 | _, err := bokchoy.New(ctx, bokchoy.Config{ 16 | Broker: bokchoy.BrokerConfig{ 17 | Type: "redis", 18 | Redis: bokchoy.RedisConfig{ 19 | Type: "sentinel", 20 | }, 21 | }, 22 | }, bokchoy.WithInitialize(false)) 23 | is.NoError(err) 24 | 25 | _, err = bokchoy.New(ctx, bokchoy.Config{ 26 | Broker: bokchoy.BrokerConfig{ 27 | Type: "redis", 28 | Redis: bokchoy.RedisConfig{ 29 | Type: "cluster", 30 | }, 31 | }, 32 | }, bokchoy.WithInitialize(false)) 33 | 34 | is.NoError(err) 35 | } 36 | -------------------------------------------------------------------------------- /handler.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | // HandlerFunc is a handler to handle incoming tasks. 4 | type HandlerFunc func(*Request) error 5 | 6 | // Handle consumes the request. 7 | func (s HandlerFunc) Handle(r *Request) error { 8 | return s(r) 9 | } 10 | 11 | // Handler is an interface to implement a task handler. 
12 | type Handler interface { 13 | Handle(*Request) error 14 | } 15 | 16 | func chain(middlewares []func(Handler) Handler, sub Handler) Handler { 17 | // Return ahead of time if there aren't any middlewares for the chain 18 | if len(middlewares) == 0 { 19 | return sub 20 | } 21 | 22 | // Wrap the end handler with the middleware chain 23 | h := middlewares[len(middlewares)-1](sub) 24 | for i := len(middlewares) - 2; i >= 0; i-- { 25 | h = middlewares[i](h) 26 | } 27 | 28 | return h 29 | } 30 | -------------------------------------------------------------------------------- /contrib/rpc/helpers.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/thoas/bokchoy" 7 | "github.com/thoas/bokchoy/contrib/rpc/proto" 8 | ) 9 | 10 | func TaskToProto(task *bokchoy.Task) *proto.Task { 11 | pb := &proto.Task{ 12 | ID: task.ID, 13 | Name: task.Name, 14 | Status: int64(task.Status), 15 | MaxRetries: int64(task.MaxRetries), 16 | TTL: &task.TTL, 17 | ETA: &task.ETA, 18 | Timeout: &task.Timeout, 19 | PublishedAt: &task.PublishedAt, 20 | ProcessedAt: &task.ProcessedAt, 21 | StartedAt: &task.StartedAt, 22 | } 23 | 24 | retryIntervals := make([]*time.Duration, len(task.RetryIntervals)) 25 | for i := range task.RetryIntervals { 26 | retryIntervals[i] = &task.RetryIntervals[i] 27 | } 28 | 29 | pb.RetryIntervals = retryIntervals 30 | 31 | return pb 32 | } 33 | -------------------------------------------------------------------------------- /tracer.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/thoas/bokchoy/logging" 7 | ) 8 | 9 | // DefaultTracer is the default tracer. 10 | var DefaultTracer = NewLoggerTracer(logging.DefaultLogger) 11 | 12 | // Tracer is a component used to trace errors. 13 | type Tracer interface { 14 | Log(context.Context, string, error) 15 | } 16 | 17 | // NewLoggerTracer initializes a new Tracer instance. 18 | func NewLoggerTracer(logger logging.Logger) Tracer { 19 | return &loggerTracer{logger} 20 | } 21 | 22 | type loggerTracer struct { 23 | logger logging.Logger 24 | } 25 | 26 | func (t loggerTracer) String() string { 27 | return "logger" 28 | } 29 | 30 | // Log logs the error. 31 | func (t loggerTracer) Log(ctx context.Context, msg string, err error) { 32 | t.logger.Error(ctx, msg, logging.Error(err)) 33 | } 34 | 35 | var _ Tracer = (*loggerTracer)(nil) 36 | -------------------------------------------------------------------------------- /request.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | ) 7 | 8 | // Request is the bokchoy Request which will be handled 9 | // by a subscriber handler. 10 | type Request struct { 11 | ctx context.Context 12 | Task *Task 13 | } 14 | 15 | // Context returns the context attached to the Request. 
16 | func (r *Request) Context() context.Context { 17 | if r.ctx != nil { 18 | return r.ctx 19 | } 20 | return context.Background() 21 | } 22 | 23 | // WithContext creates a new Request with a context 24 | func (r *Request) WithContext(ctx context.Context) *Request { 25 | if ctx == nil { 26 | panic("nil context") 27 | } 28 | r2 := new(Request) 29 | *r2 = *r 30 | r2.ctx = ctx 31 | 32 | return r2 33 | } 34 | 35 | // String returns a string representation of a Request 36 | func (r Request) String() string { 37 | return fmt.Sprintf( 38 | "<Request task: %s>", 39 | r.Task, 40 | ) 41 | } 42 | -------------------------------------------------------------------------------- /examples/producer/producer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "github.com/thoas/bokchoy" 10 | ) 11 | 12 | func main() { 13 | ctx := context.Background() 14 | 15 | // define the main engine which will manage queues 16 | engine, err := bokchoy.New(ctx, bokchoy.Config{ 17 | Broker: bokchoy.BrokerConfig{ 18 | Type: "redis", 19 | Redis: bokchoy.RedisConfig{ 20 | Type: "client", 21 | Client: bokchoy.RedisClientConfig{ 22 | Addr: "localhost:6379", 23 | }, 24 | }, 25 | }, 26 | }) 27 | if err != nil { 28 | log.Fatal(err) 29 | } 30 | 31 | payload := map[string]string{ 32 | "data": "hello world", 33 | } 34 | 35 | task, err := engine.Queue("tasks.message").Publish(ctx, payload, 36 | bokchoy.WithTimeout(1*time.Second), bokchoy.WithCountdown(-1)) 37 | if err != nil { 38 | log.Fatal(err) 39 | } 40 | 41 | fmt.Println(task, "has been published") 42 | } 43 | -------------------------------------------------------------------------------- /examples/custom-broker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "time" 8 | 9 | "github.com/redis/go-redis/v9" 10 | 11 | "github.com/thoas/bokchoy" 12 | "github.com/thoas/bokchoy/logging" 13 | ) 14 | 15 | func main() { 16 | ctx := context.Background() 17 | 18 | logger := logging.NewNopLogger() 19 | 20 | clt := redis.NewClient(&redis.Options{ 21 | Addr: "localhost:6379", 22 | }) 23 | 24 | // define a new Redis broker with the 'tasks' prefix 25 | bkr := bokchoy.NewRedisBroker(clt, "client", "tasks", logger) 26 | 27 | // define the main engine which will manage queues 28 | engine, err := bokchoy.New(ctx, bokchoy.Config{}, bokchoy.WithBroker(bkr)) 29 | if err != nil { 30 | log.Fatal(err) 31 | } 32 | 33 | payload := map[string]string{ 34 | "data": "hello world", 35 | } 36 | 37 | task, err := engine.Queue("tasks.message").Publish(ctx, payload, 38 | bokchoy.WithTimeout(1*time.Second), bokchoy.WithCountdown(-1)) 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | 43 | fmt.Println(task, "has been published") 44 | } 45 | -------------------------------------------------------------------------------- /task_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/thoas/bokchoy" 8 | ) 9 | 10 | func TestTask_Serialize(t *testing.T) { 11 | is := assert.New(t) 12 | task := bokchoy.NewTask("task.message", "hello") 13 | is.NotZero(task) 14 | 15 | serializer := bokchoy.JSONSerializer{} 16 | 17 | results, err := task.Serialize(serializer) 18 | is.Nil(err) 19 | is.NotZero(results) 20 | 21 | is.True(task.IsStatusWaiting()) 22 | is.Equal(task.Name,
"task.message") 23 | is.NotZero(task.Payload) 24 | is.NotZero(task.PublishedAt) 25 | 26 | task2, err := bokchoy.TaskFromPayload(results, serializer) 27 | 28 | is.Nil(err) 29 | is.NotZero(task2) 30 | } 31 | 32 | func TestTask_Finished(t *testing.T) { 33 | is := assert.New(t) 34 | task := bokchoy.NewTask("task.message", "hello") 35 | is.False(task.Finished()) 36 | task.MarkAsSucceeded() 37 | is.True(task.Finished()) 38 | task.MarkAsFailed(nil) 39 | task.MaxRetries = 0 40 | is.True(task.Finished()) 41 | task.MaxRetries = 3 42 | is.False(task.Finished()) 43 | } 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Florent Messa 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /middleware/recoverer.go: -------------------------------------------------------------------------------- 1 | // inspired from https://github.com/go-chi/chi/blob/master/middleware/recoverer.go 2 | 3 | package middleware 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | "runtime/debug" 9 | 10 | "github.com/pkg/errors" 11 | "github.com/thoas/bokchoy" 12 | ) 13 | 14 | // Recoverer is a middleware that recovers from panics, logs the panic (and a 15 | // backtrace), and adds the error to the request context 16 | func Recoverer(next bokchoy.Handler) bokchoy.Handler { 17 | return bokchoy.HandlerFunc(func(r *bokchoy.Request) error { 18 | var err error 19 | 20 | defer func() { 21 | if rvr := recover(); rvr != nil { 22 | logEntry := GetLogEntry(r) 23 | if logEntry != nil { 24 | logEntry.Panic(rvr, debug.Stack()) 25 | } else { 26 | fmt.Fprintf(os.Stderr, "Panic: %+v\n", rvr) 27 | debug.PrintStack() 28 | } 29 | 30 | var ok bool 31 | if err, ok = rvr.(error); !ok { 32 | err = fmt.Errorf("%v", rvr) 33 | } 34 | 35 | ctx := bokchoy.WithContextError(r.Context(), errors.WithStack(err)) 36 | 37 | *r = *r.WithContext(ctx) 38 | } 39 | }() 40 | 41 | err = next.Handle(r) 42 | 43 | return err 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import "github.com/redis/go-redis/v9" 4 | 5 | // RedisClusterConfig contains the redis cluster configuration. 
6 | type RedisClusterConfig redis.ClusterOptions 7 | 8 | // RedisClientConfig contains the redis client configuration. 9 | type RedisClientConfig redis.Options 10 | 11 | // RedisSentinelConfig contains the redis sentinel configuration. 12 | type RedisSentinelConfig redis.FailoverOptions 13 | 14 | // RedisConfig contains all redis configuration: client, sentinel (failover), cluster. 15 | type RedisConfig struct { 16 | Type string 17 | Prefix string 18 | Client RedisClientConfig 19 | Cluster RedisClusterConfig 20 | Sentinel RedisSentinelConfig 21 | } 22 | 23 | // QueueConfig contains queue information that should be initialized. 24 | type QueueConfig struct { 25 | Name string 26 | } 27 | 28 | // BrokerConfig contains the broker configuration. 29 | type BrokerConfig struct { 30 | Type string 31 | Redis RedisConfig 32 | } 33 | 34 | // Config contains the main configuration to initialize Bokchoy. 35 | type Config struct { 36 | Queues []QueueConfig 37 | Broker BrokerConfig 38 | Serializer SerializerConfig 39 | } 40 | 41 | // SerializerConfig contains a serializer configuration to store tasks. 42 | type SerializerConfig struct { 43 | Type string 44 | } 45 | -------------------------------------------------------------------------------- /middleware/timeout.go: -------------------------------------------------------------------------------- 1 | // inspired from https://github.com/go-chi/chi/blob/master/middleware/timeout.go 2 | 3 | package middleware 4 | 5 | import ( 6 | "context" 7 | "time" 8 | 9 | "github.com/thoas/bokchoy" 10 | ) 11 | 12 | // Timeout is a middleware that cancels ctx after a given timeout and return 13 | // 14 | // It's required that you select the ctx.Done() channel to check for the signal 15 | // if the context has reached its deadline and return, otherwise the timeout 16 | // signal will be just ignored. 17 | // 18 | // ie. a handler may look like: 19 | // 20 | // queue.HandlerFunc(func(r *bokchoy.Request) { 21 | // ctx := r.Context() 22 | // processTime := time.Duration(rand.Intn(4)+1) * time.Second 23 | // 24 | // select { 25 | // case <-ctx.Done(): 26 | // return 27 | // 28 | // case <-time.After(processTime): 29 | // // The above channel simulates some hard work. 30 | // } 31 | // 32 | // return nil 33 | // }) 34 | // 35 | func Timeout(timeout time.Duration) func(next bokchoy.Handler) bokchoy.Handler { 36 | return func(next bokchoy.Handler) bokchoy.Handler { 37 | fn := func(r *bokchoy.Request) error { 38 | ctx, cancel := context.WithTimeout(r.Context(), timeout) 39 | var err error 40 | 41 | defer func() { 42 | cancel() 43 | err = ctx.Err() 44 | }() 45 | 46 | err = next.Handle(r.WithContext(ctx)) 47 | 48 | return err 49 | } 50 | return bokchoy.HandlerFunc(fn) 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /contrib/rpc/server.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "strconv" 8 | 9 | "google.golang.org/grpc" 10 | 11 | "github.com/thoas/bokchoy" 12 | "github.com/thoas/bokchoy/contrib/rpc/proto" 13 | "github.com/thoas/bokchoy/logging" 14 | ) 15 | 16 | // Server is a rpc server which contains gRPC. 17 | type Server struct { 18 | logger logging.Logger 19 | bok *bokchoy.Bokchoy 20 | srv *grpc.Server 21 | port int 22 | } 23 | 24 | // NewServer initializes a new Server. 
25 | func NewServer(bok *bokchoy.Bokchoy, port int) *Server { 26 | s := grpc.NewServer() 27 | 28 | logger := bok.Logger.With(logging.String("server", "rpc")) 29 | 30 | proto.RegisterBokchoyServer(s, &Handler{ 31 | bok: bok, 32 | logger: logger, 33 | }) 34 | 35 | return &Server{ 36 | bok: bok, 37 | srv: s, 38 | port: port, 39 | logger: logger, 40 | } 41 | } 42 | 43 | func (s *Server) String() string { 44 | return "rpc" 45 | } 46 | 47 | // Start starts the RPC server. 48 | func (s *Server) Start(ctx context.Context) error { 49 | addr := fmt.Sprintf(":%s", strconv.Itoa(s.port)) 50 | 51 | lis, err := net.Listen("tcp", addr) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | s.logger.Info(ctx, "Start server", logging.String("addr", addr)) 57 | 58 | err = s.srv.Serve(lis) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | } 65 | 66 | // Stop stops the RPC server. 67 | func (s *Server) Stop(ctx context.Context) { 68 | s.srv.GracefulStop() 69 | 70 | s.logger.Info(ctx, "Server shutdown") 71 | } 72 | 73 | var _ bokchoy.Server = (*Server)(nil) 74 | -------------------------------------------------------------------------------- /examples/worker/worker.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | "os/signal" 9 | 10 | "github.com/thoas/bokchoy" 11 | "github.com/thoas/bokchoy/logging" 12 | "github.com/thoas/bokchoy/middleware" 13 | ) 14 | 15 | func main() { 16 | var ( 17 | err error 18 | logger logging.Logger 19 | ctx = context.Background() 20 | loggerLevel = os.Getenv("LOGGER_LEVEL") 21 | ) 22 | 23 | if loggerLevel == "development" { 24 | logger, err = logging.NewDevelopmentLogger() 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | 29 | defer logger.Sync() 30 | } 31 | 32 | engine, err := bokchoy.New(ctx, bokchoy.Config{ 33 | Broker: bokchoy.BrokerConfig{ 34 | Type: "redis", 35 | Redis: bokchoy.RedisConfig{ 36 | Type: "client", 37 | Client: bokchoy.RedisClientConfig{ 38 | Addr: "localhost:6379", 39 | }, 40 | }, 41 | }, 42 | }, bokchoy.WithLogger(logger)) 43 | if err != nil { 44 | log.Fatal(err) 45 | } 46 | 47 | engine.Use(middleware.Recoverer) 48 | engine.Use(middleware.RequestID) 49 | engine.Use(middleware.DefaultLogger) 50 | 51 | engine.Queue("tasks.message").HandleFunc(func(r *bokchoy.Request) error { 52 | fmt.Println("Receive request:", r) 53 | fmt.Println("Request context:", r.Context()) 54 | fmt.Println("Payload:", r.Task.Payload) 55 | 56 | r.Task.Result = "You can store your result here" 57 | 58 | return nil 59 | }) 60 | 61 | c := make(chan os.Signal, 1) 62 | signal.Notify(c, os.Interrupt) 63 | 64 | go func() { 65 | for range c { 66 | log.Print("Received signal, gracefully stopping") 67 | engine.Stop(ctx) 68 | } 69 | }() 70 | 71 | engine.Run(ctx) 72 | } 73 | -------------------------------------------------------------------------------- /contrib/rpc/handler.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/thoas/bokchoy" 8 | "github.com/thoas/bokchoy/contrib/rpc/proto" 9 | "github.com/thoas/bokchoy/logging" 10 | ) 11 | 12 | type Handler struct { 13 | bok *bokchoy.Bokchoy 14 | logger logging.Logger 15 | } 16 | 17 | func getRequestOptions(req *proto.PublishTaskRequest) []bokchoy.Option { 18 | options := []bokchoy.Option{} 19 | 20 | if req.Countdown != nil { 21 | options = append(options, bokchoy.WithCountdown(*req.Countdown)) 22 | } 23 | 24 | 
if req.TTL != nil { 25 | options = append(options, bokchoy.WithTTL(*req.TTL)) 26 | } 27 | 28 | if req.MaxRetries != nil { 29 | options = append(options, bokchoy.WithMaxRetries(int(req.MaxRetries.Value))) 30 | } 31 | 32 | if len(req.RetryIntervals) != 0 { 33 | intervals := make([]time.Duration, len(req.RetryIntervals)) 34 | 35 | for i := range req.RetryIntervals { 36 | intervals[i] = *req.RetryIntervals[i] 37 | } 38 | 39 | options = append(options, bokchoy.WithRetryIntervals(intervals)) 40 | } 41 | 42 | if req.Timeout != nil { 43 | options = append(options, bokchoy.WithTimeout(*req.Timeout)) 44 | } 45 | 46 | return options 47 | } 48 | 49 | func (h Handler) PublishTask(ctx context.Context, req *proto.PublishTaskRequest) (*proto.Task, error) { 50 | var payload interface{} 51 | err := h.bok.Serializer.Loads(req.Payload.Value, &payload) 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | task, err := h.bok.Queue(req.Queue).Publish(ctx, payload, getRequestOptions(req)...) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | pb := TaskToProto(task) 62 | pb.Payload = req.Payload 63 | 64 | return pb, nil 65 | } 66 | -------------------------------------------------------------------------------- /suite_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | "testing" 9 | 10 | "github.com/thoas/bokchoy" 11 | "github.com/thoas/bokchoy/logging" 12 | "github.com/thoas/bokchoy/middleware" 13 | ) 14 | 15 | type suiteServer struct { 16 | } 17 | 18 | func (s suiteServer) Start(context.Context) error { 19 | return nil 20 | } 21 | 22 | func (s suiteServer) Stop(context.Context) { 23 | } 24 | 25 | type suite struct { 26 | bokchoy *bokchoy.Bokchoy 27 | } 28 | 29 | type FuncTest func(t *testing.T, s *suite) 30 | 31 | // nolint 32 | func run(t *testing.T, f FuncTest) { 33 | logger, err := logging.NewDevelopmentLogger() 34 | if err != nil { 35 | log.Fatal(err) 36 | } 37 | 38 | defer logger.Sync() 39 | 40 | ctx := context.Background() 41 | 42 | addr := fmt.Sprintf("%s:%s", os.Getenv("REDIS_HOST"), os.Getenv("REDIS_PORT")) 43 | 44 | bok, err := bokchoy.New(ctx, bokchoy.Config{ 45 | Broker: bokchoy.BrokerConfig{ 46 | Type: "redis", 47 | Redis: bokchoy.RedisConfig{ 48 | Type: "client", 49 | Client: bokchoy.RedisClientConfig{ 50 | Addr: addr, 51 | }, 52 | }, 53 | }, 54 | Queues: []bokchoy.QueueConfig{ 55 | { 56 | Name: "tests.task.message", 57 | }, 58 | }, 59 | }, 60 | bokchoy.WithQueues([]string{"tasks.message"}), 61 | bokchoy.WithServers([]bokchoy.Server{suiteServer{}}), 62 | bokchoy.WithLogger(logger.With(logging.String("logger", "bokchoy")))) 63 | if err != nil { 64 | panic(err) 65 | } 66 | 67 | bok.Use(middleware.RequestID) 68 | bok.Use(middleware.Recoverer) 69 | 70 | err = bok.Empty(ctx) 71 | if err != nil { 72 | panic(err) 73 | } 74 | 75 | suite := &suite{bok} 76 | 77 | f(t, suite) 78 | } 79 | -------------------------------------------------------------------------------- /contrib/rpc/proto/bokchoy.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | import "google/protobuf/timestamp.proto"; 4 | import "google/protobuf/duration.proto"; 5 | import "google/protobuf/wrappers.proto"; 6 | import "github.com/gogo/protobuf/gogoproto/gogo.proto"; 7 | 8 | package proto; 9 | 10 | message PublishTaskRequest { 11 | string queue = 1; 12 | google.protobuf.BytesValue payload = 2; 13 | 14 | google.protobuf.Duration countdown = 
3 [(gogoproto.stdduration) = true]; 15 | google.protobuf.Duration timeout = 4 [(gogoproto.stdduration) = true]; 16 | 17 | google.protobuf.Duration ttl = 5 [(gogoproto.customname) = "TTL", (gogoproto.stdduration) = true]; 18 | 19 | google.protobuf.Int64Value max_retries = 6 [(gogoproto.nullable) = true]; 20 | 21 | repeated google.protobuf.Duration retry_intervals = 7 [(gogoproto.stdduration) = true]; 22 | } 23 | 24 | message Task { 25 | string id = 1 [(gogoproto.customname) = "ID"]; 26 | string name = 2; 27 | google.protobuf.BytesValue payload = 3; 28 | int64 status = 4; 29 | int64 max_retries = 5; 30 | google.protobuf.Duration timeout = 6 [(gogoproto.stdduration) = true]; 31 | google.protobuf.Duration ttl = 7 [(gogoproto.customname) = "TTL", (gogoproto.stdduration) = true]; 32 | repeated google.protobuf.Duration retry_intervals = 8 [(gogoproto.stdduration) = true]; 33 | google.protobuf.Timestamp published_at = 9 [(gogoproto.stdtime) = true]; 34 | google.protobuf.Timestamp started_at = 10 [(gogoproto.stdtime) = true]; 35 | google.protobuf.Timestamp processed_at = 11 [(gogoproto.stdtime) = true]; 36 | google.protobuf.Timestamp eta = 12 [(gogoproto.customname) = "ETA", (gogoproto.stdtime) = true]; 37 | } 38 | 39 | service Bokchoy { 40 | rpc PublishTask(PublishTaskRequest) returns (Task) {} 41 | } 42 | -------------------------------------------------------------------------------- /examples/crawler/parser/parser.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "strings" 7 | 8 | "github.com/PuerkitoBio/goquery" 9 | ) 10 | 11 | // Parser is a common interface to parse a document. 12 | type Parser interface { 13 | ExtractLinks(string, io.Reader) ([]string, error) 14 | } 15 | 16 | // DocumentParser is an HTML document parser. 17 | type DocumentParser struct { 18 | } 19 | 20 | // ExtractLinks extracts relative links from an net/http response with a base url. 21 | // It returns links which only contain the base url to avoid crawling external links. 22 | func (p *DocumentParser) ExtractLinks(baseURL string, body io.Reader) ([]string, error) { 23 | doc, err := goquery.NewDocumentFromReader(body) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return filterLinks(baseURL, extractLinks(doc)), nil 29 | } 30 | 31 | // extractLinks extracts links from a goquery.Document. 32 | func extractLinks(doc *goquery.Document) []string { 33 | foundUrls := []string{} 34 | doc.Find("a").Each(func(i int, s *goquery.Selection) { 35 | res, _ := s.Attr("href") 36 | foundUrls = append(foundUrls, res) 37 | }) 38 | 39 | return foundUrls 40 | } 41 | 42 | // filterLinks filters links with a base url. 
43 | func filterLinks(baseURL string, links []string) []string { 44 | filteredLinks := []string{} 45 | 46 | for _, link := range links { 47 | link = strings.TrimSuffix(link, "/") 48 | 49 | if strings.HasPrefix(link, baseURL) { 50 | filteredLinks = append(filteredLinks, link) 51 | } 52 | 53 | if strings.HasPrefix(link, "/") { 54 | resolvedURL := fmt.Sprintf("%s%s", baseURL, link) 55 | filteredLinks = append(filteredLinks, resolvedURL) 56 | } 57 | } 58 | 59 | return filteredLinks 60 | } 61 | -------------------------------------------------------------------------------- /examples/sentry/sentry.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | 11 | "github.com/getsentry/sentry-go" 12 | "github.com/thoas/bokchoy" 13 | bokchoysentry "github.com/thoas/bokchoy/contrib/sentry" 14 | "github.com/thoas/bokchoy/middleware" 15 | ) 16 | 17 | func main() { 18 | var ( 19 | err error 20 | ctx = context.Background() 21 | run string 22 | ) 23 | 24 | flag.StringVar(&run, "run", "", "service to run") 25 | flag.Parse() 26 | 27 | sentry.Init(sentry.ClientOptions{ 28 | Dsn: os.Getenv("SENTRY_DSN"), 29 | }) 30 | 31 | engine, err := bokchoy.New(ctx, bokchoy.Config{ 32 | Broker: bokchoy.BrokerConfig{ 33 | Type: "redis", 34 | Redis: bokchoy.RedisConfig{ 35 | Type: "client", 36 | Client: bokchoy.RedisClientConfig{ 37 | Addr: "localhost:6379", 38 | }, 39 | }, 40 | }, 41 | }, bokchoy.WithTracer(&bokchoysentry.SentryTracer{})) 42 | if err != nil { 43 | log.Fatal(err) 44 | } 45 | 46 | engine.Use(middleware.Recoverer) 47 | 48 | switch run { 49 | case "producer": 50 | task, err := engine.Queue("tasks.message").Publish(ctx, map[string]string{ 51 | "data": "hello world", 52 | }) 53 | if err != nil { 54 | log.Fatal(err) 55 | } 56 | 57 | fmt.Println(task, "has been published") 58 | case "worker": 59 | engine.Queue("tasks.message").HandleFunc(func(r *bokchoy.Request) error { 60 | return fmt.Errorf("Unexpected error") 61 | }) 62 | 63 | c := make(chan os.Signal, 1) 64 | signal.Notify(c, os.Interrupt) 65 | 66 | go func() { 67 | for range c { 68 | log.Print("Received signal, gracefully stopping") 69 | engine.Stop(ctx) 70 | } 71 | }() 72 | 73 | engine.Run(ctx) 74 | } 75 | 76 | } 77 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/thoas/bokchoy 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/PuerkitoBio/goquery v1.5.1 7 | github.com/davecgh/go-spew v1.1.1 8 | github.com/getsentry/sentry-go v0.6.0 9 | github.com/gogo/protobuf v1.3.2 10 | github.com/golang/protobuf v1.5.3 11 | github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 12 | github.com/mitchellh/mapstructure v1.2.2 13 | github.com/oklog/ulid v1.3.1 14 | github.com/pkg/errors v0.9.1 15 | github.com/redis/go-redis/v9 v9.4.0 16 | github.com/stretchr/testify v1.8.1 17 | github.com/thoas/go-funk v0.4.0 18 | go.uber.org/zap v1.12.0 19 | google.golang.org/grpc v1.56.3 20 | ) 21 | 22 | require ( 23 | github.com/BurntSushi/toml v0.3.1 // indirect 24 | github.com/andybalholm/cascadia v1.1.0 // indirect 25 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 26 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 27 | github.com/pmezard/go-difflib v1.0.0 // indirect 28 | go.uber.org/atomic v1.5.0 // indirect 29 | go.uber.org/multierr v1.3.0 // indirect 30 | go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee // indirect 31 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de // indirect 32 | golang.org/x/mod v0.17.0 // indirect 33 | golang.org/x/net v0.33.0 // indirect 34 | golang.org/x/sync v0.10.0 // indirect 35 | golang.org/x/sys v0.28.0 // indirect 36 | golang.org/x/text v0.21.0 // indirect 37 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect 38 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect 39 | google.golang.org/protobuf v1.33.0 // indirect 40 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect 41 | gopkg.in/yaml.v3 v3.0.1 // indirect 42 | honnef.co/go/tools v0.0.1-2019.2.3 // indirect 43 | ) 44 | -------------------------------------------------------------------------------- /contrib/rpc/client.go: -------------------------------------------------------------------------------- 1 | package rpc 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/golang/protobuf/ptypes/wrappers" 8 | grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" 9 | "github.com/pkg/errors" 10 | "github.com/thoas/bokchoy/contrib/rpc/proto" 11 | "google.golang.org/grpc" 12 | "google.golang.org/grpc/codes" 13 | ) 14 | 15 | type ClientOptions struct { 16 | MaxRetries uint 17 | PerRetryTimeout time.Duration 18 | RetryCodes []codes.Code 19 | } 20 | 21 | // NewClient initializes a new rpc client. 22 | func NewClient(addr string, options ClientOptions) *Client { 23 | return &Client{ 24 | addr: addr, 25 | options: options, 26 | } 27 | } 28 | 29 | type Client struct { 30 | addr string 31 | options ClientOptions 32 | } 33 | 34 | func (c *Client) dial() (*grpc.ClientConn, error) { 35 | conn, err := grpc.Dial(c.addr, 36 | grpc.WithInsecure(), 37 | grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor()), 38 | ) 39 | if err != nil { 40 | return nil, errors.Wrapf(err, "unable to connect to server at %s", c.addr) 41 | } 42 | 43 | return conn, nil 44 | } 45 | 46 | func (c *Client) PublishTask(ctx context.Context, queueName string, payload []byte) (*proto.Task, error) { 47 | conn, err := c.dial() 48 | if err != nil { 49 | return nil, err 50 | } 51 | 52 | defer conn.Close() 53 | clt := proto.NewBokchoyClient(conn) 54 | 55 | task, err := clt.PublishTask(ctx, &proto.PublishTaskRequest{ 56 | Queue: queueName, 57 | Payload: &wrappers.BytesValue{ 58 | Value: payload, 59 | }, 60 | }, grpc_retry.WithMax(c.options.MaxRetries), 61 | grpc_retry.WithPerRetryTimeout(c.options.PerRetryTimeout), 62 | grpc_retry.WithCodes(c.options.RetryCodes...)) 63 | if err != nil { 64 | return nil, errors.Wrapf(err, "unable to publish task") 65 | } 66 | 67 | return task, nil 68 | } 69 | -------------------------------------------------------------------------------- /broker.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/thoas/bokchoy/logging" 8 | ) 9 | 10 | // Broker is the common interface to define a Broker. 11 | type Broker interface { 12 | // Initialize initializes the broker. 13 | Initialize(context.Context) error 14 | 15 | // Ping pings the broker to ensure it's well connected. 16 | Ping(context.Context) error 17 | 18 | // Get returns raw data stored in broker. 19 | Get(context.Context, string) (map[string]interface{}, error) 20 | 21 | // Delete deletes raw data in broker based on key. 22 | Delete(context.Context, string, string) error 23 | 24 | // List returns raw data stored in broker. 
25 | List(context.Context, string) ([]map[string]interface{}, error) 26 | 27 | // Empty empties a queue. 28 | Empty(context.Context, string) error 29 | 30 | // Flush flushes the entire broker. 31 | Flush(context.Context) error 32 | 33 | // Count returns number of items from a queue name. 34 | Count(context.Context, string) (BrokerStats, error) 35 | 36 | // Save synchronizes the stored item. 37 | Set(context.Context, string, map[string]interface{}, time.Duration) error 38 | 39 | // Publish publishes raw data. 40 | Publish(context.Context, string, string, map[string]interface{}, time.Time) error 41 | 42 | // Consume returns an array of raw data. 43 | Consume(context.Context, string, time.Time) ([]map[string]interface{}, error) 44 | } 45 | 46 | // BrokerStats is the statistics returned by a Queue. 47 | type BrokerStats struct { 48 | Total int 49 | Direct int 50 | Delayed int 51 | } 52 | 53 | // newBroker initializes a new Broker instance. 54 | func newBroker(cfg BrokerConfig, logger logging.Logger) Broker { 55 | var ( 56 | broker Broker 57 | ) 58 | 59 | switch cfg.Type { 60 | default: 61 | broker = newRedisBroker(cfg.Redis, logger) 62 | } 63 | 64 | return broker 65 | } 66 | -------------------------------------------------------------------------------- /tests/lua/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "log" 7 | 8 | "github.com/davecgh/go-spew/spew" 9 | "github.com/redis/go-redis/v9" 10 | ) 11 | 12 | var multihgetall = `local collate = function (key) 13 | local raw_data = redis.call('HGETALL', key) 14 | local data = {} 15 | 16 | for idx = 1, #raw_data, 2 do 17 | data[raw_data[idx]] = raw_data[idx + 1] 18 | end 19 | 20 | return data; 21 | end 22 | 23 | local data = {} 24 | 25 | for _, key in ipairs(KEYS) do 26 | data[key] = collate(key) 27 | end 28 | 29 | return cjson.encode(data) 30 | ` 31 | 32 | var script = `local key = ARGV[1] 33 | local min = ARGV[2] 34 | local max = ARGV[3] 35 | local results = redis.call('ZRANGEBYSCORE', key, min, max) 36 | local length = #results 37 | if length > 0 then 38 | redis.call('ZREMRANGEBYSCORE', key, min, max) 39 | return results 40 | else 41 | return nil 42 | end` 43 | 44 | func main() { 45 | client := redis.NewClient(&redis.Options{ 46 | Addr: "localhost:6379", 47 | Password: "", // no password set 48 | DB: 0, // use default DB 49 | }) 50 | 51 | var ( 52 | results map[string]map[string]interface{} 53 | ctx context.Context 54 | ) 55 | 56 | sha, err := client.ScriptLoad(ctx, multihgetall).Result() 57 | if err != nil { 58 | log.Fatal(err) 59 | } 60 | 61 | vals, err := client.EvalSha(ctx, sha, []string{"foo"}).Result() 62 | if err != nil { 63 | log.Fatal(err) 64 | } 65 | 66 | err = json.Unmarshal([]byte(vals.(string)), &results) 67 | if err != nil { 68 | log.Fatal(err) 69 | } 70 | 71 | spew.Dump(results) 72 | 73 | sha, err = client.ScriptLoad(ctx, script).Result() 74 | spew.Dump(sha, err) 75 | 76 | vals, err = client.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() 77 | spew.Dump(vals, err) 78 | 79 | vals, err = client.EvalSha(ctx, sha, nil, "myzset", "-inf", "+inf").Result() 80 | spew.Dump(vals, err) 81 | } 82 | -------------------------------------------------------------------------------- /examples/rpc/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | "time" 11 | 12 
| "github.com/thoas/bokchoy" 13 | "github.com/thoas/bokchoy/contrib/rpc" 14 | "github.com/thoas/bokchoy/middleware" 15 | "google.golang.org/grpc/codes" 16 | ) 17 | 18 | const ( 19 | queueName = "tasks.message" 20 | ) 21 | 22 | func main() { 23 | var ( 24 | err error 25 | ctx = context.Background() 26 | run string 27 | rpcPort int 28 | ) 29 | 30 | flag.StringVar(&run, "run", "", "service to run") 31 | flag.IntVar(&rpcPort, "rpc-port", 9090, "port for rpc server") 32 | flag.Parse() 33 | 34 | engine, err := bokchoy.New(ctx, bokchoy.Config{ 35 | Broker: bokchoy.BrokerConfig{ 36 | Type: "redis", 37 | Redis: bokchoy.RedisConfig{ 38 | Type: "client", 39 | Client: bokchoy.RedisClientConfig{ 40 | Addr: "localhost:6379", 41 | }, 42 | }, 43 | }, 44 | }) 45 | if err != nil { 46 | log.Fatal(err) 47 | } 48 | 49 | engine.Use(middleware.DefaultLogger) 50 | 51 | switch run { 52 | case "client": 53 | clt := rpc.NewClient(fmt.Sprintf(":%d", rpcPort), rpc.ClientOptions{ 54 | MaxRetries: 3, 55 | PerRetryTimeout: 1 * time.Second, 56 | RetryCodes: []codes.Code{codes.Unavailable}, 57 | }) 58 | 59 | task, err := clt.PublishTask(ctx, queueName, []byte(`{"data": "hello world"}`)) 60 | if err != nil { 61 | log.Fatalf("could not retrieve result: %v", err) 62 | } 63 | 64 | log.Printf("Task published: %+v", task) 65 | case "producer": 66 | task, err := engine.Queue(queueName).Publish(ctx, map[string]string{ 67 | "data": "hello world", 68 | }) 69 | if err != nil { 70 | log.Fatal(err) 71 | } 72 | 73 | fmt.Println(task, "has been published") 74 | case "worker": 75 | engine.Queue(queueName).HandleFunc(func(r *bokchoy.Request) error { 76 | return nil 77 | }) 78 | 79 | c := make(chan os.Signal, 1) 80 | signal.Notify(c, os.Interrupt) 81 | 82 | go func() { 83 | for range c { 84 | log.Print("Received signal, gracefully stopping") 85 | engine.Stop(ctx) 86 | } 87 | }() 88 | 89 | engine.Run(ctx, bokchoy.WithServers([]bokchoy.Server{ 90 | rpc.NewServer(engine, rpcPort), 91 | })) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /context.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | type contextKey struct { 8 | name string 9 | } 10 | 11 | // AfterRequestFunc is a function which will execute after the request 12 | type AfterRequestFunc func() 13 | 14 | func (k *contextKey) String() string { 15 | return "bokchoy context value " + k.name 16 | } 17 | 18 | var ( 19 | // ErrorCtxKey is the context.Context key to store 20 | // the recovered error from the middleware 21 | ErrorCtxKey = &contextKey{"Error"} 22 | 23 | // TaskCtxKey is the context.Context key to store 24 | // the task from the middleware 25 | TaskCtxKey = &contextKey{"Task"} 26 | 27 | // AfterRequestCtxKey is the context.Context to store 28 | // functions to execute after the request 29 | AfterRequestCtxKey = &contextKey{"AfterRequest"} 30 | ) 31 | 32 | // WithContextTask sets the in-context task for a request. 33 | func WithContextTask(ctx context.Context, task *Task) context.Context { 34 | ctx = context.WithValue(ctx, TaskCtxKey, task) 35 | return ctx 36 | } 37 | 38 | // GetContextTask returns the in-context task for a request. 39 | func GetContextTask(ctx context.Context) *Task { 40 | err, _ := ctx.Value(TaskCtxKey).(*Task) 41 | return err 42 | } 43 | 44 | // WithContextError sets the in-context error for a request. 
45 | func WithContextError(ctx context.Context, err error) context.Context { 46 | ctx = context.WithValue(ctx, ErrorCtxKey, err) 47 | return ctx 48 | } 49 | 50 | // GetContextError returns the in-context recovered error for a request. 51 | func GetContextError(ctx context.Context) error { 52 | err, _ := ctx.Value(ErrorCtxKey).(error) 53 | return err 54 | } 55 | 56 | // GetContextAfterRequestFuncs returns the registered functions 57 | // which will execute after the request 58 | func GetContextAfterRequestFuncs(ctx context.Context) []AfterRequestFunc { 59 | funcs, _ := ctx.Value(AfterRequestCtxKey).([]AfterRequestFunc) 60 | if funcs == nil { 61 | return []AfterRequestFunc{} 62 | } 63 | 64 | return funcs 65 | } 66 | 67 | // WithContextAfterRequestFunc registers a new function to be executed 68 | // after the request 69 | func WithContextAfterRequestFunc(ctx context.Context, f AfterRequestFunc) context.Context { 70 | funcs := append(GetContextAfterRequestFuncs(ctx), f) 71 | return context.WithValue(ctx, AfterRequestCtxKey, funcs) 72 | } 73 | -------------------------------------------------------------------------------- /terminal.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | // Ported from Goji's middleware, source: 4 | // https://github.com/zenazn/goji/tree/master/web/middleware 5 | 6 | import ( 7 | "bytes" 8 | "fmt" 9 | "os" 10 | ) 11 | 12 | // Color is a terminal color representation. 13 | type Color []byte 14 | 15 | var ( 16 | // Normal colors 17 | ColorBlack = Color{'\033', '[', '3', '0', 'm'} 18 | ColorRed = Color{'\033', '[', '3', '1', 'm'} 19 | ColorGreen = Color{'\033', '[', '3', '2', 'm'} 20 | ColorYellow = Color{'\033', '[', '3', '3', 'm'} 21 | ColorBlue = Color{'\033', '[', '3', '4', 'm'} 22 | ColorMagenta = Color{'\033', '[', '3', '5', 'm'} 23 | ColorCyan = Color{'\033', '[', '3', '6', 'm'} 24 | ColorWhite = Color{'\033', '[', '3', '7', 'm'} 25 | 26 | // Bright colors 27 | ColorBrightBlack = Color{'\033', '[', '3', '0', ';', '1', 'm'} 28 | ColorBrightRed = Color{'\033', '[', '3', '1', ';', '1', 'm'} 29 | ColorBrightGreen = Color{'\033', '[', '3', '2', ';', '1', 'm'} 30 | ColorBrightYellow = Color{'\033', '[', '3', '3', ';', '1', 'm'} 31 | ColorBrightBlue = Color{'\033', '[', '3', '4', ';', '1', 'm'} 32 | ColorBrightMagenta = Color{'\033', '[', '3', '5', ';', '1', 'm'} 33 | ColorBrightCyan = Color{'\033', '[', '3', '6', ';', '1', 'm'} 34 | ColorBrightWhite = Color{'\033', '[', '3', '7', ';', '1', 'm'} 35 | 36 | ColorReset = Color{'\033', '[', '0', 'm'} 37 | ) 38 | 39 | var isTTY bool 40 | 41 | func init() { 42 | // This is sort of cheating: if stdout is a character device, we assume 43 | // that means it's a TTY. Unfortunately, there are many non-TTY 44 | // character devices, but fortunately stdout is rarely set to any of 45 | // them. 46 | // 47 | // We could solve this properly by pulling in a dependency on 48 | // code.google.com/p/go.crypto/ssh/terminal, for instance, but as a 49 | // heuristic for whether to print in color or in black-and-white, I'd 50 | // really rather not. 51 | fi, err := os.Stdout.Stat() 52 | if err == nil { 53 | m := os.ModeDevice | os.ModeCharDevice 54 | isTTY = fi.Mode()&m == m 55 | } 56 | } 57 | 58 | // ColorWriter is a bytes buffer with color. 59 | type ColorWriter struct { 60 | *bytes.Buffer 61 | color Color 62 | } 63 | 64 | // WithColor returns a new ColorWriter with a new color. 
65 | func (c ColorWriter) WithColor(color Color) *ColorWriter { 66 | c.color = color 67 | return &c 68 | } 69 | 70 | // NewColorWriter initializes a new ColorWriter. 71 | func NewColorWriter(color Color) *ColorWriter { 72 | return &ColorWriter{ 73 | &bytes.Buffer{}, 74 | color, 75 | } 76 | } 77 | 78 | // Write writes an output to stdout. 79 | // nolint: errcheck 80 | func (c *ColorWriter) Write(s string, args ...interface{}) { 81 | if isTTY && c.color != nil { 82 | c.Buffer.Write(c.color) 83 | } 84 | 85 | fmt.Fprintf(c.Buffer, s, args...) 86 | if isTTY && c.color != nil { 87 | c.Buffer.Write(ColorReset) 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /examples/crawler/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "log" 7 | "os" 8 | "os/signal" 9 | "time" 10 | 11 | "github.com/thoas/bokchoy" 12 | "github.com/thoas/bokchoy/examples/crawler/handler" 13 | "github.com/thoas/bokchoy/examples/crawler/parser" 14 | "github.com/thoas/bokchoy/logging" 15 | "github.com/thoas/bokchoy/middleware" 16 | ) 17 | 18 | func main() { 19 | var ( 20 | // which service needs to be run 21 | run string 22 | 23 | // url to crawl 24 | url string 25 | 26 | // until depth 27 | depth int 28 | 29 | // timeout 30 | timeout int 31 | 32 | // concurrency 33 | concurrency int 34 | 35 | // redis address to customize 36 | redisAddr string 37 | err error 38 | ctx = context.Background() 39 | logger logging.Logger 40 | loggerLevel = os.Getenv("LOGGER_LEVEL") 41 | ) 42 | 43 | if loggerLevel == "development" { 44 | logger, err = logging.NewDevelopmentLogger() 45 | if err != nil { 46 | log.Fatal(err) 47 | } 48 | 49 | defer logger.Sync() 50 | } 51 | 52 | flag.IntVar(&depth, "depth", 1, "depth to crawl") 53 | flag.IntVar(&timeout, "timeout", 5, "timeout in seconds") 54 | flag.IntVar(&concurrency, "concurrency", 1, "number of workers") 55 | flag.StringVar(&url, "url", "", "url to crawl") 56 | flag.StringVar(&run, "run", "", "service to run") 57 | flag.StringVar(&redisAddr, "redis-addr", "localhost:6379", "redis address") 58 | flag.Parse() 59 | 60 | bok, err := bokchoy.New(ctx, bokchoy.Config{ 61 | Broker: bokchoy.BrokerConfig{ 62 | Type: "redis", 63 | Redis: bokchoy.RedisConfig{ 64 | Type: "client", 65 | Client: bokchoy.RedisClientConfig{ 66 | Addr: redisAddr, 67 | }, 68 | }, 69 | }, 70 | }, bokchoy.WithMaxRetries(2), bokchoy.WithRetryIntervals([]time.Duration{ 71 | 5 * time.Second, 72 | 10 * time.Second, 73 | }), bokchoy.WithLogger(logger)) 74 | bok.Use(middleware.Recoverer) 75 | bok.Use(middleware.DefaultLogger) 76 | 77 | queue := bok.Queue("tasks.crawl") 78 | 79 | if err != nil { 80 | log.Fatal(err) 81 | } 82 | 83 | h := handler.NewCrawlHandler(queue, &parser.DocumentParser{}, time.Duration(timeout)) 84 | 85 | switch run { 86 | case "producer": 87 | task, err := h.Crawl(ctx, url, url, depth) 88 | if err != nil { 89 | log.Fatal(err) 90 | } 91 | 92 | log.Printf("%s published", task) 93 | case "worker": 94 | queue.Handle(h, bokchoy.WithConcurrency(concurrency)) 95 | 96 | // initialize a signal to close Bokchoy 97 | c := make(chan os.Signal, 1) 98 | signal.Notify(c, os.Interrupt) 99 | 100 | // iterate over the channel to stop 101 | go func() { 102 | for range c { 103 | log.Print("Received signal, gracefully stopping") 104 | bok.Stop(ctx) 105 | } 106 | }() 107 | 108 | // blocking operation, everything is done for you 109 | bok.Run(ctx) 110 | } 111 | } 112 | 
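examples/crawler/handler/crawl.go appears in the tree at the top of this dump but its source is not reproduced here. The sketch below is illustrative only: the CrawlHandler type, its fields and its internals are assumptions rather than the repository's actual handler. It is built solely from APIs that are shown in this dump (bokchoy.Handler and Request, Queue.Publish, parser.Parser, task.Crawl) and matches the NewCrawlHandler call made in main.go above.

// Illustrative sketch only, not the repository's examples/crawler/handler/crawl.go.
package handler

import (
	"context"
	"encoding/json"
	"net/http"
	"time"

	"github.com/thoas/bokchoy"
	"github.com/thoas/bokchoy/examples/crawler/parser"
	"github.com/thoas/bokchoy/examples/crawler/task"
)

// CrawlHandler (hypothetical type) consumes crawl tasks and republishes one
// task per extracted link until the requested depth is exhausted.
type CrawlHandler struct {
	queue   *bokchoy.Queue
	parser  parser.Parser
	timeout time.Duration
}

// NewCrawlHandler initializes a CrawlHandler, mirroring the call made in main.go.
func NewCrawlHandler(queue *bokchoy.Queue, p parser.Parser, timeout time.Duration) *CrawlHandler {
	return &CrawlHandler{queue: queue, parser: p, timeout: timeout}
}

// Crawl publishes an initial crawl task.
func (h *CrawlHandler) Crawl(ctx context.Context, baseURL, url string, depth int) (*bokchoy.Task, error) {
	return h.queue.Publish(ctx, &task.Crawl{BaseURL: baseURL, URL: url, Depth: depth})
}

// Handle fetches the page, extracts its links and enqueues them with depth-1.
func (h *CrawlHandler) Handle(r *bokchoy.Request) error {
	// The payload is a generic value produced by the JSON serializer,
	// so round-trip it through encoding/json to recover a task.Crawl.
	var c task.Crawl
	raw, err := json.Marshal(r.Task.Payload)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(raw, &c); err != nil {
		return err
	}

	if c.Depth <= 0 {
		return nil
	}

	// main.go passes the -timeout flag (seconds) as a bare time.Duration.
	clt := &http.Client{Timeout: h.timeout * time.Second}
	resp, err := clt.Get(c.URL)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	links, err := h.parser.ExtractLinks(c.BaseURL, resp.Body)
	if err != nil {
		return err
	}

	for _, link := range links {
		if _, err := h.Crawl(r.Context(), c.BaseURL, link, c.Depth-1); err != nil {
			return err
		}
	}

	return nil
}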
-------------------------------------------------------------------------------- /middleware/request_id.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | // Ported from Goji's middleware, source: 4 | // https://github.com/zenazn/goji/tree/master/web/middleware 5 | 6 | import ( 7 | "context" 8 | "crypto/rand" 9 | "encoding/base64" 10 | "fmt" 11 | "os" 12 | "strings" 13 | "sync/atomic" 14 | 15 | "github.com/thoas/bokchoy" 16 | ) 17 | 18 | // Key to use when setting the request ID. 19 | type ctxKeyRequestID int 20 | 21 | // RequestIDKey is the key that holds the unique request ID in a request context. 22 | const RequestIDKey ctxKeyRequestID = 0 23 | 24 | var prefix string 25 | var reqid uint64 26 | 27 | // A quick note on the statistics here: we're trying to calculate the chance that 28 | // two randomly generated base62 prefixes will collide. We use the formula from 29 | // http://en.wikipedia.org/wiki/Birthday_problem 30 | // 31 | // P[m, n] \approx 1 - e^{-m^2/2n} 32 | // 33 | // We ballpark an upper bound for $m$ by imagining (for whatever reason) a server 34 | // that restarts every second over 10 years, for $m = 86400 * 365 * 10 = 315360000$ 35 | // 36 | // For a $k$ character base-62 identifier, we have $n(k) = 62^k$ 37 | // 38 | // Plugging this in, we find $P[m, n(10)] \approx 5.75%$, which is good enough for 39 | // our purposes, and is surely more than anyone would ever need in practice -- a 40 | // process that is rebooted a handful of times a day for a hundred years has less 41 | // than a millionth of a percent chance of generating two colliding IDs. 42 | 43 | func init() { 44 | hostname, err := os.Hostname() 45 | if hostname == "" || err != nil { 46 | hostname = "localhost" 47 | } 48 | var buf [12]byte 49 | var b64 string 50 | for len(b64) < 10 { 51 | _, err = rand.Read(buf[:]) 52 | if err != nil { 53 | panic(err) 54 | } 55 | 56 | b64 = base64.StdEncoding.EncodeToString(buf[:]) 57 | b64 = strings.NewReplacer("+", "", "/", "").Replace(b64) 58 | } 59 | 60 | prefix = fmt.Sprintf("%s/%s", hostname, b64[0:10]) 61 | } 62 | 63 | // RequestID is a middleware that injects a request ID into the context of each 64 | // request. A request ID is a string of the form "host.example.com/random-0001", 65 | // where "random" is a base62 random string that uniquely identifies this go 66 | // process, and where the last number is an atomically incremented request 67 | // counter. 68 | 69 | func RequestID(next bokchoy.Handler) bokchoy.Handler { 70 | return bokchoy.HandlerFunc(func(r *bokchoy.Request) error { 71 | ctx := r.Context() 72 | myid := atomic.AddUint64(&reqid, 1) 73 | requestID := fmt.Sprintf("%s-%06d", prefix, myid) 74 | ctx = context.WithValue(ctx, RequestIDKey, requestID) 75 | 76 | return next.Handle(r.WithContext(ctx)) 77 | }) 78 | } 79 | 80 | // GetReqID returns a request ID from the given context if one is present. 81 | // Returns the empty string if a request ID cannot be found. 82 | func GetReqID(ctx context.Context) string { 83 | if ctx == nil { 84 | return "" 85 | } 86 | if reqID, ok := ctx.Value(RequestIDKey).(string); ok { 87 | return reqID 88 | } 89 | return "" 90 | } 91 | 92 | // NextRequestID generates the next request ID in the sequence. 
93 | func NextRequestID() uint64 { 94 | return atomic.AddUint64(&reqid, 1) 95 | } 96 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "strconv" 7 | "time" 8 | 9 | "github.com/oklog/ulid" 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | func ID() string { 14 | t := time.Now().UTC() 15 | entropy := rand.New(rand.NewSource(t.UnixNano())) 16 | return ulid.MustNew(ulid.Timestamp(t), entropy).String() 17 | } 18 | 19 | func reverseDurations(durations []time.Duration) []time.Duration { 20 | results := make([]time.Duration, len(durations)) 21 | 22 | j := len(durations) - 1 23 | for i := 0; i < len(durations); i++ { 24 | results[i] = durations[j] 25 | j-- 26 | } 27 | 28 | return results 29 | } 30 | 31 | func mapDuration(values map[string]interface{}, key string, optional bool) (time.Duration, error) { 32 | raw, err := mapString(values, key, optional) 33 | if err != nil { 34 | return 0, err 35 | } 36 | 37 | if raw != "" { 38 | value, err := strconv.ParseInt(raw, 10, 64) 39 | if err != nil { 40 | return 0, errors.Wrapf(ErrAttributeError, "cannot parse `%s` to integer", key) 41 | } 42 | 43 | timeValue := time.Duration(value) * time.Second 44 | 45 | return timeValue, nil 46 | } 47 | 48 | return 0, nil 49 | } 50 | 51 | func mapTime(values map[string]interface{}, key string, optional bool) (time.Time, error) { 52 | raw, err := mapString(values, key, optional) 53 | if err != nil { 54 | return time.Time{}, err 55 | } 56 | 57 | if raw != "" { 58 | value, err := strconv.ParseInt(raw, 10, 64) 59 | if err != nil { 60 | return time.Time{}, errors.Wrapf(ErrAttributeError, "cannot parse `%s` to integer", key) 61 | } 62 | 63 | timeValue := time.Unix(value, 0).UTC() 64 | 65 | return timeValue, nil 66 | } 67 | 68 | return time.Time{}, nil 69 | } 70 | 71 | func mapString(values map[string]interface{}, key string, optional bool) (string, error) { 72 | raw, ok := values[key] 73 | if !ok && !optional { 74 | return "", errors.Wrapf(ErrAttributeError, "cannot cast `%s`", key) 75 | } 76 | 77 | switch raw := raw.(type) { 78 | case string: 79 | return raw, nil 80 | case int, int64: 81 | return fmt.Sprintf("%d", raw), nil 82 | } 83 | 84 | return "", nil 85 | } 86 | 87 | func mapInt(values map[string]interface{}, key string, optional bool) (int, error) { 88 | raw, err := mapString(values, key, optional) 89 | if err != nil { 90 | return 0, err 91 | } 92 | 93 | if raw != "" { 94 | value, err := strconv.ParseInt(raw, 10, 64) 95 | if err != nil { 96 | return 0, errors.Wrapf(ErrAttributeError, "cannot parse `%s` to integer", key) 97 | } 98 | 99 | return int(value), nil 100 | } 101 | 102 | return 0, nil 103 | } 104 | 105 | func mapFloat(values map[string]interface{}, key string, optional bool) (float64, error) { 106 | raw, err := mapString(values, key, optional) 107 | if err != nil { 108 | return 0, err 109 | } 110 | 111 | if raw != "" { 112 | value, err := strconv.ParseFloat(raw, 10) 113 | if err != nil { 114 | return 0, errors.Wrapf(ErrAttributeError, "cannot parse `%s` to float", key) 115 | } 116 | 117 | return value, nil 118 | } 119 | 120 | return 0, nil 121 | } 122 | 123 | func unpack(fields map[string]interface{}) []interface{} { 124 | args := make([]interface{}, len(fields)*2) 125 | i := 0 126 | for k, v := range fields { 127 | args[i] = k 128 | args[i+1] = v 129 | i += 2 130 | } 131 | 132 | return args 133 | } 134 | 
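utils.go holds the unexported helpers that task.go relies on when moving tasks in and out of the broker: durations are read as integer seconds, timestamps as unix seconds, and unpack flattens a field map into an alternating key/value slice (presumably for variadic broker calls). A small illustrative snippet, assuming it sits inside package bokchoy since the helpers are unexported, could look like this:

package bokchoy

import "fmt"

// exampleHelpers is illustrative only: it shows the conventions used by the helpers above.
func exampleHelpers() {
	fields := map[string]interface{}{
		"ttl":          "180",        // durations are stored as integer seconds
		"published_at": "1560000000", // timestamps are stored as unix seconds
		"max_retries":  3,            // plain ints are accepted by mapString/mapInt too
	}

	ttl, _ := mapDuration(fields, "ttl", false)              // 3m0s
	publishedAt, _ := mapTime(fields, "published_at", false) // 2019-06-08 13:20:00 +0000 UTC
	retries, _ := mapInt(fields, "max_retries", false)       // 3

	fmt.Println(ttl, publishedAt, retries)

	// unpack flattens the map into an alternating key/value slice, e.g.
	// []interface{}{"ttl", "180", "published_at", "1560000000", ...};
	// ordering follows Go's map iteration and is therefore not deterministic.
	fmt.Println(len(unpack(fields))) // 6
}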
-------------------------------------------------------------------------------- /queue_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/thoas/bokchoy" 10 | ) 11 | 12 | func TestQueue_Consumer(t *testing.T) { 13 | run(t, func(t *testing.T, s *suite) { 14 | is := assert.New(t) 15 | queue := s.bokchoy.Queue("tests.task.message") 16 | queue.HandleFunc(func(r *bokchoy.Request) error { 17 | return nil 18 | }) 19 | is.NotZero(queue.Consumer()) 20 | }) 21 | } 22 | 23 | func TestQueue_Cancel(t *testing.T) { 24 | run(t, func(t *testing.T, s *suite) { 25 | is := assert.New(t) 26 | ctx := context.Background() 27 | queue := s.bokchoy.Queue("tests.task.message") 28 | task1, err := queue.Publish(ctx, "hello", bokchoy.WithTTL(10*time.Second)) 29 | is.NotZero(task1) 30 | is.NoError(err) 31 | task2, err := queue.Cancel(ctx, task1.ID) 32 | is.NotZero(task2) 33 | is.NoError(err) 34 | is.True(task2.IsStatusCanceled()) 35 | }) 36 | } 37 | 38 | func TestQueue_Save(t *testing.T) { 39 | run(t, func(t *testing.T, s *suite) { 40 | is := assert.New(t) 41 | ctx := context.Background() 42 | queue := s.bokchoy.Queue("tests.task.message") 43 | task1, err := queue.Publish(ctx, "hello", bokchoy.WithTTL(10*time.Second)) 44 | is.NotZero(task1) 45 | is.NoError(err) 46 | 47 | task1.MarkAsSucceeded() 48 | err = queue.Save(ctx, task1) 49 | is.NoError(err) 50 | 51 | task2, err := queue.Get(ctx, task1.ID) 52 | is.NotZero(task2) 53 | is.NoError(err) 54 | is.True(task2.IsStatusSucceeded()) 55 | is.NotZero(task2.ProcessedAt) 56 | is.NotZero(task2.ExecTime) 57 | }) 58 | } 59 | 60 | func TestQueue_Publish(t *testing.T) { 61 | run(t, func(t *testing.T, s *suite) { 62 | is := assert.New(t) 63 | ctx := context.Background() 64 | 65 | queue := s.bokchoy.Queue("tests.task.message") 66 | task1, err := queue.Publish(ctx, "hello") 67 | is.NoError(err) 68 | stats, err := queue.Count(ctx) 69 | is.NoError(err) 70 | is.Equal(1, stats.Total) 71 | is.Equal(1, stats.Direct) 72 | is.Nil(err) 73 | is.NotZero(task1) 74 | is.Equal(task1.Name, queue.Name()) 75 | 76 | err = queue.Empty(ctx) 77 | is.NoError(err) 78 | 79 | task2, err := queue.Publish(ctx, "hello", bokchoy.WithCountdown(60*time.Second)) 80 | is.NoError(err) 81 | stats, err = queue.Count(ctx) 82 | is.NoError(err) 83 | is.NotZero(task2.ETA) 84 | 85 | is.Equal(1, stats.Total) 86 | is.Equal(0, stats.Direct) 87 | is.NotZero(task2) 88 | is.Equal(task2.Name, queue.Name()) 89 | 90 | task3, err := queue.Get(ctx, task2.ID) 91 | is.NoError(err) 92 | is.NotZero(task3) 93 | is.NotZero(task3.ETA) 94 | is.Equal(task3.ETA.Unix(), task2.ETA.Unix()) 95 | }) 96 | } 97 | 98 | type consumer struct { 99 | ticker chan struct{} 100 | } 101 | 102 | func (c consumer) Handle(r *bokchoy.Request) error { 103 | c.ticker <- struct{}{} 104 | 105 | return nil 106 | } 107 | 108 | func TestQueue_ConsumeDelayed(t *testing.T) { 109 | run(t, func(t *testing.T, s *suite) { 110 | is := assert.New(t) 111 | ctx := context.Background() 112 | 113 | consumer := &consumer{ 114 | ticker: make(chan struct{}), 115 | } 116 | 117 | queueName := "tests.task.message" 118 | 119 | s.bokchoy.Handle(queueName, consumer) 120 | 121 | go func() { 122 | err := s.bokchoy.Run(ctx) 123 | is.NoError(err) 124 | }() 125 | 126 | task, err := s.bokchoy.Publish(ctx, queueName, "world", bokchoy.WithCountdown(2*time.Second)) 127 | is.NotZero(task) 128 | is.NoError(err) 129 | 130 | 
ctx, cancel := context.WithTimeout(ctx, 5*time.Second) 131 | defer cancel() 132 | 133 | select { 134 | case <-ctx.Done(): 135 | is.True(false) 136 | case <-consumer.ticker: 137 | is.True(true) 138 | } 139 | 140 | s.bokchoy.Stop(ctx) 141 | }) 142 | } 143 | -------------------------------------------------------------------------------- /examples/crawler/handler/crawl.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "log" 7 | "net" 8 | "net/http" 9 | "strings" 10 | "sync" 11 | "time" 12 | 13 | "github.com/thoas/go-funk" 14 | 15 | "github.com/thoas/bokchoy" 16 | "github.com/thoas/bokchoy/examples/crawler/parser" 17 | "github.com/thoas/bokchoy/examples/crawler/task" 18 | ) 19 | 20 | // NewCrawlHandler initializes a new CrawlHandler instance. 21 | func NewCrawlHandler(queue *bokchoy.Queue, parser parser.Parser, timeout time.Duration) *CrawlHandler { 22 | return &CrawlHandler{ 23 | clt: &http.Client{ 24 | Timeout: time.Second * timeout, 25 | Transport: &http.Transport{ 26 | Dial: (&net.Dialer{ 27 | Timeout: timeout * time.Second, 28 | }).Dial, 29 | TLSHandshakeTimeout: timeout * time.Second, 30 | }, 31 | }, 32 | crawls: map[string]int{}, 33 | queue: queue, 34 | parser: parser, 35 | } 36 | } 37 | 38 | type CrawlHandler struct { 39 | clt *http.Client 40 | crawls map[string]int 41 | mu sync.RWMutex 42 | queue *bokchoy.Queue 43 | parser parser.Parser 44 | } 45 | 46 | // AddCrawl adds a new crawl to the storage. 47 | func (h *CrawlHandler) AddCrawl(url string, statusCode int) { 48 | h.mu.Lock() 49 | defer h.mu.Unlock() 50 | 51 | h.crawls[url] = statusCode 52 | } 53 | 54 | // Crawls returns the crawls. 55 | func (h *CrawlHandler) Crawls() []string { 56 | h.mu.RLock() 57 | crawls := make([]string, len(h.crawls)) 58 | i := 0 59 | for url := range h.crawls { 60 | crawls[i] = url 61 | i++ 62 | } 63 | h.mu.RUnlock() 64 | 65 | return crawls 66 | } 67 | 68 | // Crawl publishes a new task to crawl an url with its base URL. 69 | func (h *CrawlHandler) Crawl(ctx context.Context, baseURL string, url string, depth int) (*bokchoy.Task, error) { 70 | url = strings.TrimSuffix(url, "/") 71 | baseURL = strings.TrimSuffix(baseURL, "/") 72 | 73 | task, err := h.queue.Publish(ctx, &task.Crawl{ 74 | URL: url, 75 | BaseURL: baseURL, 76 | Depth: depth, 77 | }) 78 | if err != nil { 79 | return nil, err 80 | } 81 | return task, nil 82 | } 83 | 84 | func (h *CrawlHandler) extractLinks(baseURL string, res *http.Response) ([]string, error) { 85 | links, err := h.parser.ExtractLinks(baseURL, res.Body) 86 | if err != nil { 87 | return nil, err 88 | } 89 | 90 | crawls := h.Crawls() 91 | 92 | filteredLinks := []string{} 93 | for i := range links { 94 | if funk.InStrings(crawls, links[i]) { 95 | continue 96 | } 97 | 98 | filteredLinks = append(filteredLinks, links[i]) 99 | } 100 | 101 | return filteredLinks, nil 102 | } 103 | 104 | // Handle handles a bokchoy.Request. 
105 | func (h *CrawlHandler) Handle(r *bokchoy.Request) error { 106 | res, err := json.Marshal(r.Task.Payload) 107 | if err != nil { 108 | return err 109 | } 110 | 111 | var crawl task.Crawl 112 | 113 | err = json.Unmarshal(res, &crawl) 114 | if err != nil { 115 | return err 116 | } 117 | 118 | log.Print("Received ", crawl) 119 | 120 | resp, err := h.clt.Get(crawl.URL) 121 | if err != nil { 122 | return err 123 | } 124 | 125 | log.Print("Crawled ", crawl.URL, " - [", resp.Status, "]") 126 | h.AddCrawl(crawl.URL, resp.StatusCode) 127 | 128 | defer resp.Body.Close() 129 | 130 | if resp.StatusCode != 200 { 131 | return nil 132 | } 133 | 134 | // if depth is zero, the handler should stop 135 | if crawl.Depth == 0 { 136 | return nil 137 | } 138 | 139 | // extract relative links 140 | links, err := h.extractLinks(crawl.BaseURL, resp) 141 | if err != nil { 142 | return nil 143 | } 144 | 145 | for i := range links { 146 | // next crawls will still have the same base url 147 | // depth is decremented to stop the flow 148 | task, err := h.Crawl(r.Context(), crawl.BaseURL, links[i], crawl.Depth-1) 149 | if err != nil { 150 | return err 151 | } 152 | 153 | log.Printf("%s published", task) 154 | } 155 | 156 | return nil 157 | } 158 | -------------------------------------------------------------------------------- /logging/logging.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "go.uber.org/zap" 8 | "go.uber.org/zap/zapcore" 9 | ) 10 | 11 | // DefaultLogger is the default logger. 12 | var DefaultLogger = NewNopLogger() 13 | 14 | // Field is the logger field. 15 | type Field = zapcore.Field 16 | 17 | // ObjectEncoder is the logger representation of a structure. 18 | type ObjectEncoder = zapcore.ObjectEncoder 19 | 20 | // Logger is the standard logger interface. 21 | type Logger interface { 22 | Panic(context.Context, string, ...Field) 23 | Info(context.Context, string, ...Field) 24 | Error(context.Context, string, ...Field) 25 | Debug(context.Context, string, ...Field) 26 | Sync() error 27 | With(fields ...Field) Logger 28 | } 29 | 30 | // String appends a string field. 31 | func String(k, v string) Field { 32 | return zap.String(k, v) 33 | } 34 | 35 | // Duration appends a duration field. 36 | func Duration(k string, d time.Duration) Field { 37 | return zap.Duration(k, d) 38 | } 39 | 40 | // Float64 appends a float64 field. 41 | func Float64(key string, val float64) Field { 42 | return zap.Float64(key, val) 43 | } 44 | 45 | // Time appends a time field. 46 | func Time(key string, val time.Time) Field { 47 | return zap.Time(key, val) 48 | } 49 | 50 | // Int appends an int field. 51 | func Int(k string, i int) Field { 52 | return zap.Int(k, i) 53 | } 54 | 55 | // Int64 appends an int64 field. 56 | func Int64(k string, i int64) Field { 57 | return zap.Int64(k, i) 58 | } 59 | 60 | // Error appends an error field. 61 | func Error(v error) Field { 62 | return zap.Error(v) 63 | } 64 | 65 | // Object appends an object field which implements the ObjectMarshaler interface. 66 | func Object(key string, val zapcore.ObjectMarshaler) Field { 67 | return zap.Object(key, val) 68 | } 69 | 70 | // NewProductionLogger initializes a production logger. 71 | func NewProductionLogger() (Logger, error) { 72 | logger, err := zap.NewProduction() 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | return &wrapLogger{logger}, nil 78 | } 79 | 80 | // NewDevelopmentLogger initializes a development logger.
81 | func NewDevelopmentLogger() (Logger, error) { 82 | logger, err := zap.NewDevelopment() 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | return &wrapLogger{logger}, nil 88 | } 89 | 90 | // NewNopLogger initializes a noop logger. 91 | func NewNopLogger() Logger { 92 | return &wrapLogger{zap.NewNop()} 93 | } 94 | 95 | type wrapLogger struct { 96 | *zap.Logger 97 | } 98 | 99 | // With creates a child logger and adds structured context to it. Fields added 100 | // to the child don't affect the parent, and vice versa. 101 | func (l wrapLogger) With(fields ...Field) Logger { 102 | return &wrapLogger{l.Logger.With(fields...)} 103 | } 104 | 105 | // Panic logs a message at PanicLevel. The message includes any fields passed 106 | // at the log site, as well as any fields accumulated on the logger. 107 | // 108 | // The logger then panics, even if logging at PanicLevel is disabled. 109 | func (l wrapLogger) Panic(ctx context.Context, msg string, fields ...Field) { 110 | l.Logger.Panic(msg, fields...) 111 | } 112 | 113 | // Info logs a message at InfoLevel. The message includes any fields passed 114 | // at the log site, as well as any fields accumulated on the logger. 115 | func (l wrapLogger) Info(ctx context.Context, msg string, fields ...Field) { 116 | l.Logger.Info(msg, fields...) 117 | 118 | } 119 | 120 | // Error logs a message at ErrorLevel. The message includes any fields passed 121 | // at the log site, as well as any fields accumulated on the logger. 122 | func (l wrapLogger) Error(ctx context.Context, msg string, fields ...Field) { 123 | l.Logger.Error(msg, fields...) 124 | } 125 | 126 | // Debug logs a message at DebugLevel. The message includes any fields passed 127 | // at the log site, as well as any fields accumulated on the logger. 128 | func (l wrapLogger) Debug(ctx context.Context, msg string, fields ...Field) { 129 | l.Logger.Debug(msg, fields...) 130 | } 131 | -------------------------------------------------------------------------------- /consumer_test.go: -------------------------------------------------------------------------------- 1 | package bokchoy_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/thoas/bokchoy" 11 | ) 12 | 13 | type noopconsumer struct { 14 | } 15 | 16 | func (c noopconsumer) Handle(r *bokchoy.Request) error { 17 | return nil 18 | } 19 | 20 | func TestConsumer_Consume(t *testing.T) { 21 | run(t, func(t *testing.T, s *suite) { 22 | is := assert.New(t) 23 | 24 | queue := s.bokchoy.Queue("tests.task.info") 25 | 26 | ticker := make(chan struct{}) 27 | 28 | ctx := context.Background() 29 | queue.HandleFunc(func(r *bokchoy.Request) error { 30 | time.Sleep(time.Millisecond * 500) 31 | 32 | r.Task.Result = r.Task.Payload 33 | 34 | ticker <- struct{}{} 35 | 36 | ctx := r.Context() 37 | 38 | bokchoy.GetContextTask(ctx) 39 | bokchoy.WithContextAfterRequestFunc(ctx, func() {}) 40 | 41 | return nil 42 | }, bokchoy.WithConcurrency(1)) 43 | consumer := &noopconsumer{} 44 | queue.OnStart(consumer). 45 | OnComplete(consumer). 46 | OnFailure(consumer). 
47 | OnSuccess(consumer) 48 | 49 | go func() { 50 | err := s.bokchoy.Run(ctx, bokchoy.WithQueues([]string{"tests.task.info"})) 51 | is.NoError(err) 52 | }() 53 | 54 | task, err := queue.Publish(ctx, "world", bokchoy.WithSerializer(&bokchoy.JSONSerializer{})) 55 | is.NoError(err) 56 | 57 | ctx, cancel := context.WithTimeout(ctx, 5*time.Second) 58 | defer cancel() 59 | 60 | select { 61 | case <-ctx.Done(): 62 | is.True(false) 63 | case <-ticker: 64 | is.True(true) 65 | } 66 | 67 | s.bokchoy.Stop(ctx) 68 | 69 | task, err = queue.Get(ctx, task.ID) 70 | is.NoError(err) 71 | is.True(task.IsStatusSucceeded()) 72 | is.Equal(task.Payload, task.Result) 73 | 74 | is.Equal(fmt.Sprintf("%.1f", task.ExecTime), "0.5") 75 | }) 76 | } 77 | 78 | // nolint: govet 79 | func TestConsumer_ConsumeRetries(t *testing.T) { 80 | run(t, func(t *testing.T, s *suite) { 81 | is := assert.New(t) 82 | 83 | queue := s.bokchoy.Queue("tests.task.error") 84 | 85 | maxRetries := 3 86 | 87 | ticker := make(chan struct{}, maxRetries) 88 | 89 | ctx := context.Background() 90 | queue.HandleFunc(func(r *bokchoy.Request) error { 91 | maxRetries-- 92 | 93 | ticker <- struct{}{} 94 | 95 | return fmt.Errorf("An error occurred") 96 | }) 97 | 98 | go func() { 99 | err := s.bokchoy.Run(ctx) 100 | is.NoError(err) 101 | }() 102 | 103 | task, err := queue.Publish(ctx, "error", 104 | bokchoy.WithMaxRetries(maxRetries), bokchoy.WithRetryIntervals([]time.Duration{ 105 | 1 * time.Second, 106 | 2 * time.Second, 107 | 3 * time.Second, 108 | })) 109 | is.NotZero(task) 110 | is.NoError(err) 111 | 112 | ctx, cancel := context.WithTimeout(ctx, 10*time.Second) 113 | defer cancel() 114 | 115 | for { 116 | select { 117 | case <-ctx.Done(): 118 | return 119 | case <-ticker: 120 | if maxRetries == 0 { 121 | return 122 | } 123 | } 124 | } 125 | 126 | is.Equal(maxRetries, 0) 127 | 128 | s.bokchoy.Stop(ctx) 129 | 130 | task, err = queue.Get(ctx, task.ID) 131 | is.NoError(err) 132 | is.True(task.IsStatusFailed()) 133 | is.Equal(task.MaxRetries, 0) 134 | }) 135 | } 136 | 137 | // nolint: govet,gosimple 138 | func TestConsumer_ConsumeLong(t *testing.T) { 139 | run(t, func(t *testing.T, s *suite) { 140 | is := assert.New(t) 141 | 142 | queue := s.bokchoy.Queue("tests.task.long") 143 | 144 | ctx := context.Background() 145 | queue.HandleFunc(func(r *bokchoy.Request) error { 146 | time.Sleep(3 * time.Second) 147 | 148 | return nil 149 | }) 150 | 151 | go func() { 152 | err := s.bokchoy.Run(ctx) 153 | is.NoError(err) 154 | }() 155 | 156 | task, err := queue.Publish(ctx, "long", 157 | bokchoy.WithTimeout(2*time.Second), 158 | bokchoy.WithMaxRetries(0)) 159 | 160 | is.NotZero(task) 161 | is.NoError(err) 162 | 163 | ctx, cancel := context.WithTimeout(ctx, 3*time.Second) 164 | defer cancel() 165 | 166 | for { 167 | select { 168 | case <-ctx.Done(): 169 | return 170 | } 171 | } 172 | 173 | s.bokchoy.Stop(ctx) 174 | 175 | task, err = queue.Get(ctx, task.ID) 176 | is.NoError(err) 177 | is.True(task.IsStatusCanceled()) 178 | }) 179 | } 180 | -------------------------------------------------------------------------------- /options.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/thoas/bokchoy/logging" 8 | ) 9 | 10 | // Options is the bokchoy options. 
11 | type Options struct { 12 | Tracer Tracer 13 | Logger logging.Logger 14 | Concurrency int 15 | MaxRetries int 16 | TTL time.Duration 17 | Countdown *time.Duration 18 | Timeout time.Duration 19 | RetryIntervals []time.Duration 20 | Serializer Serializer 21 | Initialize bool 22 | Queues []string 23 | DisableOutput bool 24 | Servers []Server 25 | Broker Broker 26 | } 27 | 28 | // RetryIntervalsDisplay returns a string representation of the retry intervals. 29 | func (o Options) RetryIntervalsDisplay() string { 30 | intervals := make([]string, len(o.RetryIntervals)) 31 | for i := range o.RetryIntervals { 32 | intervals[i] = o.RetryIntervals[i].String() 33 | } 34 | 35 | return strings.Join(intervals, ", ") 36 | } 37 | 38 | // newOptions returns default options. 39 | func newOptions() *Options { 40 | opts := &Options{} 41 | 42 | options := []Option{ 43 | WithConcurrency(defaultConcurrency), 44 | WithMaxRetries(defaultMaxRetries), 45 | WithTTL(defaultTTL), 46 | WithTimeout(defaultTimeout), 47 | WithRetryIntervals(defaultRetryIntervals), 48 | WithInitialize(true), 49 | } 50 | 51 | for i := range options { 52 | options[i](opts) 53 | } 54 | 55 | return opts 56 | } 57 | 58 | // Option is an option unit. 59 | type Option func(opts *Options) 60 | 61 | // WithDisableOutput defines if the output (logo, queues information) 62 | // should be disabled. 63 | func WithDisableOutput(disableOutput bool) Option { 64 | return func(opts *Options) { 65 | opts.DisableOutput = disableOutput 66 | } 67 | } 68 | 69 | // WithBroker registers new broker. 70 | func WithBroker(broker Broker) Option { 71 | return func(opts *Options) { 72 | opts.Broker = broker 73 | } 74 | } 75 | 76 | // WithServers registers new servers to be run. 77 | func WithServers(servers []Server) Option { 78 | return func(opts *Options) { 79 | opts.Servers = servers 80 | } 81 | } 82 | 83 | // WithQueues allows to override queues to run. 84 | func WithQueues(queues []string) Option { 85 | return func(opts *Options) { 86 | opts.Queues = queues 87 | } 88 | } 89 | 90 | // WithSerializer defines the Serializer. 91 | func WithSerializer(serializer Serializer) Option { 92 | return func(opts *Options) { 93 | opts.Serializer = serializer 94 | } 95 | } 96 | 97 | // WithInitialize defines if the broker needs to be initialized. 98 | func WithInitialize(initialize bool) Option { 99 | return func(opts *Options) { 100 | opts.Initialize = initialize 101 | } 102 | } 103 | 104 | // WithTracer defines the Tracer. 105 | func WithTracer(tracer Tracer) Option { 106 | return func(opts *Options) { 107 | opts.Tracer = tracer 108 | } 109 | } 110 | 111 | // WithLogger defines the Logger. 112 | func WithLogger(logger logging.Logger) Option { 113 | return func(opts *Options) { 114 | opts.Logger = logger 115 | } 116 | } 117 | 118 | // WithTimeout defines the timeout used to execute a task. 119 | func WithTimeout(timeout time.Duration) Option { 120 | return func(opts *Options) { 121 | opts.Timeout = timeout 122 | } 123 | } 124 | 125 | // WithCountdown defines the countdown to launch a delayed task. 126 | func WithCountdown(countdown time.Duration) Option { 127 | return func(opts *Options) { 128 | opts.Countdown = &countdown 129 | } 130 | } 131 | 132 | // WithConcurrency defines the number of concurrent consumers. 133 | func WithConcurrency(concurrency int) Option { 134 | return func(opts *Options) { 135 | opts.Concurrency = concurrency 136 | } 137 | } 138 | 139 | // WithMaxRetries defines the number of maximum retries for a failed task. 
140 | func WithMaxRetries(maxRetries int) Option { 141 | return func(opts *Options) { 142 | opts.MaxRetries = maxRetries 143 | } 144 | } 145 | 146 | // WithRetryIntervals defines the retry intervals for a failed task. 147 | func WithRetryIntervals(retryIntervals []time.Duration) Option { 148 | return func(opts *Options) { 149 | opts.RetryIntervals = retryIntervals 150 | } 151 | } 152 | 153 | // WithTTL defines the duration to keep the task in the broker. 154 | func WithTTL(ttl time.Duration) Option { 155 | return func(opts *Options) { 156 | opts.TTL = ttl 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /examples/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "os/signal" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "github.com/davecgh/go-spew/spew" 15 | "github.com/mitchellh/mapstructure" 16 | 17 | "github.com/thoas/bokchoy" 18 | "github.com/thoas/bokchoy/logging" 19 | "github.com/thoas/bokchoy/middleware" 20 | ) 21 | 22 | type message struct { 23 | Data string `json:"data"` 24 | } 25 | 26 | // nolint: gocyclo, vet 27 | func main() { 28 | var ( 29 | run string 30 | taskID string 31 | retryIntervals string 32 | concurrency int 33 | ) 34 | 35 | flag.StringVar(&taskID, "task-id", "", "task identifier") 36 | flag.StringVar(&run, "run", "publish", "service to run") 37 | flag.StringVar(&retryIntervals, "retry-intervals", "2,4,6", "retry intervals in seconds") 38 | flag.IntVar(&concurrency, "concurrency", 1, "concurrency to run consumer") 39 | flag.Parse() 40 | 41 | logger, err := logging.NewDevelopmentLogger() 42 | if err != nil { 43 | log.Fatal(err) 44 | } 45 | 46 | defer logger.Sync() 47 | 48 | ctx := context.Background() 49 | 50 | bok, err := bokchoy.New(ctx, bokchoy.Config{ 51 | Broker: bokchoy.BrokerConfig{ 52 | Type: "redis", 53 | Redis: bokchoy.RedisConfig{ 54 | Type: "client", 55 | Client: bokchoy.RedisClientConfig{ 56 | Addr: "localhost:6379", 57 | }, 58 | }, 59 | }, 60 | }, bokchoy.WithLogger(logger.With(logging.String("logger", "bokchoy")))) 61 | bok.Use(middleware.RequestID) 62 | 63 | queue := bok.Queue("tasks.message") 64 | queueFail := bok.Queue("tasks.message.failed") 65 | 66 | retryIntervalsList := strings.Split(retryIntervals, ",") 67 | intervals := make([]time.Duration, len(retryIntervalsList)) 68 | for i := range retryIntervalsList { 69 | value, _ := strconv.ParseInt(retryIntervalsList[i], 10, 64) 70 | 71 | intervals[i] = time.Duration(value) * time.Second 72 | } 73 | 74 | switch run { 75 | case "list": 76 | tasks, err := queue.List(ctx) 77 | 78 | if err != nil { 79 | log.Fatal(err) 80 | } 81 | 82 | for i := range tasks { 83 | log.Printf("%s retrieved", tasks[i]) 84 | } 85 | case "get": 86 | task, err := queue.Get(ctx, taskID) 87 | 88 | if err != nil { 89 | log.Fatal(err) 90 | } 91 | 92 | log.Printf("%s retrieved", task) 93 | case "cancel": 94 | task, err := queue.Cancel(ctx, taskID) 95 | 96 | if err != nil { 97 | log.Fatal(err) 98 | } 99 | 100 | log.Printf("%s canceled", task) 101 | case "publish:failed:intervals": 102 | if err != nil { 103 | log.Fatal(err) 104 | } 105 | 106 | task, err := queueFail.Publish(ctx, message{Data: "hello"}, 107 | bokchoy.WithMaxRetries(3), 108 | bokchoy.WithRetryIntervals(intervals)) 109 | 110 | if err != nil { 111 | log.Fatal(err) 112 | } 113 | 114 | log.Printf("%s published with retry intervals", task) 115 | case "publish:delay": 116 | if err != nil { 
117 | log.Fatal(err) 118 | } 119 | 120 | task, err := queue.Publish(ctx, message{Data: "hello"}, 121 | bokchoy.WithCountdown(5*time.Second)) 122 | if err != nil { 123 | log.Fatal(err) 124 | } 125 | 126 | log.Printf("%s delayed published", task) 127 | case "publish": 128 | if err != nil { 129 | log.Fatal(err) 130 | } 131 | 132 | for i := 0; i < concurrency; i++ { 133 | task, err := queue.Publish(ctx, message{Data: "hello"}) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | 138 | log.Printf("%s published", task) 139 | } 140 | 141 | case "publish:timeout": 142 | if err != nil { 143 | log.Fatal(err) 144 | } 145 | 146 | task, err := queue.Publish(ctx, message{Data: "hello"}, 147 | bokchoy.WithTimeout(5*time.Second)) 148 | if err != nil { 149 | log.Fatal(err) 150 | } 151 | 152 | log.Printf("%s published", task) 153 | case "consume": 154 | queueFail.HandleFunc(func(r *bokchoy.Request) error { 155 | return fmt.Errorf("It should fail badly") 156 | }, bokchoy.WithConcurrency(concurrency)) 157 | 158 | queue.OnStartFunc(func(r *bokchoy.Request) error { 159 | *r = *r.WithContext(context.WithValue(r.Context(), "foo", "bar")) 160 | 161 | return nil 162 | }) 163 | 164 | queue.OnCompleteFunc(func(r *bokchoy.Request) error { 165 | spew.Dump(r.Context()) 166 | 167 | return nil 168 | }) 169 | 170 | queue.HandleFunc(func(r *bokchoy.Request) error { 171 | var ( 172 | msg message 173 | task = r.Task 174 | ) 175 | err := mapstructure.Decode(task.Payload, &msg) 176 | if err != nil { 177 | return err 178 | } 179 | 180 | log.Printf("%s received, message decoded: %+v", task, msg) 181 | 182 | return nil 183 | }, bokchoy.WithConcurrency(concurrency)) 184 | 185 | c := make(chan os.Signal, 1) 186 | signal.Notify(c, os.Interrupt) 187 | 188 | go func() { 189 | for range c { 190 | log.Print("Received signal, gracefully stopping") 191 | bok.Stop(ctx) 192 | } 193 | }() 194 | 195 | bok.Run(ctx) 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /middleware/logger.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/thoas/bokchoy" 10 | ) 11 | 12 | var ( 13 | // LogEntryCtxKey is the context.Context key to store the request log entry. 14 | LogEntryCtxKey = &contextKey{"LogEntry"} 15 | 16 | // DefaultLogger is called by the Logger middleware handler to log each request. 17 | // Its made a package-level variable so that it can be reconfigured for custom 18 | // logging configurations. 19 | DefaultLogger = RequestLogger(&DefaultLogFormatter{Logger: log.New(os.Stdout, "", log.LstdFlags), NoColor: false}) 20 | ) 21 | 22 | // RequestLogger returns a logger handler using a custom LogFormatter. 23 | func RequestLogger(f LogFormatter) func(next bokchoy.Handler) bokchoy.Handler { 24 | return func(next bokchoy.Handler) bokchoy.Handler { 25 | fn := func(r *bokchoy.Request) error { 26 | entry := f.NewLogEntry(r) 27 | 28 | t1 := time.Now() 29 | 30 | ctx := bokchoy.WithContextAfterRequestFunc(r.Context(), func() { 31 | entry.Write(r, time.Since(t1)) 32 | }) 33 | 34 | r = r.WithContext(ctx) 35 | 36 | return next.Handle(WithLogEntry(r, entry)) 37 | } 38 | return bokchoy.HandlerFunc(fn) 39 | } 40 | } 41 | 42 | // LogFormatter initiates the beginning of a new LogEntry per request. 43 | // See DefaultLogFormatter for an example implementation. 
44 | type LogFormatter interface { 45 | NewLogEntry(r *bokchoy.Request) LogEntry 46 | } 47 | 48 | // LogEntry records the final log when a request completes. 49 | // See defaultLogEntry for an example implementation. 50 | type LogEntry interface { 51 | Write(r *bokchoy.Request, elapsed time.Duration) 52 | Panic(v interface{}, stack []byte) 53 | } 54 | 55 | // GetLogEntry returns the in-context LogEntry for a request. 56 | func GetLogEntry(r *bokchoy.Request) LogEntry { 57 | entry, _ := r.Context().Value(LogEntryCtxKey).(LogEntry) 58 | return entry 59 | } 60 | 61 | // WithLogEntry sets the in-context LogEntry for a request. 62 | func WithLogEntry(r *bokchoy.Request, entry LogEntry) *bokchoy.Request { 63 | r = r.WithContext(context.WithValue(r.Context(), LogEntryCtxKey, entry)) 64 | return r 65 | } 66 | 67 | // LoggerInterface accepts printing to the stdlib logger or any compatible logger. 68 | type LoggerInterface interface { 69 | Print(v ...interface{}) 70 | } 71 | 72 | // DefaultLogFormatter is a simple logger that implements a LogFormatter. 73 | type DefaultLogFormatter struct { 74 | Logger LoggerInterface 75 | NoColor bool 76 | } 77 | 78 | // NewLogEntry creates a new LogEntry for the request. 79 | func (l *DefaultLogFormatter) NewLogEntry(r *bokchoy.Request) LogEntry { 80 | entry := &defaultLogEntry{ 81 | DefaultLogFormatter: l, 82 | request: r, 83 | buf: bokchoy.NewColorWriter(nil), 84 | } 85 | 86 | reqID := GetReqID(r.Context()) 87 | if reqID != "" { 88 | entry.buf = entry.buf.WithColor(bokchoy.ColorYellow) 89 | entry.buf.Write("[%s] ", reqID) 90 | } 91 | 92 | task := r.Task 93 | 94 | entry.buf = entry.buf.WithColor(bokchoy.ColorBrightMagenta) 95 | entry.buf.Write("<Task id=%s name=%s payload=%v>", task.ID, task.Name, task.Payload) 96 | entry.buf = entry.buf.WithColor(bokchoy.ColorWhite) 97 | entry.buf.WriteString(" - ") 98 | 99 | return entry 100 | } 101 | 102 | type defaultLogEntry struct { 103 | *DefaultLogFormatter 104 | request *bokchoy.Request 105 | buf *bokchoy.ColorWriter 106 | } 107 | 108 | func (l *defaultLogEntry) Write(r *bokchoy.Request, elapsed time.Duration) { 109 | task := r.Task 110 | 111 | switch { 112 | case task.IsStatusProcessing(): 113 | l.buf = l.buf.WithColor(bokchoy.ColorBrightBlue) 114 | l.buf.Write("%s", task.StatusDisplay()) 115 | case task.IsStatusSucceeded(): 116 | l.buf = l.buf.WithColor(bokchoy.ColorBrightGreen) 117 | l.buf.Write("%s", task.StatusDisplay()) 118 | case task.IsStatusCanceled(): 119 | l.buf = l.buf.WithColor(bokchoy.ColorBrightYellow) 120 | l.buf.Write("%s", task.StatusDisplay()) 121 | case task.IsStatusFailed(): 122 | l.buf = l.buf.WithColor(bokchoy.ColorBrightRed) 123 | l.buf.Write("%s", task.StatusDisplay()) 124 | } 125 | 126 | l.buf.WriteString(" - ") 127 | 128 | l.buf = l.buf.WithColor(bokchoy.ColorBrightBlue) 129 | 130 | if task.Result == nil { 131 | l.buf.Write("result: (empty)") 132 | } else { 133 | l.buf.Write("result: \"%s\"", task.Result) 134 | } 135 | 136 | l.buf = l.buf.WithColor(bokchoy.ColorWhite) 137 | 138 | l.buf.WriteString(" in ") 139 | if elapsed < 500*time.Millisecond { 140 | l.buf = l.buf.WithColor(bokchoy.ColorGreen) 141 | l.buf.Write("%s", elapsed) 142 | } else if elapsed < 5*time.Second { 143 | l.buf = l.buf.WithColor(bokchoy.ColorYellow) 144 | l.buf.Write("%s", elapsed) 145 | } else { 146 | l.buf = l.buf.WithColor(bokchoy.ColorRed) 147 | l.buf.Write("%s", elapsed) 148 | } 149 | 150 | l.Logger.Print(l.buf.String()) 151 | } 152 | 153 | func (l *defaultLogEntry) Panic(v interface{}, stack []byte) { 154 | panicEntry =
l.NewLogEntry(l.request).(*defaultLogEntry) 155 | l.buf = l.buf.WithColor(bokchoy.ColorRed) 156 | l.buf.Write("panic: %+v", v) 157 | l.Logger.Print(panicEntry.buf.String()) 158 | l.Logger.Print(string(stack)) 159 | } 160 | -------------------------------------------------------------------------------- /consumer.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | 8 | "github.com/thoas/bokchoy/logging" 9 | 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | type consumer struct { 14 | name string 15 | handler Handler 16 | middlewares []func(Handler) Handler 17 | queue *Queue 18 | serializer Serializer 19 | logger logging.Logger 20 | tracer Tracer 21 | wg *sync.WaitGroup 22 | closed bool 23 | mu *sync.Mutex 24 | } 25 | 26 | func (c *consumer) stop(ctx context.Context) { 27 | c.mu.Lock() 28 | defer c.mu.Unlock() 29 | 30 | if !c.closed { 31 | c.wg.Done() 32 | c.closed = true 33 | } 34 | 35 | c.logger.Debug(ctx, fmt.Sprintf("Stopped %s", c)) 36 | } 37 | 38 | func (c *consumer) String() string { 39 | return c.name 40 | } 41 | 42 | func (c *consumer) handleTask(ctx context.Context, r *Request) error { 43 | var ( 44 | err error 45 | task = r.Task 46 | ) 47 | 48 | timeout, cancel := context.WithTimeout(ctx, task.Timeout) 49 | defer cancel() 50 | 51 | type result struct { 52 | err error 53 | } 54 | 55 | conn := make(chan result) 56 | 57 | // we execute the complete request lifecycle in a goroutine 58 | // to handle timeout and retrieve the worker back if the executing 59 | // time a task is higher than timeout. 60 | go func() { 61 | defer func() { 62 | conn <- result{ 63 | err: err, 64 | } 65 | }() 66 | 67 | middlewares := c.middlewares 68 | middlewares = append(middlewares, c.handleRequest) 69 | 70 | // chain wraps the n middleware with its n-1 71 | // we keep the reference of the final request since it contains 72 | // a modified context by middlewares. 73 | err = chain(middlewares, HandlerFunc(func(req *Request) error { 74 | *r = *req 75 | 76 | // execute the underlying handler. 77 | return c.handler.Handle(req) 78 | })).Handle(r) 79 | 80 | // retrieve the error in the context added 81 | // by the recoverer middleware. 82 | if err == nil { 83 | err = GetContextError(r.Context()) 84 | } 85 | }() 86 | 87 | select { 88 | // timeout done, we have to cancel the task. 89 | case <-timeout.Done(): 90 | c.logger.Debug(ctx, "Task canceled by timeout", logging.Object("task", task)) 91 | err = ErrTaskCanceled 92 | case res := <-conn: 93 | err = res.err 94 | } 95 | 96 | if err != nil { 97 | return errors.Wrapf(err, "unable to handle %s", task) 98 | } 99 | 100 | return nil 101 | } 102 | 103 | // handleError can be called twice since it's called inside Handle 104 | // and in handleRequest. 105 | func (c *consumer) handleError(ctx context.Context, task *Task, err error) error { 106 | // Why do we have to call it twice? 107 | // A panicking task should be marked as failed, when it's panicking 108 | // the first handleError is skipped. 
109 | if !task.IsStatusProcessing() { 110 | return nil 111 | } 112 | 113 | if err == nil { 114 | task.MarkAsSucceeded() 115 | 116 | c.logger.Debug(ctx, "Task marked as succeeded", logging.Object("task", task)) 117 | 118 | err = c.queue.Save(ctx, task) 119 | if err != nil { 120 | return errors.Wrapf(err, "unable to handle error %s", task) 121 | } 122 | 123 | return nil 124 | } 125 | 126 | c.tracer.Log(ctx, "Received an error when handling task", errors.Wrapf(err, "unable to handle task %s", task)) 127 | 128 | if errors.Cause(err) == ErrTaskCanceled { 129 | task.MarkAsCanceled() 130 | 131 | c.logger.Debug(ctx, "Task marked as canceled", logging.Object("task", task)) 132 | } else { 133 | task.MarkAsFailed(err) 134 | } 135 | 136 | if task.MaxRetries == 0 { 137 | c.logger.Debug(ctx, "Task marked as failed: no retry", logging.Object("task", task)) 138 | 139 | err = c.queue.Save(ctx, task) 140 | if err != nil { 141 | return errors.Wrapf(err, "unable to handle error %s", task) 142 | } 143 | 144 | return nil 145 | } 146 | 147 | task.MaxRetries -= 1 148 | task.ETA = task.RetryETA() 149 | 150 | c.logger.Debug(ctx, fmt.Sprintf("Task marked as failed: retrying in %s...", task.ETADisplay()), 151 | logging.Object("task", task)) 152 | 153 | err = c.queue.PublishTask(ctx, task) 154 | 155 | if err != nil { 156 | return errors.Wrapf(err, "unable to handle error %s", task) 157 | } 158 | 159 | return nil 160 | } 161 | 162 | func (c *consumer) handleRequest(next Handler) Handler { 163 | return HandlerFunc(func(req *Request) error { 164 | var ( 165 | ctx = req.Context() 166 | err = next.Handle(req) 167 | ) 168 | 169 | return c.handleError(ctx, req.Task, err) 170 | }) 171 | } 172 | 173 | func (c *consumer) Handle(r *Request) error { 174 | c.mu.Lock() 175 | defer c.mu.Unlock() 176 | 177 | var ( 178 | task = r.Task 179 | err error 180 | ) 181 | 182 | ctx := r.Context() 183 | 184 | c.logger.Debug(ctx, "Task received", logging.Object("task", task)) 185 | 186 | if task.IsStatusCanceled() { 187 | c.logger.Debug(ctx, "Task has been previously canceled", logging.Object("task", task)) 188 | } else { 189 | task.MarkAsProcessing() 190 | 191 | c.logger.Debug(ctx, "Task processing...", logging.Object("task", task)) 192 | 193 | err = c.queue.Save(ctx, task) 194 | if err != nil { 195 | return err 196 | } 197 | 198 | err = c.queue.fireEvents(r) 199 | if err != nil { 200 | return err 201 | } 202 | 203 | err = c.handleTask(ctx, r) 204 | 205 | err = c.handleError(ctx, task, err) 206 | if err != nil { 207 | return err 208 | } 209 | 210 | funcs := GetContextAfterRequestFuncs(r.Context()) 211 | for i := range funcs { 212 | funcs[i]() 213 | } 214 | } 215 | 216 | return c.queue.fireEvents(r) 217 | } 218 | 219 | func (c *consumer) start(ctx context.Context) { 220 | c.wg.Add(1) 221 | 222 | c.consume(ctx) 223 | 224 | c.logger.Debug(ctx, fmt.Sprintf("Started %s", c)) 225 | } 226 | 227 | func (c *consumer) isClosed() bool { 228 | c.mu.Lock() 229 | closed := c.closed 230 | defer c.mu.Unlock() 231 | 232 | return closed 233 | } 234 | 235 | func (c *consumer) consume(ctx context.Context) { 236 | go func() { 237 | for { 238 | ctx := context.Background() 239 | 240 | if c.isClosed() { 241 | return 242 | } 243 | 244 | tasks, err := c.queue.Consume(ctx) 245 | if err != nil { 246 | c.tracer.Log(ctx, "Receive error from publisher", err) 247 | } 248 | 249 | if len(tasks) == 0 { 250 | continue 251 | } 252 | 253 | c.logger.Debug(ctx, "Received tasks to consume", logging.Int("tasks_count", len(tasks))) 254 | 255 | for i := range tasks { 256 | task := 
tasks[i] 257 | 258 | req := &Request{Task: task} 259 | err = c.Handle(req.WithContext(WithContextTask(req.Context(), task))) 260 | if err != nil { 261 | c.tracer.Log(ctx, "Receive error when handling", err) 262 | } 263 | } 264 | } 265 | }() 266 | } 267 | 268 | var _ Handler = (*consumer)(nil) 269 | -------------------------------------------------------------------------------- /bokchoy.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | "os/user" 9 | "strings" 10 | "sync" 11 | 12 | "github.com/redis/go-redis/v9" 13 | "github.com/thoas/bokchoy/logging" 14 | "github.com/thoas/go-funk" 15 | 16 | "github.com/pkg/errors" 17 | ) 18 | 19 | // Bokchoy is the main object which stores all configuration, queues 20 | // and broker. 21 | type Bokchoy struct { 22 | cfg Config 23 | wg *sync.WaitGroup 24 | defaultOptions *Options 25 | broker Broker 26 | queues map[string]*Queue 27 | middlewares []func(Handler) Handler 28 | servers []Server 29 | 30 | Serializer Serializer 31 | Logger logging.Logger 32 | Tracer Tracer 33 | } 34 | 35 | // New initializes a new Bokchoy instance. 36 | func New(ctx context.Context, cfg Config, options ...Option) (*Bokchoy, error) { 37 | opts := newOptions() 38 | for i := range options { 39 | options[i](opts) 40 | } 41 | 42 | var ( 43 | err error 44 | tracer Tracer 45 | ) 46 | 47 | logger := logging.NewNopLogger() 48 | if opts.Logger != nil { 49 | logger = opts.Logger 50 | } 51 | 52 | tracer = opts.Tracer 53 | if tracer == nil { 54 | tracer = NewLoggerTracer(logger) 55 | } 56 | 57 | wg := &sync.WaitGroup{} 58 | bok := &Bokchoy{ 59 | cfg: cfg, 60 | Serializer: newSerializer(cfg.Serializer), 61 | queues: make(map[string]*Queue), 62 | wg: wg, 63 | Logger: logger, 64 | Tracer: tracer, 65 | defaultOptions: opts, 66 | servers: opts.Servers, 67 | } 68 | 69 | if opts.Serializer != nil { 70 | bok.Serializer = opts.Serializer 71 | } 72 | 73 | if opts.Broker != nil { 74 | bok.broker = opts.Broker 75 | } else { 76 | bok.broker = newBroker(cfg.Broker, 77 | logger.With(logging.String("component", "broker"))) 78 | } 79 | 80 | if opts.Initialize { 81 | bok.Logger.Debug(ctx, fmt.Sprintf("Connecting to %s...", bok.broker)) 82 | 83 | err = bok.broker.Initialize(ctx) 84 | if err != nil { 85 | return nil, errors.Wrap(err, "unable to initialize broker") 86 | } 87 | 88 | bok.Logger.Debug(ctx, fmt.Sprintf("Connected to %s", bok.broker)) 89 | } 90 | 91 | for i := range cfg.Queues { 92 | bok.Queue(cfg.Queues[i].Name) 93 | } 94 | 95 | return bok, nil 96 | } 97 | 98 | // NewDefault initializes a new Bokchoy instance for the most common scenario. 99 | func NewDefault(ctx context.Context, redisURL string) (*Bokchoy, error) { 100 | opt, err := redis.ParseURL(redisURL) 101 | if err != nil { 102 | return nil, errors.Wrap(err, "unable to parse redis url") 103 | } 104 | return New(ctx, Config{ 105 | Broker: BrokerConfig{ 106 | Type: "redis", 107 | Redis: RedisConfig{ 108 | Type: "client", 109 | Client: RedisClientConfig{ 110 | Addr: opt.Addr, 111 | Password: opt.Password, 112 | DB: opt.DB, 113 | }, 114 | }, 115 | }, 116 | }) 117 | } 118 | 119 | // Use append a new middleware to the system. 120 | func (b *Bokchoy) Use(sub ...func(Handler) Handler) *Bokchoy { 121 | b.middlewares = append(b.middlewares, sub...) 122 | 123 | return b 124 | } 125 | 126 | // Empty empties initialized queues. 
127 | func (b *Bokchoy) Empty(ctx context.Context) error { 128 | for i := range b.queues { 129 | err := b.queues[i].Empty(ctx) 130 | if err != nil { 131 | return err 132 | } 133 | } 134 | 135 | return nil 136 | } 137 | 138 | // Flush flushes data of the entire system. 139 | func (b *Bokchoy) Flush(ctx context.Context) error { 140 | return b.broker.Flush(ctx) 141 | } 142 | 143 | // Queue gets or creates a new queue. 144 | func (b *Bokchoy) Queue(name string) *Queue { 145 | queue, ok := b.queues[name] 146 | if !ok { 147 | queue = &Queue{ 148 | name: name, 149 | broker: b.broker, 150 | serializer: b.Serializer, 151 | logger: b.Logger.With(logging.String("component", "queue")), 152 | tracer: b.Tracer, 153 | wg: b.wg, 154 | defaultOptions: b.defaultOptions, 155 | middlewares: b.middlewares, 156 | } 157 | 158 | b.queues[name] = queue 159 | } 160 | 161 | return queue 162 | } 163 | 164 | // Stop stops all queues and consumers. 165 | func (b *Bokchoy) Stop(ctx context.Context) { 166 | fields := []logging.Field{ 167 | logging.String("queues", strings.Join(b.QueueNames(), ", ")), 168 | } 169 | 170 | b.Logger.Debug(ctx, "Stopping queues...", fields...) 171 | for i := range b.queues { 172 | b.queues[i].stop(ctx) 173 | } 174 | b.Logger.Debug(ctx, "Queues stopped", fields...) 175 | 176 | if len(b.servers) == 0 { 177 | return 178 | } 179 | 180 | fields = []logging.Field{ 181 | logging.String("servers", strings.Join(b.ServerNames(), ", ")), 182 | } 183 | 184 | b.Logger.Debug(ctx, "Stopping servers...", fields...) 185 | for i := range b.servers { 186 | b.servers[i].Stop(ctx) 187 | 188 | b.wg.Done() 189 | } 190 | b.Logger.Debug(ctx, "Servers stopped", fields...) 191 | } 192 | 193 | // QueueNames returns the managed queue names. 194 | func (b *Bokchoy) QueueNames() []string { 195 | names := make([]string, 0, len(b.queues)) 196 | 197 | for k := range b.queues { 198 | names = append(names, k) 199 | } 200 | 201 | return names 202 | } 203 | 204 | // ServerNames returns the managed server names. 
205 | func (b *Bokchoy) ServerNames() []string { 206 | names := make([]string, 0, len(b.servers)) 207 | 208 | for i := range b.servers { 209 | names = append(names, fmt.Sprintf("%s", b.servers[i])) 210 | } 211 | 212 | return names 213 | } 214 | 215 | func (b *Bokchoy) displayOutput(ctx context.Context, queueNames []string) { 216 | buf := NewColorWriter(ColorBrightGreen) 217 | buf.Write("%s\n", logo) 218 | buf = buf.WithColor(ColorBrightBlue) 219 | 220 | user, err := user.Current() 221 | if err == nil { 222 | hostname, err := os.Hostname() 223 | if err == nil { 224 | buf.Write("%s@%s %v\n", user.Username, hostname, Version) 225 | buf.Write("- uid: %s\n", user.Uid) 226 | buf.Write("- gid: %s\n\n", user.Gid) 227 | } 228 | } 229 | 230 | buf.Write("[config]\n") 231 | buf.Write(fmt.Sprintf("- concurrency: %d\n", b.defaultOptions.Concurrency)) 232 | buf.Write(fmt.Sprintf("- serializer: %s\n", b.Serializer)) 233 | buf.Write(fmt.Sprintf("- max retries: %d\n", b.defaultOptions.MaxRetries)) 234 | buf.Write(fmt.Sprintf("- retry intervals: %s\n", b.defaultOptions.RetryIntervalsDisplay())) 235 | buf.Write(fmt.Sprintf("- ttl: %s\n", b.defaultOptions.TTL)) 236 | buf.Write(fmt.Sprintf("- countdown: %s\n", b.defaultOptions.Countdown)) 237 | buf.Write(fmt.Sprintf("- timeout: %s\n", b.defaultOptions.Timeout)) 238 | buf.Write(fmt.Sprintf("- tracer: %s\n", b.Tracer)) 239 | buf.Write(fmt.Sprintf("- broker: %s\n", b.broker)) 240 | buf.Write("\n[queues]\n") 241 | 242 | for i := range queueNames { 243 | buf.Write(fmt.Sprintf("- %s\n", queueNames[i])) 244 | } 245 | 246 | if len(b.servers) > 0 { 247 | buf.Write("\n[servers]\n") 248 | 249 | for i := range b.servers { 250 | buf.Write(fmt.Sprintf("- %s", b.servers[i])) 251 | } 252 | } 253 | 254 | log.Print(buf) 255 | } 256 | 257 | // Run runs the system and block the current goroutine. 258 | func (b *Bokchoy) Run(ctx context.Context, options ...Option) error { 259 | opts := newOptions() 260 | for i := range options { 261 | options[i](opts) 262 | } 263 | 264 | if len(opts.Servers) > 0 { 265 | b.servers = opts.Servers 266 | } 267 | 268 | err := b.broker.Ping(ctx) 269 | if err != nil { 270 | return err 271 | } 272 | 273 | queueNames := b.QueueNames() 274 | if len(opts.Queues) > 0 { 275 | queueNames = funk.FilterString(queueNames, func(queueName string) bool { 276 | return funk.InStrings(opts.Queues, queueName) 277 | }) 278 | } 279 | 280 | if len(queueNames) == 0 { 281 | b.Logger.Debug(ctx, "No queue to run...") 282 | 283 | return ErrNoQueueToRun 284 | } 285 | 286 | fields := []logging.Field{ 287 | logging.String("queues", strings.Join(queueNames, ", ")), 288 | } 289 | 290 | b.Logger.Debug(ctx, "Starting queues...", fields...) 291 | 292 | for i := range b.queues { 293 | if !funk.InStrings(queueNames, b.queues[i].Name()) { 294 | continue 295 | } 296 | 297 | b.queues[i].start(ctx) 298 | } 299 | 300 | b.Logger.Debug(ctx, "Queues started", fields...) 301 | 302 | fields = []logging.Field{ 303 | logging.String("servers", strings.Join(b.ServerNames(), ", ")), 304 | } 305 | 306 | if len(b.servers) > 0 { 307 | b.Logger.Debug(ctx, "Starting servers...", fields...) 308 | 309 | for i := range b.servers { 310 | b.wg.Add(1) 311 | 312 | go func(server Server) { 313 | err := server.Start(ctx) 314 | if err != nil { 315 | b.Logger.Error(ctx, fmt.Sprintf("Receive error when starting %s", server), logging.Error(err)) 316 | } 317 | }(b.servers[i]) 318 | } 319 | 320 | b.Logger.Debug(ctx, "Servers started", fields...) 
321 | } 322 | 323 | if !b.defaultOptions.DisableOutput { 324 | b.displayOutput(ctx, queueNames) 325 | } 326 | 327 | b.wg.Wait() 328 | 329 | return nil 330 | } 331 | 332 | // Publish publishes a new payload to a queue. 333 | func (b *Bokchoy) Publish(ctx context.Context, queueName string, payload interface{}, options ...Option) (*Task, error) { 334 | return b.Queue(queueName).Publish(ctx, payload, options...) 335 | } 336 | 337 | // Handle registers a new handler to consume tasks for a queue. 338 | func (b *Bokchoy) Handle(queueName string, sub Handler, options ...Option) { 339 | b.HandleFunc(queueName, sub.Handle, options...) 340 | } 341 | 342 | // HandleFunc registers a new handler function to consume tasks for a queue. 343 | func (b *Bokchoy) HandleFunc(queueName string, f HandlerFunc, options ...Option) { 344 | b.Queue(queueName).HandleFunc(f, options...) 345 | } 346 | -------------------------------------------------------------------------------- /task.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | "time" 8 | 9 | "github.com/pkg/errors" 10 | 11 | "github.com/thoas/bokchoy/logging" 12 | ) 13 | 14 | const ( 15 | // Task statuses 16 | taskStatusWaiting int = iota 17 | taskStatusProcessing 18 | taskStatusSucceeded 19 | taskStatusFailed 20 | taskStatusCanceled 21 | ) 22 | 23 | // Task is the model stored in a Queue. 24 | type Task struct { 25 | ID string 26 | Name string 27 | PublishedAt time.Time 28 | StartedAt time.Time 29 | ProcessedAt time.Time 30 | Status int 31 | OldStatus int 32 | MaxRetries int 33 | Payload interface{} 34 | Result interface{} 35 | Error interface{} 36 | ExecTime float64 37 | TTL time.Duration 38 | Timeout time.Duration 39 | ETA time.Time 40 | RetryIntervals []time.Duration 41 | } 42 | 43 | // NewTask initializes a new Task. 44 | func NewTask(name string, payload interface{}, options ...Option) *Task { 45 | opts := newOptions() 46 | for i := range options { 47 | options[i](opts) 48 | } 49 | 50 | t := &Task{ 51 | ID: ID(), 52 | Name: name, 53 | Payload: payload, 54 | Status: taskStatusWaiting, 55 | PublishedAt: time.Now().UTC(), 56 | } 57 | 58 | t.MaxRetries = opts.MaxRetries 59 | t.TTL = opts.TTL 60 | 61 | return t 62 | } 63 | 64 | // TaskFromPayload returns a Task instance from raw data. 
65 | func TaskFromPayload(data map[string]interface{}, serializer Serializer) (*Task, error) { 66 | var ok bool 67 | var err error 68 | 69 | t := &Task{} 70 | t.ID, err = mapString(data, "id", false) 71 | if err != nil { 72 | return nil, err 73 | } 74 | 75 | t.Name, err = mapString(data, "name", false) 76 | if err != nil { 77 | return nil, err 78 | } 79 | 80 | t.Status, err = mapInt(data, "status", false) 81 | if err != nil { 82 | return nil, err 83 | } 84 | 85 | t.OldStatus = t.Status 86 | 87 | t.PublishedAt, err = mapTime(data, "published_at", false) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | t.ProcessedAt, err = mapTime(data, "processed_at", true) 93 | if err != nil { 94 | return nil, err 95 | } 96 | 97 | t.StartedAt, err = mapTime(data, "started_at", true) 98 | if err != nil { 99 | return nil, err 100 | } 101 | 102 | t.ETA, err = mapTime(data, "eta", true) 103 | if err != nil { 104 | return nil, err 105 | } 106 | 107 | t.Timeout, err = mapDuration(data, "timeout", true) 108 | if err != nil { 109 | return nil, err 110 | } 111 | 112 | payload, err := mapString(data, "payload", false) 113 | if err != nil { 114 | return nil, err 115 | } 116 | 117 | t.MaxRetries, err = mapInt(data, "max_retries", false) 118 | if err != nil { 119 | return nil, err 120 | } 121 | 122 | t.TTL, err = mapDuration(data, "ttl", false) 123 | if err != nil { 124 | return nil, err 125 | } 126 | 127 | t.ExecTime, err = mapFloat(data, "exec_time", true) 128 | if err != nil { 129 | return nil, err 130 | } 131 | 132 | rawRetryIntervals, err := mapString(data, "retry_intervals", true) 133 | if err != nil { 134 | return nil, err 135 | } 136 | 137 | if rawRetryIntervals != "" { 138 | strRetryIntervals := strings.Split(rawRetryIntervals, ",") 139 | t.RetryIntervals = make([]time.Duration, len(strRetryIntervals)) 140 | 141 | for i := range strRetryIntervals { 142 | value, err := strconv.ParseInt(strRetryIntervals[i], 10, 64) 143 | if err != nil { 144 | return nil, errors.Wrapf(ErrAttributeError, "cannot parse %s retry interval to integer", strRetryIntervals[i]) 145 | } 146 | 147 | t.RetryIntervals[i] = time.Duration(value) * time.Second 148 | } 149 | } 150 | 151 | err = serializer.Loads([]byte(payload), &t.Payload) 152 | if err != nil { 153 | return nil, errors.Wrapf(ErrAttributeError, "cannot unserialize `payload`") 154 | } 155 | 156 | rawError, ok := data["error"].(string) 157 | if ok { 158 | err = serializer.Loads([]byte(rawError), &t.Error) 159 | 160 | if err != nil { 161 | return nil, errors.Wrapf(ErrAttributeError, "cannot unserialize `error`") 162 | } 163 | } 164 | 165 | rawResult, ok := data["result"].(string) 166 | if ok { 167 | err = serializer.Loads([]byte(rawResult), &t.Result) 168 | 169 | if err != nil { 170 | return nil, errors.Wrapf(ErrAttributeError, "cannot unserialize `result`") 171 | } 172 | } 173 | 174 | return t, nil 175 | } 176 | 177 | // ETADisplay returns the string representation of the ETA. 178 | func (t Task) ETADisplay() string { 179 | if t.ETA.IsZero() { 180 | return "0s" 181 | } 182 | 183 | return t.ETA.Sub(time.Now().UTC()).String() 184 | } 185 | 186 | // RetryETA returns the next ETA. 
187 | func (t Task) RetryETA() time.Time { 188 | if t.MaxRetries >= len(t.RetryIntervals) { 189 | return t.ETA 190 | } 191 | 192 | if len(t.RetryIntervals) > 0 { 193 | intervals := reverseDurations(t.RetryIntervals) 194 | 195 | if len(intervals) > t.MaxRetries { 196 | return time.Now().UTC().Add(intervals[t.MaxRetries]) 197 | } 198 | 199 | return time.Now().UTC().Add(intervals[0]) 200 | } 201 | 202 | return time.Time{} 203 | } 204 | 205 | // MarshalLogObject returns the log representation for the task. 206 | func (t Task) MarshalLogObject(enc logging.ObjectEncoder) error { 207 | enc.AddString("id", t.ID) 208 | enc.AddString("name", t.Name) 209 | enc.AddString("status", t.StatusDisplay()) 210 | enc.AddString("payload", fmt.Sprintf("%v", t.Payload)) 211 | enc.AddInt("max_retries", t.MaxRetries) 212 | enc.AddDuration("ttl", t.TTL) 213 | enc.AddDuration("timeout", t.Timeout) 214 | enc.AddTime("published_at", t.PublishedAt) 215 | 216 | if !t.StartedAt.IsZero() { 217 | enc.AddTime("started_at", t.StartedAt) 218 | } 219 | 220 | if !t.ProcessedAt.IsZero() { 221 | enc.AddTime("processed_at", t.ProcessedAt) 222 | enc.AddDuration("duration", t.ProcessedAt.Sub(t.StartedAt)) 223 | } 224 | 225 | if !t.ETA.IsZero() { 226 | enc.AddTime("eta", t.ETA) 227 | } 228 | 229 | if t.ExecTime != 0 { 230 | enc.AddFloat64("exec_time", t.ExecTime) 231 | } 232 | 233 | if len(t.RetryIntervals) > 0 { 234 | enc.AddString("retry_intervals", t.RetryIntervalsDisplay()) 235 | } 236 | 237 | return nil 238 | } 239 | 240 | // RetryIntervalsDisplay returns the string representation of the retry intervals. 241 | func (t Task) RetryIntervalsDisplay() string { 242 | intervals := make([]string, len(t.RetryIntervals)) 243 | for i := range t.RetryIntervals { 244 | intervals[i] = t.RetryIntervals[i].String() 245 | } 246 | 247 | return strings.Join(intervals, ", ") 248 | } 249 | 250 | // String returns the string representation of Task. 251 | func (t Task) String() string { 252 | return fmt.Sprintf( 253 | "", 254 | t.Name, t.ID, t.StatusDisplay(), t.PublishedAt.String(), 255 | ) 256 | } 257 | 258 | // StatusDisplay returns the status in human representation. 259 | func (t Task) StatusDisplay() string { 260 | switch t.Status { 261 | case taskStatusSucceeded: 262 | return "succeeded" 263 | case taskStatusProcessing: 264 | return "processing" 265 | case taskStatusFailed: 266 | return "failed" 267 | case taskStatusCanceled: 268 | return "canceled" 269 | } 270 | 271 | return "waiting" 272 | } 273 | 274 | // Serialize serializes a Task to raw data. 
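// Times are stored as Unix timestamps and durations as integer seconds;
// optional fields (eta, started_at, processed_at, exec_time, retry_intervals,
// payload, result, error) are only included when they are set.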
275 | func (t Task) Serialize(serializer Serializer) (map[string]interface{}, error) { 276 | var err error 277 | 278 | data := map[string]interface{}{ 279 | "id": t.ID, 280 | "name": t.Name, 281 | "status": t.Status, 282 | "published_at": t.PublishedAt.Unix(), 283 | "max_retries": t.MaxRetries, 284 | "ttl": int(t.TTL.Seconds()), 285 | "timeout": int(t.Timeout.Seconds()), 286 | } 287 | 288 | if !t.ETA.IsZero() { 289 | data["eta"] = t.ETA.Unix() 290 | } 291 | 292 | if !t.ProcessedAt.IsZero() { 293 | data["processed_at"] = t.ProcessedAt.Unix() 294 | } 295 | 296 | if !t.StartedAt.IsZero() { 297 | data["started_at"] = t.StartedAt.Unix() 298 | } 299 | 300 | if t.Payload != nil { 301 | payload, err := serializer.Dumps(t.Payload) 302 | if err != nil { 303 | return nil, err 304 | } 305 | 306 | data["payload"] = string(payload) 307 | } 308 | 309 | if t.Result != nil { 310 | result, err := serializer.Dumps(t.Result) 311 | if err != nil { 312 | return nil, err 313 | } 314 | 315 | data["result"] = string(result) 316 | } 317 | 318 | if t.Error != nil { 319 | rawErr, ok := t.Error.(error) 320 | if ok { 321 | data["error"], err = serializer.Dumps(rawErr.Error()) 322 | if err != nil { 323 | return nil, err 324 | } 325 | } 326 | } 327 | 328 | if t.ExecTime != 0 { 329 | data["exec_time"] = t.ExecTime 330 | } 331 | 332 | if len(t.RetryIntervals) > 0 { 333 | intervals := make([]string, len(t.RetryIntervals)) 334 | for i := range t.RetryIntervals { 335 | intervals[i] = fmt.Sprintf("%d", int(t.RetryIntervals[i].Seconds())) 336 | } 337 | 338 | data["retry_intervals"] = strings.Join(intervals, ",") 339 | } 340 | 341 | return data, err 342 | } 343 | 344 | // Key returns the task key. 345 | func (t Task) Key() string { 346 | return fmt.Sprintf("%s:%s", t.Name, t.ID) 347 | } 348 | 349 | // MarkAsProcessing marks a task as processing. 350 | func (t *Task) MarkAsProcessing() { 351 | t.StartedAt = time.Now().UTC() 352 | t.Status = taskStatusProcessing 353 | } 354 | 355 | // Finished returns if a task is finished or not. 356 | func (t *Task) Finished() bool { 357 | if t.OldStatus == taskStatusSucceeded { 358 | return true 359 | } 360 | 361 | if (t.OldStatus == taskStatusFailed || t.Status == taskStatusFailed) && t.MaxRetries == 0 { 362 | return true 363 | } 364 | 365 | if t.Status == taskStatusSucceeded { 366 | return true 367 | } 368 | 369 | return false 370 | } 371 | 372 | // IsStatusWaiting returns if the task status is waiting. 373 | func (t *Task) IsStatusWaiting() bool { 374 | return t.Status == taskStatusWaiting 375 | } 376 | 377 | // IsStatusSucceeded returns if the task status is succeeded. 378 | func (t *Task) IsStatusSucceeded() bool { 379 | return t.Status == taskStatusSucceeded 380 | } 381 | 382 | // IsStatusProcessing returns if the task status is processing. 383 | func (t *Task) IsStatusProcessing() bool { 384 | return t.Status == taskStatusProcessing 385 | } 386 | 387 | // IsStatusFailed returns if the task status is failed. 388 | func (t *Task) IsStatusFailed() bool { 389 | return t.Status == taskStatusFailed 390 | } 391 | 392 | // IsStatusCanceled returns if the task status is canceled. 393 | func (t *Task) IsStatusCanceled() bool { 394 | return t.Status == taskStatusCanceled 395 | } 396 | 397 | // MarkAsSucceeded marks a task as succeeded. 398 | func (t *Task) MarkAsSucceeded() { 399 | t.ProcessedAt = time.Now().UTC() 400 | t.Status = taskStatusSucceeded 401 | t.ExecTime = t.ProcessedAt.Sub(t.StartedAt).Seconds() 402 | } 403 | 404 | // MarkAsFailed marks a task as failed. 
405 | func (t *Task) MarkAsFailed(err error) { 406 | t.ProcessedAt = time.Now().UTC() 407 | t.Status = taskStatusFailed 408 | if err != nil { 409 | t.Error = err 410 | } 411 | t.ExecTime = t.ProcessedAt.Sub(t.StartedAt).Seconds() 412 | } 413 | 414 | // MarkAsCanceled marks a task as canceled. 415 | func (t *Task) MarkAsCanceled() { 416 | t.ProcessedAt = time.Now().UTC() 417 | t.Status = taskStatusCanceled 418 | } 419 | -------------------------------------------------------------------------------- /queue.go: -------------------------------------------------------------------------------- 1 | package bokchoy 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "math/rand" 7 | "sync" 8 | "time" 9 | 10 | "github.com/thoas/bokchoy/logging" 11 | 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | // Queue contains consumers to enqueue. 16 | type Queue struct { 17 | broker Broker 18 | 19 | name string 20 | serializer Serializer 21 | logger logging.Logger 22 | tracer Tracer 23 | consumers []*consumer 24 | defaultOptions *Options 25 | wg *sync.WaitGroup 26 | middlewares []func(Handler) Handler 27 | onFailure []Handler 28 | onSuccess []Handler 29 | onComplete []Handler 30 | onStart []Handler 31 | } 32 | 33 | // Use appends a new handler middleware to the queue. 34 | func (q *Queue) Use(sub ...func(Handler) Handler) *Queue { 35 | q.middlewares = append(q.middlewares, sub...) 36 | 37 | return q 38 | } 39 | 40 | // OnStart registers a new handler to be executed when a task is started. 41 | func (q *Queue) OnStart(sub Handler) *Queue { 42 | q.OnStartFunc(sub.Handle) 43 | 44 | return q 45 | } 46 | 47 | // OnStartFunc registers a new handler function to be executed when a task is started. 48 | func (q *Queue) OnStartFunc(f HandlerFunc) *Queue { 49 | q.onStart = append(q.onStart, f) 50 | 51 | return q 52 | } 53 | 54 | // OnComplete registers a new handler to be executed when a task is completed. 55 | func (q *Queue) OnComplete(sub Handler) *Queue { 56 | q.OnCompleteFunc(sub.Handle) 57 | 58 | return q 59 | } 60 | 61 | // OnCompleteFunc registers a new handler function to be executed when a task is completed. 62 | func (q *Queue) OnCompleteFunc(f HandlerFunc) *Queue { 63 | q.onComplete = append(q.onComplete, f) 64 | 65 | return q 66 | } 67 | 68 | // OnFailure registers a new handler to be executed when a task is failed. 69 | func (q *Queue) OnFailure(sub Handler) *Queue { 70 | return q.OnFailureFunc(sub.Handle) 71 | } 72 | 73 | // OnFailureFunc registers a new handler function to be executed when a task is failed. 74 | func (q *Queue) OnFailureFunc(f HandlerFunc) *Queue { 75 | q.onFailure = append(q.onFailure, f) 76 | 77 | return q 78 | } 79 | 80 | // OnSuccess registers a new handler to be executed when a task is succeeded. 81 | func (q *Queue) OnSuccess(sub Handler) *Queue { 82 | return q.OnSuccessFunc(sub.Handle) 83 | } 84 | 85 | // OnSuccessFunc registers a new handler function to be executed when a task is succeeded. 86 | func (q *Queue) OnSuccessFunc(f HandlerFunc) *Queue { 87 | q.onSuccess = append(q.onSuccess, f) 88 | 89 | return q 90 | } 91 | 92 | // Name returns the queue name. 93 | func (q Queue) Name() string { 94 | return q.name 95 | } 96 | 97 | // Handle registers a new handler to consume tasks. 98 | func (q *Queue) Handle(sub Handler, options ...Option) *Queue { 99 | return q.HandleFunc(sub.Handle, options...) 100 | } 101 | 102 | // HandleFunc registers a new handler function to consume tasks. 
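// Options, when given, override the queue defaults. One consumer is created
// per unit of concurrency, each sharing the handler function, the queue
// serializer, tracer and registered middlewares.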
103 | func (q *Queue) HandleFunc(f HandlerFunc, options ...Option) *Queue { 104 | opts := q.defaultOptions 105 | 106 | if len(options) > 0 { 107 | opts = newOptions() 108 | for i := range options { 109 | options[i](opts) 110 | } 111 | } 112 | 113 | for i := 0; i < opts.Concurrency; i++ { 114 | consumerName := fmt.Sprintf("consumer:%s#%d", q.name, i+1) 115 | 116 | consumer := &consumer{ 117 | name: consumerName, 118 | handler: f, 119 | queue: q, 120 | serializer: q.serializer, 121 | logger: q.logger.With(logging.String("component", consumerName)), 122 | tracer: q.tracer, 123 | wg: q.wg, 124 | mu: &sync.Mutex{}, 125 | middlewares: q.middlewares, 126 | } 127 | q.consumers = append(q.consumers, consumer) 128 | } 129 | 130 | return q 131 | } 132 | 133 | // start starts consumers. 134 | func (q *Queue) start(ctx context.Context) { 135 | q.logger.Debug(ctx, "Starting consumers...", 136 | logging.Object("queue", q)) 137 | 138 | for i := range q.consumers { 139 | q.consumers[i].start(ctx) 140 | } 141 | 142 | q.logger.Debug(ctx, "Consumers started", 143 | logging.Object("queue", q)) 144 | } 145 | 146 | // Empty empties queue. 147 | func (q *Queue) Empty(ctx context.Context) error { 148 | queueNames := []string{q.name} 149 | 150 | q.logger.Debug(ctx, "Emptying queue...", 151 | logging.Object("queue", q)) 152 | 153 | for i := range queueNames { 154 | err := q.broker.Empty(ctx, queueNames[i]) 155 | if err != nil { 156 | return errors.Wrapf(err, "unable to empty queue %s", queueNames[i]) 157 | } 158 | } 159 | 160 | q.logger.Debug(ctx, "Queue emptied", 161 | logging.Object("queue", q)) 162 | 163 | return nil 164 | } 165 | 166 | // MarshalLogObject returns the log representation for the queue. 167 | func (q Queue) MarshalLogObject(enc logging.ObjectEncoder) error { 168 | enc.AddString("name", q.name) 169 | enc.AddInt("consumers_count", len(q.consumers)) 170 | 171 | return nil 172 | } 173 | 174 | // stop stops consumers. 175 | func (q *Queue) stop(ctx context.Context) { 176 | q.logger.Debug(ctx, "Stopping consumers...", 177 | logging.Object("queue", q)) 178 | 179 | for i := range q.consumers { 180 | q.consumers[i].stop(ctx) 181 | } 182 | 183 | q.logger.Debug(ctx, "Consumers stopped", 184 | logging.Object("queue", q)) 185 | } 186 | 187 | // taskKey returns the task key prefixed by the queue name. 188 | func (q Queue) taskKey(taskID string) string { 189 | return fmt.Sprintf("%s:%s", q.name, taskID) 190 | } 191 | 192 | // Cancel cancels a task using its ID. 193 | func (q *Queue) Cancel(ctx context.Context, taskID string) (*Task, error) { 194 | task, err := q.Get(ctx, taskID) 195 | if err != nil { 196 | return nil, err 197 | } 198 | 199 | task.MarkAsCanceled() 200 | 201 | err = q.Save(ctx, task) 202 | if err != nil { 203 | return nil, err 204 | } 205 | 206 | return task, nil 207 | } 208 | 209 | // List returns tasks from the broker. 210 | func (q *Queue) List(ctx context.Context) ([]*Task, error) { 211 | results, err := q.broker.List(ctx, q.name) 212 | if err != nil { 213 | return nil, err 214 | } 215 | 216 | return q.payloadsToTasks(ctx, results), nil 217 | } 218 | 219 | // Get returns a task instance from the broker with its id. 
220 | func (q *Queue) Get(ctx context.Context, taskID string) (*Task, error) { 221 | start := time.Now() 222 | 223 | taskKey := q.taskKey(taskID) 224 | results, err := q.broker.Get(ctx, taskKey) 225 | if err != nil { 226 | return nil, err 227 | } 228 | if results == nil { 229 | return nil, ErrTaskNotFound 230 | } 231 | 232 | task, err := TaskFromPayload(results, q.serializer) 233 | if err != nil { 234 | return nil, err 235 | } 236 | 237 | q.logger.Debug(ctx, "Task retrieved", 238 | logging.Object("queue", q), 239 | logging.Duration("duration", time.Since(start)), 240 | logging.Object("task", task)) 241 | 242 | return task, err 243 | } 244 | 245 | // Count returns statistics from queue: 246 | // * direct: number of waiting tasks 247 | // * delayed: number of waiting delayed tasks 248 | // * total: number of total tasks 249 | func (q *Queue) Count(ctx context.Context) (BrokerStats, error) { 250 | return q.broker.Count(ctx, q.name) 251 | } 252 | 253 | // Consume returns an array of tasks. 254 | func (q *Queue) Consume(ctx context.Context) ([]*Task, error) { 255 | results, err := q.broker.Consume(ctx, q.name, time.Time{}) 256 | if err != nil { 257 | return nil, err 258 | } 259 | 260 | return q.payloadsToTasks(ctx, results), nil 261 | } 262 | 263 | func (q *Queue) payloadsToTasks(ctx context.Context, results []map[string]interface{}) []*Task { 264 | tasks := make([]*Task, 0, len(results)) 265 | 266 | for i := range results { 267 | task, err := TaskFromPayload(results[i], q.serializer) 268 | if err != nil { 269 | q.tracer.Log(ctx, "Receive error when casting payload to Task", err) 270 | continue 271 | } 272 | 273 | tasks = append(tasks, task) 274 | } 275 | 276 | return tasks 277 | } 278 | 279 | // Consumer returns a random consumer. 280 | func (q *Queue) Consumer() *consumer { 281 | rand.Seed(time.Now().Unix()) 282 | 283 | n := rand.Int() % len(q.consumers) 284 | 285 | return q.consumers[n] 286 | } 287 | 288 | func (q *Queue) fireEvents(r *Request) error { 289 | task := r.Task 290 | 291 | if task.IsStatusProcessing() { 292 | for i := range q.onStart { 293 | err := q.onStart[i].Handle(r) 294 | if err != nil { 295 | return errors.Wrapf(err, "unable to handle onStart %s", r) 296 | } 297 | } 298 | } 299 | 300 | if task.IsStatusSucceeded() { 301 | for i := range q.onSuccess { 302 | err := q.onSuccess[i].Handle(r) 303 | if err != nil { 304 | return errors.Wrapf(err, "unable to handle onSuccess %s", r) 305 | } 306 | } 307 | } 308 | 309 | if task.IsStatusFailed() || task.IsStatusCanceled() { 310 | for i := range q.onFailure { 311 | err := q.onFailure[i].Handle(r) 312 | if err != nil { 313 | return errors.Wrapf(err, "unable to handle onFailure %s", r) 314 | } 315 | } 316 | } 317 | 318 | if task.Finished() { 319 | for i := range q.onComplete { 320 | err := q.onComplete[i].Handle(r) 321 | if err != nil { 322 | return errors.Wrapf(err, "unable to handle onComplete %s", task) 323 | } 324 | } 325 | 326 | } 327 | 328 | return nil 329 | } 330 | 331 | // HandleRequest handles a request synchronously with a consumer. 332 | func (q *Queue) HandleRequest(ctx context.Context, r *Request) error { 333 | consumer := q.Consumer() 334 | 335 | return consumer.Handle(r) 336 | } 337 | 338 | // Save saves a task to the queue. 
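// Finished tasks are stored with the task TTL as expiration so they
// eventually expire from the broker; unfinished tasks are stored without
// expiration.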
339 | func (q *Queue) Save(ctx context.Context, task *Task) error { 340 | var err error 341 | 342 | start := time.Now() 343 | 344 | data, err := task.Serialize(q.serializer) 345 | if err != nil { 346 | return err 347 | } 348 | 349 | if task.Finished() { 350 | err = q.broker.Set(ctx, task.Key(), data, task.TTL) 351 | } else { 352 | err = q.broker.Set(ctx, task.Key(), data, 0) 353 | } 354 | 355 | if err != nil { 356 | return errors.Wrapf(err, "unable to save %s", task) 357 | } 358 | 359 | q.logger.Debug(ctx, "Task saved", 360 | logging.Object("queue", q), 361 | logging.Duration("duration", time.Since(start)), 362 | logging.Object("task", task)) 363 | 364 | return nil 365 | } 366 | 367 | // NewTask returns a new task instance from payload and options. 368 | func (q *Queue) NewTask(payload interface{}, options ...Option) *Task { 369 | opts := q.defaultOptions 370 | 371 | if len(options) > 0 { 372 | opts = newOptions() 373 | for i := range options { 374 | options[i](opts) 375 | } 376 | } 377 | 378 | task := NewTask(q.name, payload) 379 | task.MaxRetries = opts.MaxRetries 380 | task.TTL = opts.TTL 381 | task.Timeout = opts.Timeout 382 | task.RetryIntervals = opts.RetryIntervals 383 | 384 | var eta time.Time 385 | 386 | if opts.Countdown != nil { 387 | eta = time.Now().Add(*opts.Countdown).UTC() 388 | } 389 | 390 | task.ETA = eta 391 | 392 | return task 393 | } 394 | 395 | // Publish publishes a new payload to the queue. 396 | func (q *Queue) Publish(ctx context.Context, payload interface{}, options ...Option) (*Task, error) { 397 | task := q.NewTask(payload, options...) 398 | 399 | err := q.PublishTask(ctx, task) 400 | if err != nil { 401 | return nil, err 402 | } 403 | 404 | return task, nil 405 | } 406 | 407 | // PublishTask publishes a new task to the queue. 408 | func (q *Queue) PublishTask(ctx context.Context, task *Task) error { 409 | data, err := task.Serialize(q.serializer) 410 | if err != nil { 411 | return err 412 | } 413 | 414 | start := time.Now() 415 | 416 | err = q.broker.Publish(ctx, q.name, task.ID, data, task.ETA) 417 | if err != nil { 418 | return errors.Wrapf(err, "unable to publish %s", task) 419 | } 420 | 421 | q.logger.Debug(ctx, "Task published", 422 | logging.Object("queue", q), 423 | logging.Duration("duration", time.Since(start)), 424 | logging.Object("task", task)) 425 | 426 | return nil 427 | } 428 | -------------------------------------------------------------------------------- /contrib/rpc/proto/bokchoy.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-gogo. DO NOT EDIT. 2 | // source: bokchoy.proto 3 | 4 | package proto 5 | 6 | import ( 7 | context "context" 8 | fmt "fmt" 9 | _ "github.com/gogo/protobuf/gogoproto" 10 | proto "github.com/gogo/protobuf/proto" 11 | _ "github.com/golang/protobuf/ptypes/duration" 12 | _ "github.com/golang/protobuf/ptypes/timestamp" 13 | wrappers "github.com/golang/protobuf/ptypes/wrappers" 14 | grpc "google.golang.org/grpc" 15 | math "math" 16 | time "time" 17 | ) 18 | 19 | // Reference imports to suppress errors if they are not otherwise used. 20 | var _ = proto.Marshal 21 | var _ = fmt.Errorf 22 | var _ = math.Inf 23 | var _ = time.Kitchen 24 | 25 | // This is a compile-time assertion to ensure that this generated file 26 | // is compatible with the proto package it is being compiled against. 27 | // A compilation error at this line likely means your copy of the 28 | // proto package needs to be updated. 
29 | const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package 30 | 31 | type PublishTaskRequest struct { 32 | Queue string `protobuf:"bytes,1,opt,name=queue,proto3" json:"queue,omitempty"` 33 | Payload *wrappers.BytesValue `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` 34 | Countdown *time.Duration `protobuf:"bytes,3,opt,name=countdown,proto3,stdduration" json:"countdown,omitempty"` 35 | Timeout *time.Duration `protobuf:"bytes,4,opt,name=timeout,proto3,stdduration" json:"timeout,omitempty"` 36 | TTL *time.Duration `protobuf:"bytes,5,opt,name=ttl,proto3,stdduration" json:"ttl,omitempty"` 37 | MaxRetries *wrappers.Int64Value `protobuf:"bytes,6,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` 38 | RetryIntervals []*time.Duration `protobuf:"bytes,7,rep,name=retry_intervals,json=retryIntervals,proto3,stdduration" json:"retry_intervals,omitempty"` 39 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 40 | XXX_unrecognized []byte `json:"-"` 41 | XXX_sizecache int32 `json:"-"` 42 | } 43 | 44 | func (m *PublishTaskRequest) Reset() { *m = PublishTaskRequest{} } 45 | func (m *PublishTaskRequest) String() string { return proto.CompactTextString(m) } 46 | func (*PublishTaskRequest) ProtoMessage() {} 47 | func (*PublishTaskRequest) Descriptor() ([]byte, []int) { 48 | return fileDescriptor_4e2b3c797ca0eafb, []int{0} 49 | } 50 | func (m *PublishTaskRequest) XXX_Unmarshal(b []byte) error { 51 | return xxx_messageInfo_PublishTaskRequest.Unmarshal(m, b) 52 | } 53 | func (m *PublishTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 54 | return xxx_messageInfo_PublishTaskRequest.Marshal(b, m, deterministic) 55 | } 56 | func (m *PublishTaskRequest) XXX_Merge(src proto.Message) { 57 | xxx_messageInfo_PublishTaskRequest.Merge(m, src) 58 | } 59 | func (m *PublishTaskRequest) XXX_Size() int { 60 | return xxx_messageInfo_PublishTaskRequest.Size(m) 61 | } 62 | func (m *PublishTaskRequest) XXX_DiscardUnknown() { 63 | xxx_messageInfo_PublishTaskRequest.DiscardUnknown(m) 64 | } 65 | 66 | var xxx_messageInfo_PublishTaskRequest proto.InternalMessageInfo 67 | 68 | func (m *PublishTaskRequest) GetQueue() string { 69 | if m != nil { 70 | return m.Queue 71 | } 72 | return "" 73 | } 74 | 75 | func (m *PublishTaskRequest) GetPayload() *wrappers.BytesValue { 76 | if m != nil { 77 | return m.Payload 78 | } 79 | return nil 80 | } 81 | 82 | func (m *PublishTaskRequest) GetCountdown() *time.Duration { 83 | if m != nil { 84 | return m.Countdown 85 | } 86 | return nil 87 | } 88 | 89 | func (m *PublishTaskRequest) GetTimeout() *time.Duration { 90 | if m != nil { 91 | return m.Timeout 92 | } 93 | return nil 94 | } 95 | 96 | func (m *PublishTaskRequest) GetTTL() *time.Duration { 97 | if m != nil { 98 | return m.TTL 99 | } 100 | return nil 101 | } 102 | 103 | func (m *PublishTaskRequest) GetMaxRetries() *wrappers.Int64Value { 104 | if m != nil { 105 | return m.MaxRetries 106 | } 107 | return nil 108 | } 109 | 110 | func (m *PublishTaskRequest) GetRetryIntervals() []*time.Duration { 111 | if m != nil { 112 | return m.RetryIntervals 113 | } 114 | return nil 115 | } 116 | 117 | type Task struct { 118 | ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` 119 | Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` 120 | Payload *wrappers.BytesValue `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` 121 | Status int64 `protobuf:"varint,4,opt,name=status,proto3" json:"status,omitempty"` 122 | MaxRetries 
int64 `protobuf:"varint,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"` 123 | Timeout *time.Duration `protobuf:"bytes,6,opt,name=timeout,proto3,stdduration" json:"timeout,omitempty"` 124 | TTL *time.Duration `protobuf:"bytes,7,opt,name=ttl,proto3,stdduration" json:"ttl,omitempty"` 125 | RetryIntervals []*time.Duration `protobuf:"bytes,8,rep,name=retry_intervals,json=retryIntervals,proto3,stdduration" json:"retry_intervals,omitempty"` 126 | PublishedAt *time.Time `protobuf:"bytes,9,opt,name=published_at,json=publishedAt,proto3,stdtime" json:"published_at,omitempty"` 127 | StartedAt *time.Time `protobuf:"bytes,10,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at,omitempty"` 128 | ProcessedAt *time.Time `protobuf:"bytes,11,opt,name=processed_at,json=processedAt,proto3,stdtime" json:"processed_at,omitempty"` 129 | ETA *time.Time `protobuf:"bytes,12,opt,name=eta,proto3,stdtime" json:"eta,omitempty"` 130 | XXX_NoUnkeyedLiteral struct{} `json:"-"` 131 | XXX_unrecognized []byte `json:"-"` 132 | XXX_sizecache int32 `json:"-"` 133 | } 134 | 135 | func (m *Task) Reset() { *m = Task{} } 136 | func (m *Task) String() string { return proto.CompactTextString(m) } 137 | func (*Task) ProtoMessage() {} 138 | func (*Task) Descriptor() ([]byte, []int) { 139 | return fileDescriptor_4e2b3c797ca0eafb, []int{1} 140 | } 141 | func (m *Task) XXX_Unmarshal(b []byte) error { 142 | return xxx_messageInfo_Task.Unmarshal(m, b) 143 | } 144 | func (m *Task) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 145 | return xxx_messageInfo_Task.Marshal(b, m, deterministic) 146 | } 147 | func (m *Task) XXX_Merge(src proto.Message) { 148 | xxx_messageInfo_Task.Merge(m, src) 149 | } 150 | func (m *Task) XXX_Size() int { 151 | return xxx_messageInfo_Task.Size(m) 152 | } 153 | func (m *Task) XXX_DiscardUnknown() { 154 | xxx_messageInfo_Task.DiscardUnknown(m) 155 | } 156 | 157 | var xxx_messageInfo_Task proto.InternalMessageInfo 158 | 159 | func (m *Task) GetID() string { 160 | if m != nil { 161 | return m.ID 162 | } 163 | return "" 164 | } 165 | 166 | func (m *Task) GetName() string { 167 | if m != nil { 168 | return m.Name 169 | } 170 | return "" 171 | } 172 | 173 | func (m *Task) GetPayload() *wrappers.BytesValue { 174 | if m != nil { 175 | return m.Payload 176 | } 177 | return nil 178 | } 179 | 180 | func (m *Task) GetStatus() int64 { 181 | if m != nil { 182 | return m.Status 183 | } 184 | return 0 185 | } 186 | 187 | func (m *Task) GetMaxRetries() int64 { 188 | if m != nil { 189 | return m.MaxRetries 190 | } 191 | return 0 192 | } 193 | 194 | func (m *Task) GetTimeout() *time.Duration { 195 | if m != nil { 196 | return m.Timeout 197 | } 198 | return nil 199 | } 200 | 201 | func (m *Task) GetTTL() *time.Duration { 202 | if m != nil { 203 | return m.TTL 204 | } 205 | return nil 206 | } 207 | 208 | func (m *Task) GetRetryIntervals() []*time.Duration { 209 | if m != nil { 210 | return m.RetryIntervals 211 | } 212 | return nil 213 | } 214 | 215 | func (m *Task) GetPublishedAt() *time.Time { 216 | if m != nil { 217 | return m.PublishedAt 218 | } 219 | return nil 220 | } 221 | 222 | func (m *Task) GetStartedAt() *time.Time { 223 | if m != nil { 224 | return m.StartedAt 225 | } 226 | return nil 227 | } 228 | 229 | func (m *Task) GetProcessedAt() *time.Time { 230 | if m != nil { 231 | return m.ProcessedAt 232 | } 233 | return nil 234 | } 235 | 236 | func (m *Task) GetETA() *time.Time { 237 | if m != nil { 238 | return m.ETA 239 | } 240 | return nil 241 | } 242 | 243 | func init() 
{ 244 | proto.RegisterType((*PublishTaskRequest)(nil), "proto.PublishTaskRequest") 245 | proto.RegisterType((*Task)(nil), "proto.Task") 246 | } 247 | 248 | func init() { proto.RegisterFile("bokchoy.proto", fileDescriptor_4e2b3c797ca0eafb) } 249 | 250 | var fileDescriptor_4e2b3c797ca0eafb = []byte{ 251 | // 521 bytes of a gzipped FileDescriptorProto 252 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcf, 0x6e, 0xd3, 0x40, 253 | 0x10, 0xc6, 0x49, 0xec, 0x24, 0x78, 0x5c, 0x40, 0x5a, 0xa1, 0xca, 0x0d, 0x12, 0xae, 0x72, 0xea, 254 | 0x05, 0x47, 0x2a, 0x7f, 0x7b, 0x40, 0x28, 0xa6, 0x48, 0x44, 0xe2, 0x80, 0x2c, 0x8b, 0x6b, 0xb4, 255 | 0x8e, 0x17, 0xc7, 0xaa, 0xed, 0x75, 0xbd, 0xb3, 0xb4, 0x79, 0x0b, 0x8e, 0xdc, 0x79, 0x19, 0x9e, 256 | 0x22, 0x48, 0xbd, 0xf0, 0x1a, 0xc8, 0xbb, 0x36, 0x0d, 0x84, 0xaa, 0x69, 0x4f, 0xd9, 0xc9, 0xcc, 257 | 0xef, 0xf3, 0xcc, 0x7c, 0xda, 0x85, 0x7b, 0x11, 0x3f, 0x99, 0x2f, 0xf8, 0xd2, 0x2b, 0x2b, 0x8e, 258 | 0x9c, 0xf4, 0xd4, 0xcf, 0xd0, 0x4d, 0x38, 0x4f, 0x32, 0x36, 0x56, 0x51, 0x24, 0x3f, 0x8f, 0x31, 259 | 0xcd, 0x99, 0x40, 0x9a, 0x97, 0xba, 0x6e, 0xf8, 0xf8, 0xdf, 0x82, 0x58, 0x56, 0x14, 0x53, 0x5e, 260 | 0x5c, 0x95, 0x3f, 0xab, 0x68, 0x59, 0xb2, 0x4a, 0x34, 0xf9, 0x27, 0x49, 0x8a, 0x0b, 0x19, 0x79, 261 | 0x73, 0x9e, 0x8f, 0x13, 0x9e, 0xf0, 0xcb, 0xc2, 0x3a, 0x52, 0x81, 0x3a, 0xe9, 0xf2, 0xd1, 0x77, 262 | 0x03, 0xc8, 0x47, 0x19, 0x65, 0xa9, 0x58, 0x84, 0x54, 0x9c, 0x04, 0xec, 0x54, 0x32, 0x81, 0xe4, 263 | 0x21, 0xf4, 0x4e, 0x25, 0x93, 0xcc, 0xe9, 0xec, 0x77, 0x0e, 0xac, 0x40, 0x07, 0xe4, 0x39, 0x0c, 264 | 0x4a, 0xba, 0xcc, 0x38, 0x8d, 0x9d, 0xee, 0x7e, 0xe7, 0xc0, 0x3e, 0x7c, 0xe4, 0xe9, 0x6e, 0xbc, 265 | 0xf6, 0x23, 0x9e, 0xbf, 0x44, 0x26, 0x3e, 0xd1, 0x4c, 0xb2, 0xa0, 0xad, 0x25, 0xaf, 0xc1, 0x9a, 266 | 0x73, 0x59, 0x60, 0xcc, 0xcf, 0x0a, 0xc7, 0x50, 0xe0, 0xde, 0x06, 0x78, 0xdc, 0x8c, 0xe9, 0x9b, 267 | 0xdf, 0x7e, 0xba, 0x9d, 0xe0, 0x92, 0x20, 0x47, 0x30, 0xa8, 0x97, 0xc4, 0x25, 0x3a, 0xe6, 0x76, 268 | 0x70, 0x5b, 0x4f, 0x5e, 0x81, 0x81, 0x98, 0x39, 0xbd, 0xeb, 0x30, 0xfb, 0x62, 0xe5, 0x1a, 0x61, 269 | 0xf8, 0x41, 0xd1, 0x35, 0x42, 0x7c, 0xb0, 0x73, 0x7a, 0x3e, 0xab, 0x18, 0x56, 0x29, 0x13, 0x4e, 270 | 0xff, 0x8a, 0x71, 0xa7, 0x05, 0xbe, 0x78, 0xa6, 0xc6, 0xf5, 0xcd, 0x1f, 0x2b, 0xb7, 0x13, 0x40, 271 | 0x4e, 0xcf, 0x03, 0x0d, 0x91, 0xf7, 0xf0, 0xa0, 0xe6, 0x97, 0xb3, 0xb4, 0x40, 0x56, 0x7d, 0xa1, 272 | 0x99, 0x70, 0x06, 0xfb, 0xc6, 0x36, 0x03, 0xdc, 0x57, 0xdc, 0xb4, 0xc5, 0x46, 0xbf, 0x4c, 0x30, 273 | 0x6b, 0x7b, 0xc8, 0x2e, 0x74, 0xd3, 0x58, 0x9b, 0xe2, 0xf7, 0x2f, 0x56, 0x6e, 0x77, 0x7a, 0x1c, 274 | 0x74, 0xd3, 0x98, 0x10, 0x30, 0x0b, 0x9a, 0x33, 0x65, 0x8b, 0x15, 0xa8, 0xf3, 0xba, 0x5b, 0xc6, 275 | 0x0d, 0xdc, 0xda, 0x85, 0xbe, 0x40, 0x8a, 0x52, 0xa8, 0x6d, 0x1b, 0x41, 0x13, 0x11, 0xf7, 0xef, 276 | 0x8d, 0xf4, 0x54, 0x72, 0x7d, 0xdc, 0x35, 0x9f, 0xfa, 0xb7, 0xf3, 0x69, 0x70, 0x73, 0x9f, 0xfe, 277 | 0xb3, 0xe3, 0xbb, 0xb7, 0xda, 0x31, 0x79, 0x0b, 0x3b, 0xa5, 0xbe, 0x08, 0x2c, 0x9e, 0x51, 0x74, 278 | 0x2c, 0xd5, 0xcc, 0x70, 0x43, 0x26, 0x6c, 0x2f, 0xac, 0x6f, 0x7e, 0xad, 0x75, 0xec, 0x3f, 0xd4, 279 | 0x04, 0xc9, 0x1b, 0x00, 0x81, 0xb4, 0x42, 0x2d, 0x01, 0x5b, 0x4a, 0x58, 0x0d, 0x33, 0x41, 0xd5, 280 | 0x45, 0xc5, 0xe7, 0x4c, 0x08, 0x2d, 0x61, 0x6f, 0xdd, 0x45, 0x4b, 0x4d, 0x90, 0x1c, 0x81, 0xc1, 281 | 0x90, 0x3a, 0x3b, 0xd7, 0xb2, 0x6a, 0x9f, 0xef, 0xc2, 0x89, 0x92, 0xa8, 0x99, 0x43, 0x1f, 0x06, 282 | 0xbe, 0x7e, 0xb7, 0xc8, 0x4b, 0xb0, 0xd7, 0x5e, 0x06, 0xb2, 0xa7, 0x05, 0xbc, 0xcd, 0xd7, 0x62, 283 | 0x68, 0x37, 0xa9, 0xfa, 0xbf, 0xd1, 
0x9d, 0xa8, 0xaf, 0xa2, 0xa7, 0xbf, 0x03, 0x00, 0x00, 0xff, 284 | 0xff, 0xd3, 0xde, 0x3a, 0x24, 0x02, 0x05, 0x00, 0x00, 285 | } 286 | 287 | // Reference imports to suppress errors if they are not otherwise used. 288 | var _ context.Context 289 | var _ grpc.ClientConn 290 | 291 | // This is a compile-time assertion to ensure that this generated file 292 | // is compatible with the grpc package it is being compiled against. 293 | const _ = grpc.SupportPackageIsVersion4 294 | 295 | // BokchoyClient is the client API for Bokchoy service. 296 | // 297 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 298 | type BokchoyClient interface { 299 | PublishTask(ctx context.Context, in *PublishTaskRequest, opts ...grpc.CallOption) (*Task, error) 300 | } 301 | 302 | type bokchoyClient struct { 303 | cc *grpc.ClientConn 304 | } 305 | 306 | func NewBokchoyClient(cc *grpc.ClientConn) BokchoyClient { 307 | return &bokchoyClient{cc} 308 | } 309 | 310 | func (c *bokchoyClient) PublishTask(ctx context.Context, in *PublishTaskRequest, opts ...grpc.CallOption) (*Task, error) { 311 | out := new(Task) 312 | err := c.cc.Invoke(ctx, "/proto.Bokchoy/PublishTask", in, out, opts...) 313 | if err != nil { 314 | return nil, err 315 | } 316 | return out, nil 317 | } 318 | 319 | // BokchoyServer is the server API for Bokchoy service. 320 | type BokchoyServer interface { 321 | PublishTask(context.Context, *PublishTaskRequest) (*Task, error) 322 | } 323 | 324 | func RegisterBokchoyServer(s *grpc.Server, srv BokchoyServer) { 325 | s.RegisterService(&_Bokchoy_serviceDesc, srv) 326 | } 327 | 328 | func _Bokchoy_PublishTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 329 | in := new(PublishTaskRequest) 330 | if err := dec(in); err != nil { 331 | return nil, err 332 | } 333 | if interceptor == nil { 334 | return srv.(BokchoyServer).PublishTask(ctx, in) 335 | } 336 | info := &grpc.UnaryServerInfo{ 337 | Server: srv, 338 | FullMethod: "/proto.Bokchoy/PublishTask", 339 | } 340 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 341 | return srv.(BokchoyServer).PublishTask(ctx, req.(*PublishTaskRequest)) 342 | } 343 | return interceptor(ctx, in, info, handler) 344 | } 345 | 346 | var _Bokchoy_serviceDesc = grpc.ServiceDesc{ 347 | ServiceName: "proto.Bokchoy", 348 | HandlerType: (*BokchoyServer)(nil), 349 | Methods: []grpc.MethodDesc{ 350 | { 351 | MethodName: "PublishTask", 352 | Handler: _Bokchoy_PublishTask_Handler, 353 | }, 354 | }, 355 | Streams: []grpc.StreamDesc{}, 356 | Metadata: "bokchoy.proto", 357 | } 358 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # bokchoy 2 | 3 | [![Build Status](https://travis-ci.org/thoas/bokchoy.svg?branch=master)](https://travis-ci.org/thoas/bokchoy) 4 | [![GoDoc](https://godoc.org/github.com/thoas/bokchoy?status.svg)](https://godoc.org/github.com/thoas/bokchoy) 5 | [![Go report](https://goreportcard.com/badge/github.com/thoas/bokchoy)](https://goreportcard.com/report/github.com/thoas/bokchoy) 6 | 7 | ## Introduction 8 | 9 | Bokchoy is a simple Go library for queueing tasks and processing them in the background with workers. 10 | It should be integrated in your web stack easily and it's designed to have a low barrier entry for newcomers. 
11 | 
12 | It currently only supports [Redis](broker_redis.go)
13 | (client, sentinel and cluster) with some Lua magic, but internally it relies on a generic
14 | broker implementation so it can be extended.
15 | 
16 | ![screen](https://d1sz9tkli0lfjq.cloudfront.net/items/1a2w0d2g1N0T0z1u261j/screen.gif?v=871e2898)
17 | 
18 | ## Motivation
19 | 
20 | It's relatively easy to build a producer/receiver system in Go since the language provides the built-in
21 | features to write one from scratch, but we keep re-implementing the same system everywhere instead of making it reusable.
22 | 
23 | Bokchoy is a plug-and-play component: it does its job well so that you can focus
24 | on your business logic.
25 | 
26 | ## Features
27 | 
28 | * **Lightweight**
29 | * **A simple API close to net/http** - if you already use `net/http`, you can learn it pretty quickly
30 | * **Designed with modular/composable APIs** - middlewares, queue middlewares
31 | * **Context control** - built on the `context` package, providing value chaining, cancellations and timeouts
32 | * **Highly configurable** - tons of options to swap internal parts (broker, logger, timeouts, etc.); if you cannot customize something, then an option is missing
33 | * **Extensions** - RPC server powered by [gRPC](examples/rpc), [Sentry](examples/sentry) integration, etc.
34 | 
35 | ## Getting started
36 | 
37 | First, run a Redis server, of course:
38 | 
39 | ```console
40 | redis-server
41 | ```
42 | 
43 | Define your producer which will send tasks:
44 | 
45 | ```go
46 | package main
47 | 
48 | import (
49 |     "context"
50 |     "fmt"
51 |     "log"
52 | 
53 |     "github.com/thoas/bokchoy"
54 | )
55 | 
56 | func main() {
57 |     ctx := context.Background()
58 | 
59 |     // define the main engine which will manage queues
60 |     engine, err := bokchoy.NewDefault(ctx, "redis://localhost:6379")
61 |     if err != nil {
62 |         log.Fatal(err)
63 |     }
64 | 
65 |     payload := map[string]string{
66 |         "data": "hello world",
67 |     }
68 | 
69 |     task, err := engine.Queue("tasks.message").Publish(ctx, payload)
70 |     if err != nil {
71 |         log.Fatal(err)
72 |     }
73 | 
74 |     fmt.Println(task, "has been published")
75 | }
76 | ```
77 | 
78 | See the [producer](examples/producer) directory for more information and to run it.
79 | 
80 | Now that we have a producer which can send tasks to our engine, we need a worker to process
81 | them in the background:
82 | 
83 | ```go
84 | package main
85 | 
86 | import (
87 |     "context"
88 |     "fmt"
89 |     "log"
90 |     "os"
91 |     "os/signal"
92 | 
93 |     "github.com/thoas/bokchoy"
94 | )
95 | 
96 | func main() {
97 |     ctx := context.Background()
98 | 
99 |     engine, err := bokchoy.NewDefault(ctx, "redis://localhost:6379")
100 |     if err != nil {
101 |         log.Fatal(err)
102 |     }
103 | 
104 |     engine.Queue("tasks.message").HandleFunc(func(r *bokchoy.Request) error {
105 |         fmt.Println("Receive request", r)
106 |         fmt.Println("Payload:", r.Task.Payload)
107 | 
108 |         return nil
109 |     })
110 | 
111 |     c := make(chan os.Signal, 1)
112 |     signal.Notify(c, os.Interrupt)
113 | 
114 |     go func() {
115 |         for range c {
116 |             log.Print("Received signal, gracefully stopping")
117 |             engine.Stop(ctx)
118 |         }
119 |     }()
120 | 
121 |     engine.Run(ctx)
122 | }
123 | ```
124 | 
125 | A worker is defined by handlers; to define a `Handler`, implement this interface:
126 | 
127 | ```go
128 | type Handler interface {
129 |     Handle(*Request) error
130 | }
131 | ```
132 | 
133 | You can create your own struct which implements this interface, or use `HandlerFunc` to
134 | adapt a plain function into a `Handler`, as shown below.
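For instance, here is a minimal sketch of a struct-based handler; the `messageHandler` struct, its `prefix` field and the printed output are made up for illustration:

```go
package main

import (
    "context"
    "fmt"
    "log"

    "github.com/thoas/bokchoy"
)

// messageHandler is a hypothetical handler carrying its own state
// (a prefix here, but it could be a database handle or an API client).
type messageHandler struct {
    prefix string
}

// Handle implements the Handler interface.
func (h messageHandler) Handle(r *bokchoy.Request) error {
    fmt.Println(h.prefix, r.Task.Payload)
    return nil
}

func main() {
    ctx := context.Background()

    engine, err := bokchoy.NewDefault(ctx, "redis://localhost:6379")
    if err != nil {
        log.Fatal(err)
    }

    // register the struct-based handler instead of an anonymous function
    engine.Queue("tasks.message").Handle(messageHandler{prefix: "payload:"})

    engine.Run(ctx)
}
```

`HandlerFunc` goes the other way: it adapts a plain `func(*bokchoy.Request) error` into a `Handler`.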
135 | 136 | See [worker](examples/worker) directory for more information and to run it. 137 | 138 | If you want a complete application example, you can read [A Tour of Bokchoy](docs/a-tour-of-bokchoy.md) which 139 | explain how to use the main features of it. 140 | 141 | ## Installation 142 | 143 | Using [Go Modules](https://github.com/golang/go/wiki/Modules) 144 | 145 | ```console 146 | go get github.com/thoas/bokchoy 147 | ``` 148 | 149 | ## Advanced topics 150 | 151 | ### Delayed tasks 152 | 153 | When publishing a task, it will be immediately processed by the worker if it's not already occupied, 154 | you may want to delay the task on some occasions by using `bokchoy.WithCountdown` option: 155 | 156 | ```go 157 | payload := map[string]string{ 158 | "data": "hello world", 159 | } 160 | 161 | queue.Publish(ctx, payload, bokchoy.WithCountdown(5*time.Second)) 162 | ``` 163 | 164 | This task will be executed in 5 seconds. 165 | 166 | ### Priority tasks 167 | 168 | A task can be published at front of others by providing a negative countdown. 169 | 170 | ```go 171 | payload := map[string]string{ 172 | "data": "hello world", 173 | } 174 | 175 | queue.Publish(ctx, payload, bokchoy.WithCountdown(-1)) 176 | ``` 177 | 178 | This task will be published and processed immediately. 179 | 180 | ### Custom instantiation 181 | 182 | `bokchoy.NewDefault` allows simple instantiation when you have a simple setup. You may 183 | want to have more control over your setup. This allows you to use a customer serializer, 184 | custom logger, etc. Use `bokchoy.New` to do this: 185 | 186 | ```go 187 | bokchoy.New(ctx, bokchoy.Config{ 188 | Broker: bokchoy.BrokerConfig{ 189 | Type: "redis", 190 | Redis: bokchoy.RedisConfig{ 191 | Type: "client", 192 | Client: bokchoy.RedisClientConfig{ 193 | Addr: "localhost:6379", 194 | }, 195 | }, 196 | }, 197 | }) 198 | ``` 199 | 200 | **Note**: `bokchoy.NewDefault` uses `redis.ParseURL` internally so it can handle connection 201 | strings like `redis://user:pass@host:port/db` without the need for `bokchoy.New`. 202 | 203 | ### Custom serializer 204 | 205 | By default the task serializer is `JSON`, you can customize it when initializing 206 | the Bokchoy engine, it must respect the 207 | [Serializer](https://github.com/thoas/bokchoy/blob/master/serializer.go) interface. 208 | 209 | ```go 210 | bokchoy.New(ctx, bokchoy.Config{ 211 | Broker: bokchoy.BrokerConfig{ 212 | Type: "redis", 213 | Redis: bokchoy.RedisConfig{ 214 | Type: "client", 215 | Client: bokchoy.RedisClientConfig{ 216 | Addr: "localhost:6379", 217 | }, 218 | }, 219 | }, 220 | }, bokchoy.WithSerializer(MySerializer{})) 221 | ``` 222 | 223 | You will be capable to define a [msgpack](https://msgpack.org/), [yaml](https://yaml.org/) serializers if you want. 
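For example, a msgpack serializer could be sketched as follows; this assumes the third-party `github.com/vmihailenco/msgpack/v5` package and implements the two methods (`Dumps`/`Loads`) of the `Serializer` interface:

```go
import (
    "github.com/vmihailenco/msgpack/v5"
)

// MsgpackSerializer is a sketch of a custom serializer backed by msgpack.
type MsgpackSerializer struct{}

// Dumps serializes a value into msgpack bytes.
func (MsgpackSerializer) Dumps(v interface{}) ([]byte, error) {
    return msgpack.Marshal(v)
}

// Loads deserializes msgpack bytes into the given value.
func (MsgpackSerializer) Loads(data []byte, v interface{}) error {
    return msgpack.Unmarshal(data, v)
}
```

It is then wired in with `bokchoy.WithSerializer(MsgpackSerializer{})`, exactly like `MySerializer{}` in the snippet above.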
224 | 225 | ### Custom logger 226 | 227 | By default the internal logger is disabled, you can provide a more verbose logger with options: 228 | 229 | ```go 230 | import ( 231 | "context" 232 | "fmt" 233 | "log" 234 | 235 | "github.com/thoas/bokchoy/logging" 236 | ) 237 | 238 | func main() { 239 | logger, err := logging.NewDevelopmentLogger() 240 | if err != nil { 241 | log.Fatal(err) 242 | } 243 | 244 | defer logger.Sync() 245 | 246 | bokchoy.New(ctx, bokchoy.Config{ 247 | Broker: bokchoy.BrokerConfig{ 248 | Type: "redis", 249 | Redis: bokchoy.RedisConfig{ 250 | Type: "client", 251 | Client: bokchoy.RedisClientConfig{ 252 | Addr: "localhost:6379", 253 | }, 254 | }, 255 | }, 256 | }, bokchoy.WithLogger(logger)) 257 | } 258 | ``` 259 | 260 | The builtin logger is based on [zap](https://github.com/uber-go/zap) but you can provide your 261 | own implementation easily if you have a central component. 262 | 263 | If you don't need that much information, you can enable the [Logger middleware](#core-middlewares). 264 | 265 | ### Worker Concurrency 266 | 267 | By default the worker concurrency is set to `1`, you can override it based on your server 268 | capability, Bokchoy will spawn multiple goroutines to handle your tasks. 269 | 270 | ```go 271 | engine.Queue("tasks.message").HandleFunc(func(r *bokchoy.Request) error { 272 | fmt.Println("Receive request", r) 273 | fmt.Println("Payload:", r.Task.Payload) 274 | 275 | return nil 276 | }, bokchoy.WithConcurrency(5)) 277 | ``` 278 | 279 | You can still set it globally with `bokchoy.WithConcurrency` option when initializing the engine. 280 | 281 | ### Retries 282 | 283 | If your task handler is returning an error, the task will be marked as `failed` and retried `3 times`, 284 | based on intervals: `60 seconds`, `120 seconds`, `180 seconds`. 285 | 286 | You can customize this globally on the engine or when publishing a new task by using `bokchoy.WithMaxRetries` 287 | and `bokchoy.WithRetryIntervals` options. 288 | 289 | ```go 290 | bokchoy.WithMaxRetries(1) 291 | bokchoy.WithRetryIntervals([]time.Duration{ 292 | 180 * time.Second, 293 | }) 294 | ``` 295 | 296 | ### Timeout 297 | 298 | By default a task will be forced to timeout and marked as `canceled` if its running time exceed `180 seconds`. 299 | 300 | You can customize this globally or when publishing a new task by using `bokchoy.WithTimeout` option: 301 | 302 | ```go 303 | bokchoy.WithTimeout(5*time.Second) 304 | ``` 305 | 306 | The worker will regain control and process the next task but be careful, each task is running 307 | in a goroutine so you have to cancel your task at some point or it will be leaking. 308 | 309 | ### Catch events 310 | 311 | You can catch events by registering handlers on your queue when your tasks are 312 | starting, succeeding, completing or failing. 
313 | 314 | ```go 315 | queue := engine.Queue("tasks.message") 316 | queue.OnStartFunc(func(r *bokchoy.Request) error { 317 | // we update the context by adding a value 318 | *r = *r.WithContext(context.WithValue(r.Context(), "foo", "bar")) 319 | 320 | return nil 321 | }) 322 | 323 | queue.OnCompleteFunc(func(r *bokchoy.Request) error { 324 | fmt.Println(r.Context().Value("foo")) 325 | 326 | return nil 327 | }) 328 | 329 | queue.OnSuccessFunc(func(r *bokchoy.Request) error { 330 | fmt.Println(r.Context().Value("foo")) 331 | 332 | return nil 333 | }) 334 | 335 | queue.OnFailureFunc(func(r *bokchoy.Request) error { 336 | fmt.Println(r.Context().Value("foo")) 337 | 338 | return nil 339 | }) 340 | ``` 341 | 342 | ### Store results 343 | 344 | By default, if you don't mutate the task in the handler its result will be always `nil`. 345 | 346 | You can store a result in your task to keep it for later, for example: you might need statistics from a twitter profile 347 | to save them later. 348 | 349 | ```go 350 | queue.HandleFunc(func(r *bokchoy.Request) error { 351 | r.Task.Result = map[string]string{"result": "wow!"} 352 | 353 | return nil 354 | }) 355 | ``` 356 | 357 | You can store anything as long as your serializer can serializes it. 358 | 359 | Keep in mind the default task TTL is `180 seconds`, you can override it with `bokchoy.WithTTL` option. 360 | 361 | ### Helpers 362 | 363 | Let's define our previous queue: 364 | 365 | ```go 366 | queue := engine.Queue("tasks.message") 367 | ``` 368 | 369 | #### Empty the queue 370 | 371 | ```go 372 | queue.Empty() 373 | ``` 374 | 375 | It will remove all waiting tasks from your queue. 376 | 377 | #### Cancel a waiting task 378 | 379 | We produce a task without running the worker: 380 | 381 | ```go 382 | payload := map[string]string{ 383 | "data": "hello world", 384 | } 385 | 386 | task, err := queue.Publish(ctx, payload) 387 | if err != nil { 388 | log.Fatal(err) 389 | } 390 | ``` 391 | 392 | Then we can cancel it by using its ID: 393 | 394 | ```go 395 | queue.Cancel(ctx, task.ID) 396 | ``` 397 | 398 | #### Retrieve a published task from the queue 399 | 400 | ```go 401 | queue.Get(ctx, task.ID) 402 | ``` 403 | 404 | #### Retrieve statistics from a queue 405 | 406 | ```go 407 | stats, err := queue.Count(ctx) 408 | if err != nil { 409 | log.Fatal(err) 410 | } 411 | 412 | fmt.Println("Number of waiting tasks:", stats.Direct) 413 | fmt.Println("Number of delayed tasks:", stats.Delayed) 414 | fmt.Println("Number of total tasks:", stats.Total) 415 | ``` 416 | 417 | ## Middleware handlers 418 | 419 | Bokchoy comes equipped with an optional middleware package, providing a suite of standard middlewares. 420 | Middlewares have the same API as handlers. It's easy to implement them and think of them like `net/http` middlewares, 421 | they share the same purpose to follow the lifecycle of a Bokchoy request. 
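For instance, a custom middleware is just a function wrapping a `Handler` and returning a new one. This is a minimal sketch — the `Elapsed` name and the log message are made up — using only the `Handler` and `HandlerFunc` types shown earlier:

```go
import (
    "log"
    "time"

    "github.com/thoas/bokchoy"
)

// Elapsed is a hypothetical middleware logging how long each task took.
func Elapsed(next bokchoy.Handler) bokchoy.Handler {
    return bokchoy.HandlerFunc(func(r *bokchoy.Request) error {
        start := time.Now()
        err := next.Handle(r)
        log.Printf("task %s handled in %s", r.Task.ID, time.Since(start))
        return err
    })
}
```

Register it with `Use` before attaching your handlers, so the consumers created by `HandleFunc` pick it up:

```go
queue := engine.Queue("tasks.message")
queue.Use(Elapsed)
queue.HandleFunc(func(r *bokchoy.Request) error {
    fmt.Println("Payload:", r.Task.Payload)

    return nil
})
```

The middlewares shipped in the `middleware` package (listed below) are meant to be registered the same way.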
422 | 
423 | ### Core middlewares
424 | 
425 | -----------------------------------------------------------------------------------------------------------
426 | | bokchoy/middleware     | description                                                              |
427 | |:-----------------------|:-------------------------------------------------------------------------|
428 | | Logger                 | Logs the start and end of each request with the elapsed processing time |
429 | | Recoverer              | Gracefully absorbs panics and prints the stack trace                    |
430 | | RequestID              | Injects a request ID into the context of each request                   |
431 | | Timeout                | Signals to the request context when the timeout deadline is reached     |
432 | -----------------------------------------------------------------------------------------------------------
433 | 
434 | See the [middleware](middleware) directory for more information.
435 | 
436 | 
437 | ## FAQs
438 | 
439 | ### Are Task IDs unique?
440 | 
441 | Yes! They are based on [ulid](https://github.com/oklog/ulid).
442 | 
443 | ### Is exactly-once execution of tasks guaranteed?
444 | 
445 | It's guaranteed by the underlying broker,
446 | which uses [BRPOP](https://redis.io/commands/brpop)/[BLPOP](https://redis.io/commands/blpop) from Redis.
447 | 
448 | If multiple clients are blocked for the same key, the first client to be served
449 | is the one that has been waiting the longest (the first that blocked on the key).
450 | 
451 | ## Contributing
452 | 
453 | * Ping me on twitter:
454 |   * [@thoas](https://twitter.com/thoas)
455 | * Fork the [project](https://github.com/thoas/bokchoy)
456 | * Fix [bugs](https://github.com/thoas/bokchoy/issues)
457 | 
458 | **Don't hesitate ;)**
459 | 
460 | ## Project history
461 | 
462 | Bokchoy is highly influenced by the great [rq](https://github.com/rq/rq) and [celery](http://www.celeryproject.org/).
463 | 
464 | Both are great, well-maintained projects, but they are only usable in a Python ecosystem.
465 | 
466 | Some parts (middlewares mostly) of Bokchoy are heavily inspired by or taken from [go-chi](https://github.com/go-chi/chi).
467 | -------------------------------------------------------------------------------- /broker_redis.go: --------------------------------------------------------------------------------
1 | package bokchoy
2 | 
3 | import (
4 |     "context"
5 |     "encoding/json"
6 |     "fmt"
7 |     "strings"
8 |     "sync"
9 |     "time"
10 | 
11 |     "github.com/thoas/bokchoy/logging"
12 | 
13 |     "github.com/pkg/errors"
14 |     "github.com/redis/go-redis/v9"
15 | )
16 | 
17 | // RedisBroker is the redis broker.
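// It wraps a redis.UniversalClient (plain client, sentinel or cluster),
// prefixes every key with Prefix, caches the SHA1 digests of the Lua scripts
// loaded by Initialize, and tracks which delayed queues already have a
// polling goroutine.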
18 | type RedisBroker struct { 19 | ClientType string 20 | Client redis.UniversalClient 21 | Prefix string 22 | Logger logging.Logger 23 | scripts map[string]string 24 | mu *sync.Mutex 25 | queues map[string]struct{} 26 | } 27 | 28 | const ( 29 | // Redis type 30 | redisTypeSentinel = "sentinel" 31 | redisTypeCluster = "cluster" 32 | ) 33 | 34 | var redisScripts = map[string]string{ 35 | "HMSETEXPIRE": `local key = KEYS[1] 36 | local data = ARGV 37 | local ttl = table.remove(data, 1) 38 | local res = redis.call('HMSET', key, unpack(data)) 39 | redis.call('EXPIRE', key, ttl) 40 | return res`, 41 | "ZPOPBYSCORE": `local key = ARGV[1] 42 | local min = ARGV[2] 43 | local max = ARGV[3] 44 | local results = redis.call('ZRANGEBYSCORE', key, min, max) 45 | local length = #results 46 | if length > 0 then 47 | redis.call('ZREMRANGEBYSCORE', key, min, max) 48 | return results 49 | else 50 | return nil 51 | end`, 52 | "MULTIHGETALL": `local collate = function (key) 53 | local raw_data = redis.call('HGETALL', key) 54 | local data = {} 55 | 56 | for idx = 1, #raw_data, 2 do 57 | data[raw_data[idx]] = raw_data[idx + 1] 58 | end 59 | 60 | return data; 61 | end 62 | 63 | local data = {} 64 | 65 | for _, key in ipairs(KEYS) do 66 | data[key] = collate(key) 67 | end 68 | 69 | return cjson.encode(data)`, 70 | } 71 | 72 | // newRedisBroker initializes a new redis client. 73 | func newRedisBroker(cfg RedisConfig, logger logging.Logger) *RedisBroker { 74 | var clt redis.UniversalClient 75 | 76 | switch cfg.Type { 77 | case redisTypeSentinel: 78 | clt = redis.NewFailoverClient(&redis.FailoverOptions{ 79 | MasterName: cfg.Sentinel.MasterName, 80 | SentinelAddrs: cfg.Sentinel.SentinelAddrs, 81 | Password: cfg.Sentinel.Password, 82 | MaxRetries: cfg.Sentinel.MaxRetries, 83 | DialTimeout: cfg.Sentinel.DialTimeout, 84 | ReadTimeout: cfg.Sentinel.ReadTimeout, 85 | WriteTimeout: cfg.Sentinel.WriteTimeout, 86 | PoolSize: cfg.Sentinel.PoolSize, 87 | PoolTimeout: cfg.Sentinel.PoolTimeout, 88 | ConnMaxIdleTime: cfg.Sentinel.ConnMaxIdleTime, 89 | ConnMaxLifetime: cfg.Sentinel.ConnMaxLifetime, 90 | MinIdleConns: cfg.Sentinel.MinIdleConns, 91 | }) 92 | case redisTypeCluster: 93 | clt = redis.NewClusterClient(&redis.ClusterOptions{ 94 | Addrs: cfg.Cluster.Addrs, 95 | Password: cfg.Cluster.Password, 96 | MaxRetries: cfg.Cluster.MaxRetries, 97 | DialTimeout: cfg.Cluster.DialTimeout, 98 | ReadTimeout: cfg.Cluster.ReadTimeout, 99 | WriteTimeout: cfg.Cluster.WriteTimeout, 100 | PoolSize: cfg.Cluster.PoolSize, 101 | PoolTimeout: cfg.Cluster.PoolTimeout, 102 | ConnMaxIdleTime: cfg.Cluster.ConnMaxIdleTime, 103 | ConnMaxLifetime: cfg.Cluster.ConnMaxLifetime, 104 | MinIdleConns: cfg.Cluster.MinIdleConns, 105 | ReadOnly: false, 106 | RouteRandomly: false, 107 | RouteByLatency: false, 108 | }) 109 | default: 110 | clt = redis.NewClient(&redis.Options{ 111 | Addr: cfg.Client.Addr, 112 | Password: cfg.Client.Password, 113 | DB: cfg.Client.DB, 114 | MaxRetries: cfg.Client.MaxRetries, 115 | DialTimeout: cfg.Client.DialTimeout, 116 | ReadTimeout: cfg.Client.ReadTimeout, 117 | WriteTimeout: cfg.Client.WriteTimeout, 118 | PoolSize: cfg.Client.PoolSize, 119 | PoolTimeout: cfg.Client.PoolTimeout, 120 | ConnMaxIdleTime: cfg.Client.ConnMaxIdleTime, 121 | ConnMaxLifetime: cfg.Client.ConnMaxLifetime, 122 | MinIdleConns: cfg.Client.MinIdleConns, 123 | TLSConfig: cfg.Client.TLSConfig, 124 | }) 125 | 126 | } 127 | 128 | return NewRedisBroker(clt, cfg.Type, cfg.Prefix, logger) 129 | } 130 | 131 | // NewRedisBroker initializes a new redis broker instance. 
132 | func NewRedisBroker(clt redis.UniversalClient, clientType string, prefix string, logger logging.Logger) *RedisBroker { 133 | return &RedisBroker{ 134 | ClientType: clientType, 135 | Client: clt, 136 | Prefix: prefix, 137 | Logger: logger, 138 | queues: make(map[string]struct{}), 139 | mu: &sync.Mutex{}, 140 | } 141 | } 142 | 143 | func (p RedisBroker) String() string { 144 | return fmt.Sprintf("redis (%s)", p.ClientType) 145 | } 146 | 147 | // Initialize initializes the redis broker. 148 | func (p *RedisBroker) Initialize(ctx context.Context) error { 149 | err := p.Client.Ping(ctx).Err() 150 | if err != nil { 151 | return err 152 | } 153 | 154 | p.scripts = make(map[string]string) 155 | for key := range redisScripts { 156 | sha, err := p.Client.ScriptLoad(ctx, redisScripts[key]).Result() 157 | if err != nil { 158 | return errors.Wrapf(err, "Unable to load script %s", key) 159 | } 160 | 161 | p.scripts[key] = sha 162 | } 163 | 164 | return nil 165 | } 166 | 167 | // Ping pings the redis broker to ensure it's well connected. 168 | func (p RedisBroker) Ping(ctx context.Context) error { 169 | _, err := p.Client.Ping(ctx).Result() 170 | if err != nil { 171 | return errors.Wrapf(err, "unable to ping redis %s", p.ClientType) 172 | } 173 | 174 | return nil 175 | } 176 | 177 | func (p RedisBroker) prefixed(keys ...interface{}) string { 178 | parts := []interface{}{p.Prefix} 179 | parts = append(parts, keys...) 180 | 181 | return fmt.Sprint(parts...) 182 | } 183 | 184 | func (p *RedisBroker) consumeDelayed(ctx context.Context, name string, duration time.Duration) { 185 | p.mu.Lock() 186 | 187 | delayName := fmt.Sprint(name, ":delay") 188 | _, ok := p.queues[delayName] 189 | if !ok { 190 | go func() { 191 | ticker := time.NewTicker(duration) 192 | 193 | for range ticker.C { 194 | max := time.Now().UTC() 195 | 196 | results, err := p.consume(ctx, delayName, name, max) 197 | if err != nil { 198 | p.Logger.Error(ctx, "Received error when retrieving delayed payloads", 199 | logging.Error(err)) 200 | } 201 | 202 | if len(results) == 0 { 203 | continue 204 | } 205 | 206 | _, err = p.Client.TxPipelined(ctx, func(pipe redis.Pipeliner) error { 207 | for i := range results { 208 | taskID, ok := results[i]["id"].(string) 209 | if !ok { 210 | continue 211 | } 212 | 213 | err := p.publish(ctx, pipe, name, taskID, results[i], time.Time{}) 214 | if err != nil { 215 | return err 216 | } 217 | } 218 | 219 | // To avoid data loss, we only remove the range when results are processed 220 | _, err = pipe.ZRemRangeByScore(ctx, delayName, "0", fmt.Sprintf("%d", max.Unix())).Result() 221 | if err != nil { 222 | return err 223 | } 224 | 225 | return nil 226 | }) 227 | } 228 | }() 229 | 230 | p.queues[delayName] = struct{}{} 231 | } 232 | 233 | p.mu.Unlock() 234 | 235 | } 236 | 237 | func (p *RedisBroker) consume(ctx context.Context, name string, taskPrefix string, eta time.Time) ([]map[string]interface{}, error) { 238 | var ( 239 | err error 240 | result []string 241 | queueKey = p.prefixed(name) 242 | ) 243 | 244 | if eta.IsZero() { 245 | p.consumeDelayed(ctx, name, 1*time.Second) 246 | 247 | result, err = p.Client.BRPop(ctx, 1*time.Second, queueKey).Result() 248 | 249 | if err != nil && err != redis.Nil { 250 | return nil, errors.Wrapf(err, "unable to BRPOP %s", queueKey) 251 | } 252 | } else { 253 | max := fmt.Sprintf("%d", eta.UTC().Unix()) 254 | results := p.Client.ZRangeByScore(ctx, queueKey, &redis.ZRangeBy{ 255 | Min: "0", 256 | Max: max, 257 | }) 258 | 259 | if results.Err() != nil && results.Err() != 
redis.Nil { 260 | return nil, errors.Wrapf(err, "unable to ZRANGEBYSCORE %s", queueKey) 261 | } 262 | 263 | result = results.Val() 264 | } 265 | 266 | if len(result) == 0 { 267 | return nil, nil 268 | } 269 | 270 | taskKeys := make([]string, 0, len(result)) 271 | for i := range result { 272 | if result[i] == name { 273 | continue 274 | } 275 | 276 | taskKeys = append(taskKeys, p.prefixed(taskPrefix, ":", result[i])) 277 | } 278 | 279 | values, err := p.payloadsFromKeys(ctx, taskKeys) 280 | if err != nil { 281 | return nil, err 282 | } 283 | 284 | results := make([]map[string]interface{}, 0, len(taskKeys)) 285 | for _, data := range values { 286 | if len(data) == 0 { 287 | continue 288 | } 289 | 290 | results = append(results, data) 291 | } 292 | 293 | return results, nil 294 | } 295 | 296 | // Consume returns an array of raw data. 297 | func (p *RedisBroker) Consume(ctx context.Context, name string, eta time.Time) ([]map[string]interface{}, error) { 298 | return p.consume(ctx, name, name, eta) 299 | 300 | } 301 | 302 | func (p *RedisBroker) payloadsFromKeys(ctx context.Context, taskKeys []string) (map[string]map[string]interface{}, error) { 303 | vals, err := p.evalSha(ctx, p.scripts["MULTIHGETALL"], taskKeys) 304 | if err != nil { 305 | return nil, errors.Wrapf(err, "unable to MULTIHGETALL %s", strings.Join(taskKeys, ", ")) 306 | } 307 | 308 | var values map[string]map[string]interface{} 309 | err = json.Unmarshal([]byte(vals.(string)), &values) 310 | if err != nil { 311 | return nil, errors.Wrapf(err, "unable to unmarshal %s", strings.Join(taskKeys, ", ")) 312 | } 313 | 314 | return values, nil 315 | } 316 | 317 | // Try to re-init broker (effectively run ScriptLoad again) if we get NOSCRIPT error on EvalSha(). 318 | // This is helpful when Redis is restarted or failover happens and our LUA scripts are gone from Redis. 319 | func (p *RedisBroker) evalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) (interface{}, error) { 320 | vals, err := p.Client.EvalSha(ctx, sha1, keys, args).Result() 321 | if err != nil && strings.Contains(err.Error(), "NOSCRIPT") { 322 | if errInit := p.Initialize(ctx); errInit != nil { 323 | p.Logger.Error(ctx, "Failed to re-initialize broker after NOSCRIPT error.", logging.Error(errInit)) 324 | } else { 325 | p.Logger.Debug(ctx, "Successfully re-initialized broker after NOSCRIPT error.") 326 | vals, err = p.Client.EvalSha(ctx, sha1, keys, args).Result() 327 | } 328 | } 329 | return vals, err 330 | } 331 | 332 | // Get returns stored raw data from task key. 333 | func (p *RedisBroker) Get(ctx context.Context, taskKey string) (map[string]interface{}, error) { 334 | taskKey = p.prefixed(taskKey) 335 | 336 | res, err := p.Client.HGetAll(ctx, taskKey).Result() 337 | if err != nil { 338 | return nil, errors.Wrapf(err, "unable to HGETALL %s", taskKey) 339 | } 340 | 341 | results := make(map[string]interface{}) 342 | for k, v := range res { 343 | results[k] = v 344 | } 345 | 346 | return results, nil 347 | } 348 | 349 | // Delete deletes raw data in broker based on key. 
350 | func (p *RedisBroker) Delete(ctx context.Context, name string, taskID string) error { 351 | return p.delete(ctx, p.Client, name, taskID) 352 | } 353 | 354 | func (p *RedisBroker) delete(ctx context.Context, client redis.Cmdable, name string, taskID string) error { 355 | var ( 356 | prefixedTaskKey = p.prefixed(name, ":", taskID) 357 | ) 358 | 359 | _, err := client.Del(ctx, prefixedTaskKey).Result() 360 | if err != nil { 361 | return errors.Wrapf(err, "unable to DEL %s", prefixedTaskKey) 362 | } 363 | 364 | return nil 365 | } 366 | 367 | func (p *RedisBroker) List(ctx context.Context, name string) ([]map[string]interface{}, error) { 368 | taskIDs, err := p.Client.LRange(ctx, name, 0, -1).Result() 369 | if err != nil { 370 | return nil, errors.Wrapf(err, "unable to LRANGE %s", name) 371 | } 372 | 373 | taskKeys := make([]string, 0, len(taskIDs)) 374 | for i := range taskIDs { 375 | taskKeys = append(taskKeys, p.prefixed(name, ":", taskIDs[i])) 376 | } 377 | 378 | payloads, err := p.payloadsFromKeys(ctx, taskKeys) 379 | if err != nil { 380 | return nil, err 381 | } 382 | 383 | results := make([]map[string]interface{}, 0, len(taskKeys)) 384 | for _, data := range payloads { 385 | if len(data) == 0 { 386 | continue 387 | } 388 | 389 | results = append(results, data) 390 | } 391 | 392 | return results, nil 393 | } 394 | 395 | // Count returns number of items from a queue name. 396 | func (p *RedisBroker) Count(ctx context.Context, queueName string) (BrokerStats, error) { 397 | var ( 398 | stats = BrokerStats{} 399 | err error 400 | ) 401 | 402 | queueName = p.prefixed(queueName) 403 | direct, err := p.Client.LLen(ctx, queueName).Result() 404 | if err != nil && err != redis.Nil { 405 | return stats, err 406 | } 407 | 408 | stats.Direct = int(direct) 409 | 410 | delayed, err := p.Client.ZCount(ctx, fmt.Sprint(queueName, ":delay"), "-inf", "+inf").Result() 411 | if err != nil && err != redis.Nil { 412 | return stats, err 413 | } 414 | 415 | stats.Delayed = int(delayed) 416 | 417 | stats.Total = stats.Direct + stats.Delayed 418 | 419 | return stats, nil 420 | } 421 | 422 | // Save synchronizes the stored item in redis. 423 | func (p *RedisBroker) Set(ctx context.Context, taskKey string, data map[string]interface{}, expiration time.Duration) error { 424 | prefixedTaskKey := p.prefixed(taskKey) 425 | 426 | if int(expiration.Seconds()) == 0 { 427 | _, err := p.Client.HMSet(ctx, prefixedTaskKey, data).Result() 428 | if err != nil { 429 | return errors.Wrapf(err, "unable to HMSET %s", prefixedTaskKey) 430 | } 431 | 432 | return nil 433 | } 434 | 435 | values := []interface{}{int(expiration.Seconds())} 436 | values = append(values, unpack(data)...) 437 | 438 | _, err := p.evalSha(ctx, p.scripts["HMSETEXPIRE"], []string{prefixedTaskKey}, values...) 439 | if err != nil { 440 | return errors.Wrapf(err, "unable to HMSETEXPIRE %s", prefixedTaskKey) 441 | } 442 | 443 | return nil 444 | } 445 | 446 | // Publish publishes raw data. 447 | // it uses a hash to store the task itself 448 | // pushes the task id to the list or a zset if the task is delayed. 
449 | func (p *RedisBroker) Publish(ctx context.Context, queueName string,
450 | 	taskID string, data map[string]interface{}, eta time.Time) error {
451 | 
452 | 	_, err := p.Client.Pipelined(ctx, func(pipe redis.Pipeliner) error {
453 | 		return p.publish(ctx, pipe, queueName, taskID, data, eta)
454 | 	})
455 | 	if err != nil {
456 | 		return err
457 | 	}
458 | 
459 | 	return nil
460 | }
461 | 
462 | func (p *RedisBroker) publish(ctx context.Context, client redis.Cmdable, queueName string,
463 | 	taskID string, data map[string]interface{}, eta time.Time) error {
464 | 
465 | 	var (
466 | 		prefixedTaskKey = p.prefixed(queueName, ":", taskID)
467 | 		err             error
468 | 	)
469 | 
470 | 	err = client.HMSet(ctx, prefixedTaskKey, data).Err()
471 | 	if err == nil {
472 | 		if eta.IsZero() {
473 | 			err = client.RPush(ctx, p.prefixed(queueName), taskID).Err()
474 | 		} else {
475 | 			// if eta is before now, then we should push this
476 | 			// taskID in priority
477 | 			if eta.Before(time.Now().UTC()) {
478 | 				err = client.LPush(ctx, p.prefixed(queueName), taskID).Err()
479 | 			} else {
480 | 				err = client.ZAdd(ctx, p.prefixed(fmt.Sprint(queueName, ":delay")), redis.Z{
481 | 					Score:  float64(eta.UTC().Unix()),
482 | 					Member: taskID,
483 | 				}).Err()
484 | 			}
485 | 		}
486 | 	}
487 | 	if err != nil {
488 | 		return errors.Wrapf(err, "unable to HMSET %s", taskID)
489 | 	}
490 | 
491 | 	return nil
492 | }
493 | 
494 | // Empty removes the redis key for a queue.
495 | func (p *RedisBroker) Empty(ctx context.Context, name string) error {
496 | 	err := p.Client.Del(ctx, p.prefixed(name)).Err()
497 | 	if err != nil && err != redis.Nil {
498 | 		return errors.Wrapf(err, "unable to DEL %s", p.prefixed(name))
499 | 	}
500 | 
501 | 	return nil
502 | }
503 | 
504 | // Flush flushes the entire redis database.
505 | func (p *RedisBroker) Flush(ctx context.Context) error {
506 | 	err := p.Client.FlushDB(ctx).Err()
507 | 	if err != nil {
508 | 		return errors.Wrap(err, "unable to FLUSHDB")
509 | 	}
510 | 
511 | 	return nil
512 | }
513 | 
514 | var _ Broker = (*RedisBroker)(nil)
515 | --------------------------------------------------------------------------------
/docs/a-tour-of-bokchoy.md:
--------------------------------------------------------------------------------
1 | # A Tour of Bokchoy, a simple job queue for Go backed by Redis
2 | 
3 | Bokchoy is a simple Go library for queueing tasks and processing them in the background with workers.
4 | It can be used in many cases: crawling third-party APIs, running slow processes, computations, analysis, etc.
5 | 
6 | It can be integrated easily into any web stack and is designed to have a low barrier to entry for newcomers.
7 | 
8 | To demonstrate each feature, we will build a minimalist (and admittedly dumb) web crawler with Bokchoy,
9 | inspired by the one found in [A Tour of Go](https://tour.golang.org/concurrency/10);
10 | links will be distributed across multiple servers.
11 | 
12 | TL;DR: the complete application can be found [here](../examples/crawler)
13 | 
14 | ## Overview
15 | 
16 | We will start small and go deeper and deeper. To crawl a website we need:
17 | 
18 | * A base URL to start crawling
19 | * A `depth` parameter to stop the crawler when it goes too deep
20 | * A URL collector to extract URLs from a webpage and propagate them to subtasks
21 | * A common storage to list all URLs found and their statuses
22 | 
23 | ## Installation
24 | 
25 | First, run a Redis server:
26 | 
27 | ```console
28 | $ redis-server
29 | ```
30 | 
31 | Ensure Go is installed:
32 | 
33 | ```console
34 | $ go version
35 | go version go1.12.6 darwin/amd64
36 | ```
37 | 
38 | Export `GO111MODULE=on` globally to let the Go toolchain install dependencies:
39 | 
40 | ```console
41 | $ export GO111MODULE=on
42 | ```
43 | 
44 | ## Setup
45 | 
46 | We will keep our code in a single file named `main.go` to keep it readable for this tutorial
47 | and iterate over it, step by step.
48 | 
49 | Define an initial `Crawl` structure:
50 | 
51 | ```go
52 | // main.go
53 | package main
54 | 
55 | import (
56 | 	"fmt"
57 | )
58 | 
59 | // Crawl defines a crawl.
60 | type Crawl struct {
61 | 	URL   string `json:"url"`
62 | 	Depth int    `json:"depth"`
63 | }
64 | 
65 | // String returns the string representation of a crawl.
66 | func (c Crawl) String() string {
67 | 	return fmt.Sprintf(
68 | 		"<Crawl url=%s depth=%d>",
69 | 		c.URL, c.Depth)
70 | }
71 | 
72 | func main() {
73 | }
74 | ```
75 | 
76 | In order to publish an initial URL to crawl, we use the [flag](https://golang.org/pkg/flag/) package:
77 | 
78 | ```go
79 | func main() {
80 | 	var (
81 | 		// which service needs to be run
82 | 		run string
83 | 
84 | 		// url to crawl
85 | 		url string
86 | 
87 | 		// maximum depth to crawl
88 | 		depth int
89 | 
90 | 		// redis address to customize
91 | 		redisAddr string
92 | 	)
93 | 
94 | 	flag.IntVar(&depth, "depth", 1, "depth to crawl")
95 | 	flag.StringVar(&url, "url", "", "url to crawl")
96 | 	flag.StringVar(&run, "run", "", "service to run")
97 | 	flag.StringVar(&redisAddr, "redis-addr", "localhost:6379", "redis address")
98 | 	flag.Parse()
99 | }
100 | ```
101 | 
102 | Our CLI to produce a new task:
103 | 
104 | ```console
105 | $ go run main.go -run producer -url {url} -depth {depth}
106 | ```
107 | 
108 | Bokchoy is a complete engine which exposes queues to publish to; first, initialize it:
109 | 
110 | ```go
111 | ctx := context.Background()
112 | 
113 | bok, err := bokchoy.New(ctx, bokchoy.Config{
114 | 	Broker: bokchoy.BrokerConfig{
115 | 		Type: "redis",
116 | 		Redis: bokchoy.RedisConfig{
117 | 			Type: "client",
118 | 			Client: bokchoy.RedisClientConfig{
119 | 				Addr: redisAddr,
120 | 			},
121 | 		},
122 | 	},
123 | })
124 | ```
125 | 
126 | Multiple syntaxes can be used to publish a new task to a queue:
127 | 
128 | ```go
129 | _, err := bok.Queue("tasks.crawl").Publish(ctx, &Crawl{URL: url, Depth: depth})
130 | if err != nil {
131 | 	log.Fatal(err)
132 | }
133 | ```
134 | 
135 | or
136 | 
137 | ```go
138 | _, err := bok.Publish(ctx, "tasks.crawl", &Crawl{URL: url, Depth: depth})
139 | if err != nil {
140 | 	log.Fatal(err)
141 | }
142 | ```
143 | 
144 | We use the `run` variable to implement the producer service:
145 | 
146 | ```go
147 | // ...
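// "bok" is the engine created with bokchoy.New above; Queue returns the queue
// identified by its name, so tasks can be published to it and consumed from it.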
148 | 
149 | queue := bok.Queue("tasks.crawl")
150 | 
151 | switch run {
152 | case "producer":
153 | 	task, err := queue.Publish(ctx, &Crawl{
154 | 		URL:   url,
155 | 		Depth: depth,
156 | 	})
157 | 	if err != nil {
158 | 		log.Fatal(err)
159 | 	}
160 | 
161 | 	log.Printf("%s published", task)
162 | }
163 | ```
164 | 
165 | We can now test the producer by running:
166 | 
167 | ```console
168 | $ go run main.go -run producer -url https://golang.org/
169 | 2019/07/10 17:22:14 published
170 | ```
171 | 
172 | The task is in the `waiting` state; we need a worker to process it.
173 | 
174 | It remains in the broker until it's completely processed (`failed` or `succeeded`),
175 | then it's kept in the broker with a default TTL of `180 seconds`.
176 | This duration can be customized globally on the engine
177 | or per task at publish time.
178 | 
179 | The `bokchoy.WithTTL` option customizes this duration:
180 | 
181 | ```go
182 | queue.Publish(ctx, &Crawl{
183 | 	URL:   url,
184 | 	Depth: depth,
185 | }, bokchoy.WithTTL(5*time.Minute))
186 | ```
187 | 
188 | As a result, this task is kept for `5 minutes` after being processed.
189 | 
190 | The first implementation of the worker is basic and only outputs the task:
191 | 
192 | ```go
193 | // ...
194 | 
195 | switch run {
196 | // ...
197 | 
198 | case "worker":
199 | 	// initialize a signal to close Bokchoy
200 | 	c := make(chan os.Signal, 1)
201 | 	signal.Notify(c, os.Interrupt)
202 | 
203 | 	// iterate over the channel to stop
204 | 	go func() {
205 | 		for range c {
206 | 			log.Print("Received signal, gracefully stopping")
207 | 
208 | 			// gracefully shutdown consumers
209 | 			bok.Stop(ctx)
210 | 		}
211 | 	}()
212 | 
213 | 	queue.HandleFunc(func(r *bokchoy.Request) error {
214 | 		// double marshalling to avoid casting
215 | 		// we can also use https://github.com/mitchellh/mapstructure
216 | 		res, err := json.Marshal(r.Task.Payload)
217 | 		if err != nil {
218 | 			return err
219 | 		}
220 | 
221 | 		var crawl Crawl
222 | 
223 | 		err = json.Unmarshal(res, &crawl)
224 | 		if err != nil {
225 | 			return err
226 | 		}
227 | 
228 | 		log.Print("Received ", crawl)
229 | 
230 | 		return nil
231 | 	})
232 | 
233 | 	// blocking operation, everything is done for you
234 | 	bok.Run(ctx)
235 | }
236 | ```
237 | 
238 | Launch the worker:
239 | 
240 | ```console
241 | $ go run docs/main.go -run worker
242 | 2019/07/10 17:28:47 Received 
243 | ```
244 | 
245 | ## Error handling
246 | 
247 | If the handler function doesn't return an error, the task is
248 | marked as `succeeded`, but what happens when an error occurs?
249 | 
250 | The handler is replaced with this one:
251 | 
252 | ```go
253 | // ...
254 | 
255 | queue.HandleFunc(func(r *bokchoy.Request) error {
256 | 	log.Print("Received ", r)
257 | 
258 | 	return fmt.Errorf("An unexpected error has happened")
259 | })
260 | ```
261 | 
262 | If the worker is run again, the task is processed and then retried three times:
263 | 
264 | ```console
265 | $ go run docs/main.go -run worker
266 | 2019/07/10 17:35:27 Received >
267 | 2019/07/10 17:36:27 Received >
268 | 2019/07/10 17:37:14 Received >
269 | 2019/07/10 17:38:27 Received >
270 | ```
271 | 
272 | By default, Bokchoy retries the task three times with
273 | the following intervals: `60 seconds`, `120 seconds`, `180 seconds`.
274 | Finally, the task is marked as `failed` in the broker.
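These retry defaults can also be overridden per task at publish time, the same way `bokchoy.WithTTL` was passed above. A minimal sketch, assuming `Publish` accepts the same retry options as the engine:

```go
// retry this crawl only once, 5 seconds after the first failure
_, err := queue.Publish(ctx, &Crawl{
	URL:   url,
	Depth: depth,
}, bokchoy.WithMaxRetries(1), bokchoy.WithRetryIntervals([]time.Duration{
	5 * time.Second,
}))
if err != nil {
	log.Fatal(err)
}
```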
275 | 
276 | We can also customize these defaults globally by reducing the intervals and the number of retries:
277 | 
278 | ```go
279 | bok, err := bokchoy.New(ctx, bokchoy.Config{
280 | 	Broker: bokchoy.BrokerConfig{
281 | 		Type: "redis",
282 | 		Redis: bokchoy.RedisConfig{
283 | 			Type: "client",
284 | 			Client: bokchoy.RedisClientConfig{
285 | 				Addr: redisAddr,
286 | 			},
287 | 		},
288 | 	},
289 | }, bokchoy.WithMaxRetries(2), bokchoy.WithRetryIntervals([]time.Duration{
290 | 	5 * time.Second,
291 | 	10 * time.Second,
292 | }))
293 | ```
294 | 
295 | Failed tasks are handled, but a panic can happen in Go and we don't want our worker to crash in this case.
296 | 
297 | Bokchoy comes equipped with a middleware package, providing a suite of standard middlewares.
298 | Middlewares have the same API as handlers. They are easy to implement and
299 | can be thought of as net/http middlewares: they share the same purpose, following
300 | the lifecycle of a Bokchoy request and interacting with it.
301 | 
302 | The previous handler is rewritten to panic:
303 | 
304 | ```go
305 | // ...
306 | 
307 | queue.HandleFunc(func(r *bokchoy.Request) error {
308 | 	log.Print("Received ", r)
309 | 
310 | 	panic("An unexpected error has happened")
311 | 	return nil
312 | })
313 | ```
314 | 
315 | The worker exits and fails miserably:
316 | 
317 | ```console
318 | $ go run docs/main.go -run worker
319 | 2019/07/10 17:57:52 Received >
320 | panic: An unexpected error has happened
321 | 
322 | goroutine 42 [running]:
323 | main.main.func2(0xc000128200, 0x0, 0x0)
324 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/docs/main.go:109 +0x98
325 | github.com/thoas/bokchoy.HandlerFunc.Handle(0x1395618, 0xc000128200, 0x0, 0x0)
326 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/handler.go:8 +0x30
327 | github.com/thoas/bokchoy.(*consumer).handleTask.func1(0xc00017a000, 0xc000128200, 0xc0000aaf40, 0xc0000ae360)
328 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/consumer.go:54 +0x42
329 | created by github.com/thoas/bokchoy.(*consumer).handleTask
330 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/consumer.go:53 +0x12e
331 | exit status 2
332 | ```
333 | 
334 | The engine has to be told which middleware to use:
335 | 
336 | ```go
337 | // ...
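// register the Recoverer middleware on the engine so panics raised in handlers
// are caught and the task is marked as failed instead of crashing the worker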
338 | 
339 | bok.Use(middleware.Recoverer)
340 | ```
341 | 
342 | Now, if another task is produced and the worker is run again:
343 | 
344 | ```console
345 | $ go run docs/main.go -run worker
346 | 2019/07/10 18:08:43 Received >
347 | Panic: An unexpected error has happened
348 | goroutine 23 [running]:
349 | runtime/debug.Stack(0x28, 0x0, 0x0)
350 | 	/usr/local/Cellar/go/1.12.6/libexec/src/runtime/debug/stack.go:24 +0x9d
351 | runtime/debug.PrintStack()
352 | 	/usr/local/Cellar/go/1.12.6/libexec/src/runtime/debug/stack.go:16 +0x22
353 | github.com/thoas/bokchoy/middleware.Recoverer.func1.1(0xc000150200)
354 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/middleware/recoverer.go:20 +0x150
355 | panic(0x12ff540, 0x13ea1f0)
356 | 	/usr/local/Cellar/go/1.12.6/libexec/src/runtime/panic.go:522 +0x1b5
357 | main.main.func2(0xc000150200, 0x20, 0xc000046708)
358 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/docs/main.go:111 +0x98
359 | github.com/thoas/bokchoy.HandlerFunc.Handle(0x1398de8, 0xc000150200, 0xc000150200, 0xc00010e120)
360 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/handler.go:8 +0x30
361 | github.com/thoas/bokchoy/middleware.Recoverer.func1(0xc000150200, 0x0, 0x0)
362 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/middleware/recoverer.go:25 +0x7f
363 | github.com/thoas/bokchoy.HandlerFunc.Handle(0xc00010e120, 0xc000150200, 0x1, 0x13f0c00)
364 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/handler.go:8 +0x30
365 | github.com/thoas/bokchoy.(*consumer).handleTask.func1(0xc00011e090, 0xc000150200, 0xc000102090, 0xc00008a060)
366 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/consumer.go:54 +0x75
367 | created by github.com/thoas/bokchoy.(*consumer).handleTask
368 | 	/Users/thoas/Sites/golang/src/github.com/thoas/bokchoy/consumer.go:53 +0x12e
369 | ```
370 | 
371 | The worker keeps its state and continues the workflow even after the panic; the task is marked as `failed`,
372 | just as if the handler had returned an error.
373 | 
374 | ## Error reporting
375 | 
376 | All errors are now handled, but how can we report them properly
377 | to an error tracking service ([sentry](https://sentry.io), for example)?
378 | 
379 | There are three ways to report an error in Bokchoy; each option can be customized.
380 | 
381 | ### Custom request logger
382 | 
383 | Bokchoy allows you to implement a custom [LogFormatter](https://github.com/thoas/bokchoy/blob/master/middleware/logger.go)
384 | which follows this interface:
385 | 
386 | ```go
387 | type LogFormatter interface {
388 | 	NewLogEntry(r *bokchoy.Request) LogEntry
389 | }
390 | ```
391 | 
392 | or the default one can be used:
393 | 
394 | ```go
395 | bok.Use(middleware.DefaultLogger)
396 | ```
397 | 
398 | This middleware follows the Bokchoy request and also catches panics
399 | if `middleware.Recoverer` is installed as well.
400 | 
401 | ### Custom tracer
402 | 
403 | Internal errors and task errors can be caught by implementing
404 | a custom [Tracer](https://github.com/thoas/bokchoy/blob/master/tracer.go) and providing it as an option
405 | when initializing the engine.
406 | 
407 | The `Tracer` must implement the following interface:
408 | 
409 | ```go
410 | type Tracer interface {
411 | 	Log(context.Context, string, error)
412 | }
413 | ```
414 | 
415 | A tracer implementation with sentry can be found [here](../examples/sentry).
416 | 
417 | ### Catch failure events
418 | 
419 | Bokchoy has an internal event listener system to track the state of a task during its lifecycle.
420 | 
421 | ```go
422 | queue.OnStartFunc(func(r *bokchoy.Request) error {
423 | 	// we update the context by adding a value
424 | 	*r = *r.WithContext(context.WithValue(r.Context(), "start_at", time.Now()))
425 | 
426 | 	return nil
427 | })
428 | 
429 | queue.OnCompleteFunc(func(r *bokchoy.Request) error {
430 | 	startAt, ok := r.Context().Value("start_at").(time.Time)
431 | 	if ok {
432 | 		fmt.Println(time.Since(startAt))
433 | 	}
434 | 
435 | 	return nil
436 | })
437 | 
438 | queue.OnSuccessFunc(func(r *bokchoy.Request) error {
439 | 	fmt.Println(r.Context().Value("start_at"))
440 | 
441 | 	return nil
442 | })
443 | 
444 | queue.OnFailureFunc(func(r *bokchoy.Request) error {
445 | 	fmt.Println(r.Context().Value("start_at"))
446 | 	fmt.Println("Error caught", r.Task.Error)
447 | 
448 | 	return nil
449 | })
450 | ```
451 | 
452 | The error can be caught in `OnFailureFunc` and sent to an error reporting service.
453 | 
454 | ## Implementation
455 | 
456 | Both `middleware.Recoverer` and `middleware.DefaultLogger` are used:
457 | 
458 | ```go
459 | engine.Use(middleware.Recoverer)
460 | engine.Use(middleware.DefaultLogger)
461 | ```
462 | 
463 | As we may need to trace each request individually, we also add `middleware.RequestID`,
464 | which attaches a unique ID to each request.
465 | 
466 | This reference may be used to debug our application in production,
467 | using [Kibana](https://www.elastic.co/fr/products/kibana) to follow
468 | the lifecycle of a request.
469 | 
470 | Bokchoy allows you to write your handler with two syntaxes:
471 | 
472 | ```go
473 | queue.HandleFunc(func(r *bokchoy.Request) error {
474 | 	// logic here
475 | 
476 | 	return nil
477 | })
478 | ```
479 | 
480 | or
481 | 
482 | ```go
483 | type crawlHandler struct {
484 | }
485 | 
486 | func (h *crawlHandler) Handle(r *bokchoy.Request) error {
487 | 	// logic here
488 | 
489 | 	return nil
490 | }
491 | 
492 | queue.Handle(&crawlHandler{})
493 | ```
494 | 
495 | The first syntax is useful for writing small handlers with
496 | little logic; the second is used to store attributes in the
497 | handler structure. We will use the latter to store our HTTP client
498 | instance.
499 | 
500 | A rewrite of the existing handler function:
501 | 
502 | ```go
503 | type crawlHandler struct {
504 | }
505 | 
506 | func (h *crawlHandler) Handle(r *bokchoy.Request) error {
507 | 	res, err := json.Marshal(r.Task.Payload)
508 | 	if err != nil {
509 | 		return err
510 | 	}
511 | 
512 | 	var crawl Crawl
513 | 
514 | 	err = json.Unmarshal(res, &crawl)
515 | 	if err != nil {
516 | 		return err
517 | 	}
518 | 
519 | 	log.Print("Received ", crawl)
520 | 
521 | 	return nil
522 | }
523 | ```
524 | To start crawling, we need an HTTP client and an in-memory storage to keep track of
525 | URLs already crawled, to avoid crawling them twice:
526 | 
527 | ```go
528 | type crawlHandler struct {
529 | 	clt    *http.Client
530 | 	crawls map[string]int // map of crawled URL -> HTTP status code
531 | }
532 | ```
533 | 
534 | To initialize a `crawlHandler` instance, we declare a constructor which
535 | creates a new `net/http` client with a custom timeout as a parameter.
537 | 
538 | ```go
539 | func newCrawlHandler(timeout time.Duration) *crawlHandler {
540 | 	return &crawlHandler{
541 | 		clt: &http.Client{
542 | 			Timeout: timeout,
543 | 			Transport: &http.Transport{
544 | 				Dial: (&net.Dialer{
545 | 					Timeout: timeout,
546 | 				}).Dial,
547 | 				TLSHandshakeTimeout: timeout,
548 | 			},
549 | 		},
550 | 		crawls: map[string]int{},
551 | 	}
552 | }
553 | ```
554 | 
555 | The custom timeout is needed to force the `net/http` client to time out
556 | after a given duration. Bokchoy already contains a timeout system to regain control,
557 | but the goroutine would leak if the task doesn't have its own timeout mechanism.
558 | 
559 | [goquery](https://github.com/PuerkitoBio/goquery) will parse the response body and extract URLs
560 | from the document; URLs will be filtered against a base URL.
561 | 
562 | ```go
563 | // Crawls returns the crawls.
564 | func (h *crawlHandler) Crawls() []string {
565 | 	crawls := make([]string, len(h.crawls))
566 | 	i := 0
567 | 	for url := range h.crawls {
568 | 		crawls[i] = url
569 | 		i++
570 | 	}
571 | 	return crawls
572 | }
573 | 
574 | // extractRelativeLinks extracts relative links from a net/http response with a base url.
575 | // It returns links which only contain the base url to avoid crawling external links.
576 | func (h *crawlHandler) extractRelativeLinks(baseURL string, res *http.Response) ([]string, error) {
577 | 	doc, err := goquery.NewDocumentFromResponse(res)
578 | 	if err != nil {
579 | 		return nil, err
580 | 	}
581 | 
582 | 	links := h.filterLinks(baseURL, h.extractLinks(doc))
583 | 	crawls := h.Crawls()
584 | 
585 | 	filteredLinks := []string{}
586 | 
587 | 	for i := range links {
588 | 		if funk.InStrings(crawls, links[i]) {
589 | 			continue
590 | 		}
591 | 
592 | 		filteredLinks = append(filteredLinks, links[i])
593 | 	}
594 | 
595 | 	return filteredLinks, nil
596 | }
597 | 
598 | // extractLinks extracts links from a goquery.Document.
599 | func (h *crawlHandler) extractLinks(doc *goquery.Document) []string {
600 | 	foundUrls := []string{}
601 | 	doc.Find("a").Each(func(i int, s *goquery.Selection) {
602 | 		res, _ := s.Attr("href")
603 | 		foundUrls = append(foundUrls, res)
604 | 	})
605 | 
606 | 	return foundUrls
607 | }
608 | 
609 | // filterLinks filters links with a base url.
610 | func (h *crawlHandler) filterLinks(baseURL string, links []string) []string {
611 | 	filteredLinks := []string{}
612 | 
613 | 	for _, link := range links {
614 | 		if strings.HasPrefix(link, baseURL) {
615 | 			filteredLinks = append(filteredLinks, link)
616 | 		}
617 | 
618 | 		if strings.HasPrefix(link, "/") {
619 | 			resolvedURL := fmt.Sprintf("%s%s", baseURL, link)
620 | 			filteredLinks = append(filteredLinks, resolvedURL)
621 | 		}
622 | 	}
623 | 
624 | 	return filteredLinks
625 | }
626 | ```
627 | 
628 | To keep the base URL shared between the main task and the subtasks generated
629 | from URLs extracted in the document, we pass it along in the published task.
630 | 
631 | ```go
632 | // Crawl defines a crawl.
633 | type Crawl struct {
634 | 	BaseURL string `json:"base_url"`
635 | 	URL     string `json:"url"`
636 | 	Depth   int    `json:"depth"`
637 | }
638 | 
639 | // String returns the string representation of a crawl.
640 | func (c Crawl) String() string {
641 | 	return fmt.Sprintf(
642 | 		"<Crawl url=%s depth=%d>",
643 | 		c.URL, c.Depth)
644 | }
645 | ```
646 | 
647 | The `BaseURL` attribute is added to the `Crawl` structure.
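Since `extractRelativeLinks` filters links against `BaseURL`, the initial task published by the producer should carry it as well. A small sketch of the updated producer case, assuming the starting URL also serves as the base URL:

```go
case "producer":
	task, err := queue.Publish(ctx, &Crawl{
		URL:     url,
		BaseURL: url, // the starting URL doubles as the base URL used to filter links
		Depth:   depth,
	})
	if err != nil {
		log.Fatal(err)
	}

	log.Printf("%s published", task)
```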
648 | 
649 | The last part is to update the `Handle` method to use `extractRelativeLinks` and publish
650 | subtasks with `depth` decremented, to stop the handler when it reaches zero:
651 | 
652 | ```go
653 | type crawlHandler struct {
654 | 	// ...
655 | 	queue *bokchoy.Queue
656 | }
657 | 
658 | func (h *crawlHandler) Handle(r *bokchoy.Request) error {
659 | 	res, err := json.Marshal(r.Task.Payload)
660 | 	if err != nil {
661 | 		return err
662 | 	}
663 | 
664 | 	var crawl Crawl
665 | 
666 | 	err = json.Unmarshal(res, &crawl)
667 | 	if err != nil {
668 | 		return err
669 | 	}
670 | 
671 | 	log.Print("Received ", crawl)
672 | 
673 | 	resp, err := h.clt.Get(crawl.URL)
674 | 	if err != nil {
675 | 		return err
676 | 	}
677 | 
678 | 	defer resp.Body.Close()
679 | 
680 | 	log.Print("Crawled ", crawl.URL, " - [", resp.Status, "]")
681 | 	h.AddCrawl(crawl.URL, resp.StatusCode)
682 | 
683 | 	if resp.StatusCode != 200 {
684 | 		return nil
685 | 	}
686 | 
687 | 	// depth is zero, the handler should stop
688 | 	if crawl.Depth == 0 {
689 | 		return nil
690 | 	}
691 | 
692 | 	// extract relative links
693 | 	links, err := h.extractRelativeLinks(crawl.BaseURL, resp)
694 | 	if err != nil {
695 | 		return err
696 | 	}
697 | 
698 | 	for i := range links {
699 | 		// next crawls will still have the same base url
700 | 		// depth is decremented to stop the flow
701 | 		task, err := h.queue.Publish(r.Context(), &Crawl{
702 | 			URL:     links[i],
703 | 			BaseURL: crawl.BaseURL,
704 | 			Depth:   crawl.Depth - 1,
705 | 		})
706 | 		if err != nil {
707 | 			return err
708 | 		}
709 | 
710 | 		log.Printf("%s published", task)
711 | 	}
712 | 
713 | 	return nil
714 | }
715 | ```
716 | 
717 | It's time to test the complete workflow by running the producer again:
718 | 
719 | ```console
720 | $ go run docs/main.go -run producer -url https://golang.org
721 | ```
722 | 
723 | Then the worker:
724 | 
725 | ```console
726 | $ go run docs/main.go -run worker
727 | 2019/07/13 08:56:24 Received 
728 | 2019/07/13 08:56:25 Crawled https://golang.org - [200 OK]
729 | 2019/07/13 08:56:25 published
730 | 2019/07/13 08:56:25 published
731 | 2019/07/13 08:56:25 published
732 | 2019/07/13 08:56:25 published
733 | 2019/07/13 08:56:25 published
734 | 2019/07/13 08:56:25 published
735 | 2019/07/13 08:56:25 published
736 | 2019/07/13 08:56:25 published
737 | 2019/07/13 08:56:25 - succeeded - result: (empty) in 445.079628ms
738 | 2019/07/13 08:56:25 Received 
739 | 2019/07/13 08:56:25 Crawled https://golang.org/doc/tos.html - [200 OK]
740 | 2019/07/13 08:56:25 - succeeded - result: (empty) in 137.121649ms
741 | 2019/07/13 08:56:25 Received 
742 | 2019/07/13 08:56:25 Crawled https://golang.org/doc/copyright.html - [200 OK]
743 | 2019/07/13 08:56:25 - succeeded - result: (empty) in 294.807462ms
744 | 2019/07/13 08:56:25 Received 
745 | 2019/07/13 08:56:26 Crawled https://golang.org/dl - [200 OK]
746 | 2019/07/13 08:56:26 - succeeded - result: (empty) in 1.051028215s
747 | 2019/07/13 08:56:26 Received 
748 | 2019/07/13 08:56:27 Crawled https://golang.org/blog - [200 OK]
749 | 2019/07/13 08:56:27 - succeeded - result: (empty) in 813.442227ms
750 | 2019/07/13 08:56:27 Received 
751 | 2019/07/13 08:56:28 Crawled https://golang.org/help - [200 OK]
752 | 2019/07/13 08:56:28 - succeeded - result: (empty) in 721.972494ms
753 | 2019/07/13 08:56:28 Received 
754 | 2019/07/13 08:56:28 Crawled https://golang.org/project - [200 OK]
755 | 2019/07/13 08:56:28 - succeeded - result: (empty) in 411.728612ms
756 | 2019/07/13 08:56:28 Received 
757 | 2019/07/13 08:56:29 Crawled https://golang.org/pkg - [200 OK]
758 | 2019/07/13 08:56:29 - succeeded - result: (empty) in 408.950376ms
759 | 2019/07/13 08:56:29 Received 
760 | 2019/07/13 08:56:29 Crawled https://golang.org/doc - [200 OK]
761 | 2019/07/13 08:56:29 - succeeded - result: (empty) in 367.4162ms
762 | ```
763 | 
764 | It works like a charm, but it's still a bit slow to crawl, and it can be even slower with a higher `depth` value.
765 | 
766 | ## Concurrency
767 | 
768 | Bokchoy allows you to easily spawn multiple goroutines per queue, or globally on the engine.
769 | 
770 | We will add a new `concurrency` flag at the top to configure the number of workers for this queue:
771 | 
772 | ```go
773 | var concurrency int
774 | flag.IntVar(&concurrency, "concurrency", 1, "number of workers")
775 | ```
776 | 
777 | The line is updated:
778 | 
779 | ```go
780 | queue.Handle(&crawlHandler{})
781 | ```
782 | 
783 | as follows:
784 | 
785 | ```go
786 | queue.Handle(&crawlHandler{}, bokchoy.WithConcurrency(concurrency))
787 | ```
788 | 
789 | Concurrency comes with a potential race condition issue;
790 | we use the [sync](https://golang.org/pkg/sync/) package to avoid it:
791 | 
792 | ```go
793 | 
794 | type crawlHandler struct {
795 | 	// ...
796 | 	mu sync.RWMutex
797 | }
798 | 
799 | // AddCrawl adds a new crawl to the storage.
800 | func (h *crawlHandler) AddCrawl(url string, statusCode int) {
801 | 	h.mu.Lock()
802 | 	defer h.mu.Unlock()
803 | 
804 | 	h.crawls[url] = statusCode
805 | }
806 | 
807 | // Crawls returns the crawls.
808 | func (h *crawlHandler) Crawls() []string {
809 | 	h.mu.RLock()
810 | 	crawls := make([]string, len(h.crawls))
811 | 	i := 0
812 | 	for url := range h.crawls {
813 | 		crawls[i] = url
814 | 		i++
815 | 	}
816 | 	h.mu.RUnlock()
817 | 
818 | 	return crawls
819 | }
820 | ```
821 | 
822 | ## Conclusion
823 | 
824 | It has been a long tour; if you have reached the bottom, you belong to the brave ☺.
825 | 
826 | There are multiple other features ([timeout](https://github.com/thoas/bokchoy#timeout),
827 | [custom logger](https://github.com/thoas/bokchoy#custom-logger),
828 | [delayed task](https://github.com/thoas/bokchoy#delayed-task), ...)
829 | which are not described in this tour; if you are curious enough,
830 | go check the [README](https://github.com/thoas/bokchoy) of the project.
831 | 
832 | 
833 | * Ping me on twitter [@thoas](https://twitter.com/thoas)
834 | * Fork the [project](https://github.com/thoas/bokchoy)
835 | * Fix [bugs](https://github.com/thoas/bokchoy/issues)
836 | --------------------------------------------------------------------------------