├── .gitignore
├── cmd
│   ├── gwctl
│   │   └── main.go
│   ├── stats.go
│   ├── retries.go
│   └── root.go
├── api_stats_test.go
├── signals_windows.go
├── .github
│   └── workflows
│       └── ci.yml
├── signals_posix.go
├── storage
│   ├── keys.go
│   ├── storage.go
│   └── redis.go
├── Makefile
├── msg_test.go
├── middleware_stats.go
├── go.mod
├── LICENSE.txt
├── api_stats.go
├── middleware_logging.go
├── scheduled_test.go
├── middleware.go
├── scheduled.go
├── api_retries.go
├── task_runner.go
├── msg.go
├── api_server.go
├── heartbeat_test.go
├── middleware_stats_test.go
├── api_retries_test.go
├── worker.go
├── task_runner_test.go
├── test_utils.go
├── middleware_test.go
├── middleware_retry.go
├── heartbeat.go
├── fetcher.go
├── options_test.go
├── fetcher_test.go
├── README.md
├── producer.go
├── options.go
├── worker_test.go
├── producer_test.go
├── middleware_retry_test.go
├── manager.go
├── manager_test.go
└── go.sum

/.gitignore:
--------------------------------------------------------------------------------
1 | vendor/
2 | /.idea/
3 | target/
4 | .vscode/
--------------------------------------------------------------------------------
/cmd/gwctl/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import "github.com/digitalocean/go-workers2/cmd"
4 |
5 | func main() {
6 | 	cmd.Execute()
7 | }
--------------------------------------------------------------------------------
/api_stats_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"net/http/httptest"
5 | 	"testing"
6 |
7 | 	"github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestStats_Empty(t *testing.T) {
11 | 	a := apiServer{}
12 |
13 | 	recorder := httptest.NewRecorder()
14 | 	request := httptest.NewRequest("GET", "/stats", nil)
15 | 	a.Stats(recorder, request)
16 |
17 | 	assert.Equal(t, "[]\n", recorder.Body.String())
18 | }
--------------------------------------------------------------------------------
/signals_windows.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"os/signal"
5 | 	"syscall"
6 | )
7 |
8 | func (m *Manager) handleSignals() {
9 | 	signal.Notify(m.signal, syscall.SIGINT, syscall.SIGTERM)
10 | 	defer signal.Stop(m.signal)
11 |
12 | 	for sig := range m.signal {
13 | 		switch sig {
14 | 		case syscall.SIGINT, syscall.SIGTERM:
15 | 			m.Stop()
16 | 			// Don't stop more than once.
17 | 			return
18 | 		}
19 | 	}
20 | }
21 |
22 | func (m *Manager) stopSignalHandler() {
23 | 	signal.Stop(m.signal)
24 | 	close(m.signal)
25 | }
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Test go-workers2
2 | on: [push]
3 |
4 | jobs:
5 |   test:
6 |     name: Test
7 |     runs-on: ubuntu-latest
8 |
9 |     services:
10 |       redis:
11 |         image: redis:6.0-alpine
12 |         ports:
13 |           - 6379:6379
14 |
15 |     steps:
16 |       - name: Set up Go 1.13
17 |         uses: actions/setup-go@v2
18 |         with:
19 |           go-version: 1.13
20 |         id: go
21 |
22 |       - name: Checkout code
23 |         uses: actions/checkout@v2
24 |
25 |       - name: Test
26 |         run: make test
--------------------------------------------------------------------------------
/signals_posix.go:
--------------------------------------------------------------------------------
1 | // +build !windows
2 |
3 | package workers
4 |
5 | import (
6 | 	"os/signal"
7 | 	"syscall"
8 | )
9 |
10 | func (m *Manager) handleSignals() {
11 | 	signal.Notify(m.signal, syscall.SIGUSR1, syscall.SIGINT, syscall.SIGTERM)
12 |
13 | 	for sig := range m.signal {
14 | 		switch sig {
15 | 		case syscall.SIGINT, syscall.SIGUSR1, syscall.SIGTERM:
16 | 			m.Stop()
17 | 			// Don't stop more than once.
18 | 			return
19 | 		}
20 | 	}
21 | }
22 |
23 | func (m *Manager) stopSignalHandler() {
24 | 	signal.Stop(m.signal)
25 | 	close(m.signal)
26 | }
--------------------------------------------------------------------------------
/storage/keys.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import "fmt"
4 |
5 | var heartbeatWorkKey = ":work"
6 |
7 | // GetManagerKey gets redis key for manager
8 | func GetManagerKey(namespace, heartbeatID string) string {
9 | 	return namespace + heartbeatID
10 | }
11 |
12 | // GetWorkersKey gets redis key for manager's workers' heartbeat
13 | func GetWorkersKey(managerKey string) string {
14 | 	return managerKey + heartbeatWorkKey
15 | }
16 |
17 | // GetWorkerID gets a worker's ID
18 | func GetWorkerID(pid int, tid string) string {
19 | 	return fmt.Sprintf("%d-%s", pid, tid)
20 | }
21 |
22 | // GetProcessesKey gets redis key for manager processes
23 | func GetProcessesKey(namespace string) string {
24 | 	return namespace + "processes"
25 | }
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Redis - used for local development
2 | .PHONY: start-docker
3 | start-docker:
4 | 	docker run -p 6379:6379 --name test-redis -d redis
5 |
6 | .PHONY: clean-docker
7 | clean-docker:
8 | 	docker stop test-redis
9 | 	docker rm test-redis
10 |
11 | # Test
12 | .PHONY: test
13 | test:
14 | 	go test -timeout 180s -v
15 |
16 | .PHONY: local-test
17 | local-test: start-docker test clean-docker
18 |
19 | # Vendored dependencies
20 | .PHONY: vendor
21 | vendor:
22 | 	go mod tidy
23 | 	go mod vendor
24 |
25 | .PHONY: vendor-upgrade
26 | vendor-upgrade:
27 | 	go get -u -d all
28 | 	go mod vendor
29 | 	go mod tidy
30 |
31 | # cmd
32 | .PHONY: build-cmd
33 | build-cmd:
34 | 	go build -o ./target/gw2ctl github.com/digitalocean/go-workers2/cmd/gwctl
--------------------------------------------------------------------------------
/cmd/stats.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | 	"fmt"
5 | 	"io/ioutil"
6 | 	"net/http"
7 |
8 | 	"github.com/spf13/cobra"
9 | )
10 |
11 | var statsCmd = &cobra.Command{
12 | 	Use:   "stats",
13 | 	Short: "go-workers2 stats info",
14 | 	Long: `Use the stats command to get stats from a specified host address and
15 | port number, like so:
16 |
17 | gwctl stats --a 127.0.0.1 --p 8080`,
18 | 	RunE: runStats,
19 | }
20 |
21 | func init() {
22 | 	rootCmd.AddCommand(statsCmd)
23 | }
24 |
25 | func runStats(cmd *cobra.Command, args []string) error {
26 | 	address := "http://" + hostAddress + ":" + port + "/stats"
27 |
28 | 	resp, err := http.Get(address)
29 | 	if err != nil {
30 | 		return err
31 | 	}
32 |
33 | 	defer resp.Body.Close()
34 | 	body, err := ioutil.ReadAll(resp.Body)
35 | 	if err != nil {
36 | 		return err
37 | 	}
38 | 	fmt.Printf("Body: %v\n", string(body))
39 | 	return nil
40 | }
--------------------------------------------------------------------------------
/msg_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"testing"
5 |
6 | 	"github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestNewMsg(t *testing.T) {
10 | 	//unmarshals json
11 | 	msg, _ := NewMsg("{\"hello\":\"world\",\"foo\":3}")
12 | 	hello, _ := msg.Get("hello").String()
13 | 	foo, _ := msg.Get("foo").Int()
14 |
15 | 	assert.Equal(t, "world", hello)
16 | 	assert.Equal(t, 3, foo)
17 |
18 | 	//returns an error if invalid json
19 | 	msg, err := NewMsg("{\"hello:\"world\",\"foo\":3}")
20 |
21 | 	assert.Nil(t, msg)
22 | 	assert.NotNil(t, err)
23 | }
24 |
25 | func TestArgs(t *testing.T) {
26 | 	//returns args key
27 | 	msg, _ := NewMsg("{\"hello\":\"world\",\"args\":[\"foo\",\"bar\"]}")
28 | 	assert.Equal(t, "[\"foo\",\"bar\"]", msg.Args().ToJson())
29 |
30 | 	//returns empty array if args key doesn't exist
31 | 	msg, _ = NewMsg("{\"hello\":\"world\"}")
32 | 	assert.Equal(t, "[]", msg.Args().ToJson())
33 | }
--------------------------------------------------------------------------------
/cmd/retries.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | 	"fmt"
5 | 	"io/ioutil"
6 | 	"net/http"
7 |
8 | 	"github.com/spf13/cobra"
9 | )
10 |
11 | var retriesCmd = &cobra.Command{
12 | 	Use:   "retries",
13 | 	Short: "go-workers2 retries info",
14 | 	Long: `Use the retries command to get retries information from a specified host address and
15 | port number, like so:
16 |
17 | gwctl retries --a 127.0.0.1 --p 8080`,
18 | 	RunE: runRetries,
19 | }
20 |
21 | func init() {
22 | 	rootCmd.AddCommand(retriesCmd)
23 | }
24 |
25 | func runRetries(cmd *cobra.Command, args []string) error {
26 | 	address := "http://" + hostAddress + ":" + port + "/retries"
27 |
28 | 	resp, err := http.Get(address)
29 | 	if err != nil {
30 | 		return err
31 | 	}
32 |
33 | 	defer resp.Body.Close()
34 | 	body, err := ioutil.ReadAll(resp.Body)
35 | 	if err != nil {
36 | 		return err
37 | 	}
38 | 	fmt.Printf("Body: %v\n", string(body))
39 | 	return nil
40 | }
--------------------------------------------------------------------------------
/middleware_stats.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | )
7 |
8 | // StatsMiddleware middleware to collect stats on processed messages
9 | func StatsMiddleware(queue string, mgr *Manager, next JobFunc) JobFunc {
10 | 	return func(message *Msg) (err error) {
11 | 		defer func() {
12 | 			if e := recover(); e != nil {
13 | 				var ok bool
14 | 				if err, ok = e.(error); !ok {
15 | 					err = fmt.Errorf("%v", e)
16 | 				}
17 |
18 | 				if err != nil {
19 | 					incrementStats(mgr, "failed")
20 | 				}
21 | 			}
22 |
23 | 		}()
24 |
25 | 		err = next(message)
26 | 		if err != nil {
27 | 			incrementStats(mgr, "failed")
28 | 		} else {
29 | 			incrementStats(mgr, "processed")
30 | 		}
31 |
32 | 		return
33 | 	}
34 | }
35 |
36 | func incrementStats(mgr *Manager, metric string) {
37 | 	err := mgr.opts.store.IncrementStats(context.Background(), metric)
38 |
39 | 	if err != nil {
40 | 		mgr.logger.Println("couldn't save stats:", err)
41 | 	}
42 | }
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/digitalocean/go-workers2
2 |
3 | require (
4 | 	github.com/bitly/go-simplejson v0.5.0
5 | 	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
6 | 	github.com/go-redis/redis/v8 v8.4.4
7 | 	github.com/golang/protobuf v1.4.3 // indirect
8 | 	github.com/google/uuid v1.1.4
9 | 	github.com/kr/text v0.2.0 // indirect
10 | 	github.com/nxadm/tail v1.4.6 // indirect
11 | 	github.com/spf13/cobra v1.1.1
12 | 	github.com/stretchr/testify v1.6.1
13 | 	golang.org/x/net v0.0.0-20201224014010-6772e930b67b // indirect
14 | 	golang.org/x/sys v0.0.0-20210105210732-16f7687f5001 // indirect
15 | 	golang.org/x/text v0.3.4 // indirect
16 | 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
17 | 	google.golang.org/protobuf v1.25.0 // indirect
18 | 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
19 | 	gopkg.in/yaml.v2 v2.4.0 // indirect
20 | 	gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8 // indirect
21 | )
22 |
23 | go 1.13
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2013 John Allison
4 | Copyright (c) 2018 DigitalOcean
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy of
7 | this software and associated documentation files (the "Software"), to deal in
8 | the Software without restriction, including without limitation the rights to
9 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
10 | the Software, and to permit persons to whom the Software is furnished to do so,
11 | subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
18 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
19 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/api_stats.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"encoding/json"
5 | 	"net/http"
6 | )
7 |
8 | func (s *apiServer) Stats(w http.ResponseWriter, req *http.Request) {
9 | 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
10 | 	w.Header().Set("Access-Control-Allow-Origin", "*")
11 |
12 | 	allStats := []Stats{}
13 | 	for _, m := range s.managers {
14 | 		stats, err := m.GetStats()
15 | 		if err != nil {
16 | 			s.logger.Println("couldn't retrieve stats for manager:", err)
17 | 		} else {
18 | 			allStats = append(allStats, stats)
19 | 		}
20 | 	}
21 |
22 | 	enc := json.NewEncoder(w)
23 | 	enc.SetIndent("", " ")
24 | 	enc.Encode(allStats)
25 | }
26 |
27 | // Stats contains current stats for a manager
28 | type Stats struct {
29 | 	Name       string                 `json:"manager_name"`
30 | 	Processed  int64                  `json:"processed"`
31 | 	Failed     int64                  `json:"failed"`
32 | 	Jobs       map[string][]JobStatus `json:"jobs"`
33 | 	Enqueued   map[string]int64       `json:"enqueued"`
34 | 	RetryCount int64                  `json:"retry_count"`
35 | }
36 |
37 | // JobStatus contains the status and data for active jobs of a manager
38 | type JobStatus struct {
39 | 	Message   *Msg  `json:"message"`
40 | 	StartedAt int64 `json:"started_at"`
41 | }
--------------------------------------------------------------------------------
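The /stats handler above encodes a []Stats slice, so a client can decode the response using the same exported type. A hedged sketch of such a client; the URL assumes the default port 8080 used by the gwctl flags in this repo:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"

	workers "github.com/digitalocean/go-workers2"
)

func main() {
	// Assumes an API server started on the default port 8080.
	resp, err := http.Get("http://localhost:8080/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The endpoint returns one Stats entry per registered manager.
	var all []workers.Stats
	if err := json.NewDecoder(resp.Body).Decode(&all); err != nil {
		panic(err)
	}
	for _, s := range all {
		fmt.Printf("%s: processed=%d failed=%d retries=%d\n",
			s.Name, s.Processed, s.Failed, s.RetryCount)
	}
}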
/middleware_logging.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"fmt"
5 | 	"log"
6 | 	"runtime"
7 | 	"time"
8 | )
9 |
10 | // LogMiddleware is the default logging middleware
11 | func LogMiddleware(queue string, mgr *Manager, next JobFunc) JobFunc {
12 | 	return func(message *Msg) (err error) {
13 | 		prefix := fmt.Sprint(queue, " JID-", message.Jid())
14 |
15 | 		start := time.Now()
16 | 		mgr.logger.Println(prefix, "start")
17 | 		mgr.logger.Println(prefix, "args:", message.Args().ToJson())
18 |
19 | 		defer func() {
20 | 			if e := recover(); e != nil {
21 | 				var ok bool
22 | 				if err, ok = e.(error); !ok {
23 | 					err = fmt.Errorf("%v", e)
24 | 				}
25 |
26 | 				if err != nil {
27 | 					logProcessError(mgr.logger, prefix, start, err)
28 | 				}
29 | 			}
30 |
31 | 		}()
32 |
33 | 		err = next(message)
34 | 		if err != nil {
35 | 			logProcessError(mgr.logger, prefix, start, err)
36 | 		} else {
37 | 			mgr.logger.Println(prefix, "done:", time.Since(start))
38 | 		}
39 |
40 | 		return
41 | 	}
42 |
43 | }
44 |
45 | func logProcessError(logger *log.Logger, prefix string, start time.Time, err error) {
46 | 	logger.Println(prefix, "fail:", time.Since(start))
47 |
48 | 	buf := make([]byte, 4096)
49 | 	buf = buf[:runtime.Stack(buf, false)]
50 | 	logger.Printf("%s error: %v\n%s", prefix, err, buf)
51 | }
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
1 | package cmd
2 |
3 | import (
4 | 	"fmt"
5 | 	"os"
6 |
7 | 	"github.com/spf13/cobra"
8 | )
9 |
10 | var (
11 | 	hostAddress string
12 | 	port        string
13 | )
14 |
15 | // rootCmd represents the base command when called without any subcommands
16 | var rootCmd = &cobra.Command{
17 | 	Use:   "gwctl",
18 | 	Short: "gwctl is the GoWorkers2 cli tool",
19 | 	Long: `
20 |     __                                                  ________
21 |    ____   ____   __  _  _____________| | __ ___________  _____\_____  \
22 |   / ___\ /  _ \  ______  \ \/ \/ /  _ \_  __ \ |/ // __ \_  __ \/ ___//  ____/
23 |  / /_/  >  <_> ) /_____/   \     (  <_> )  |  \    <\  ___/|  | \/\___ \/      \
24 |  \___  / \____/             \/\_/ \____/|__|  |__|_ \\____  >__|  /_____  >________\
25 | /_____/
26 |
27 | gwctl is a cli tool that allows a user to easily get data from a go-workers2 instance.`,
28 | }
29 |
30 | // Execute adds all child commands to the root command and sets flags appropriately.
31 | // This is called by main.main(). It only needs to happen once to the rootCmd.
32 | func Execute() {
33 | 	if err := rootCmd.Execute(); err != nil {
34 | 		fmt.Println(err)
35 | 		os.Exit(1)
36 | 	}
37 | }
38 |
39 | func init() {
40 | 	rootCmd.PersistentFlags().StringVar(&hostAddress, "a", "localhost", "Host address for a specific goworkers2 instance.")
41 | 	rootCmd.PersistentFlags().StringVar(&port, "p", "8080", "Port number for a specific goworkers2 instance.")
42 | }
--------------------------------------------------------------------------------
/scheduled_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"testing"
6 |
7 | 	"github.com/digitalocean/go-workers2/storage"
8 | 	"github.com/go-redis/redis/v8"
9 | 	"github.com/stretchr/testify/assert"
10 | )
11 |
12 | func TestScheduled(t *testing.T) {
13 | 	ctx := context.Background()
14 |
15 | 	opts, err := SetupDefaultTestOptionsWithNamespace("prod")
16 | 	assert.NoError(t, err)
17 |
18 | 	scheduled := newScheduledWorker(opts)
19 |
20 | 	rc := opts.client
21 |
22 | 	now := nowToSecondsWithNanoPrecision()
23 |
24 | 	message1, _ := NewMsg("{\"queue\":\"default\",\"foo\":\"bar1\"}")
25 | 	message2, _ := NewMsg("{\"queue\":\"myqueue\",\"foo\":\"bar2\"}")
26 | 	message3, _ := NewMsg("{\"queue\":\"default\",\"foo\":\"bar3\"}")
27 |
28 | 	rc.ZAdd(ctx, retryQueue(opts.Namespace), &redis.Z{Score: now - 60.0, Member: message1.ToJson()}).Result()
29 | 	rc.ZAdd(ctx, retryQueue(opts.Namespace), &redis.Z{Score: now - 10.0, Member: message2.ToJson()}).Result()
30 | 	rc.ZAdd(ctx, retryQueue(opts.Namespace), &redis.Z{Score: now + 60.0, Member: message3.ToJson()}).Result()
31 |
32 | 	scheduled.poll()
33 |
34 | 	defaultCount, _ := rc.LLen(ctx, "prod:queue:default").Result()
35 | 	myqueueCount, _ := rc.LLen(ctx, "prod:queue:myqueue").Result()
36 | 	pending, _ := rc.ZCard(ctx, retryQueue(opts.Namespace)).Result()
37 |
38 | 	assert.Equal(t, int64(1), defaultCount)
39 | 	assert.Equal(t, int64(1), myqueueCount)
40 | 	assert.Equal(t, int64(1), pending)
41 | }
42 |
43 | func retryQueue(namespace string) string {
44 | 	return namespace + storage.RetryKey
45 | }
--------------------------------------------------------------------------------
/middleware.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | // JobFunc is a message processor
4 | type JobFunc func(message *Msg) error
5 |
6 | // MiddlewareFunc is an extra function on the processing pipeline
7 | type MiddlewareFunc func(queue string, m *Manager, next JobFunc) JobFunc
8 |
9 | // Middlewares contains the list of all configured middleware functions
10 | type Middlewares []MiddlewareFunc
11 |
12 | // Append adds middleware to the end of the processing pipeline
13 | func (m Middlewares) Append(mid MiddlewareFunc) Middlewares {
14 | 	return append(m, mid)
15 | }
16 |
17 | // Prepend adds middleware to the front of the processing pipeline
18 | func (m Middlewares) Prepend(mid MiddlewareFunc) Middlewares {
19 | 	return append(Middlewares{mid}, m...)
20 | }
21 |
22 | func (m Middlewares) build(queue string, mgr *Manager, final JobFunc) JobFunc {
23 | 	for i := len(m) - 1; i >= 0; i-- {
24 | 		final = m[i](queue, mgr, final)
25 | 	}
26 | 	return final
27 | }
28 |
29 | // NewMiddlewares creates the processing pipeline given the list of middleware funcs
30 | func NewMiddlewares(mids ...MiddlewareFunc) Middlewares {
31 | 	return Middlewares(mids)
32 | }
33 |
34 | // This is a variable for testing reasons
35 | var defaultMiddlewares = NewMiddlewares(
36 | 	LogMiddleware,
37 | 	RetryMiddleware,
38 | 	StatsMiddleware,
39 | )
40 |
41 | // DefaultMiddlewares creates the default middleware pipeline
42 | func DefaultMiddlewares() Middlewares {
43 | 	return defaultMiddlewares
44 | }
45 |
46 | // NopMiddleware does nothing
47 | func NopMiddleware(queue string, mgr *Manager, final JobFunc) JobFunc {
48 | 	return final
49 | }
--------------------------------------------------------------------------------
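build wraps the final JobFunc from the last middleware to the first, so the first entry in the list runs outermost. A sketch in the style of middleware_test.go; it lives in package workers because build is unexported, and the example name is illustrative:

package workers

import "fmt"

func ExampleMiddlewareOrder() {
	// trace returns a middleware that logs entry and exit around the job.
	trace := func(name string) MiddlewareFunc {
		return func(queue string, mgr *Manager, next JobFunc) JobFunc {
			return func(m *Msg) error {
				fmt.Println(name, "enter")
				err := next(m)
				fmt.Println(name, "leave")
				return err
			}
		}
	}

	msg, _ := NewMsg(`{"foo":"bar"}`)
	// m1 is added first, so it wraps m2, which wraps the job.
	NewMiddlewares(trace("m1")).Append(trace("m2")).build("myqueue", nil, func(*Msg) error {
		fmt.Println("job")
		return nil
	})(msg)
	// Output:
	// m1 enter
	// m2 enter
	// job
	// m2 leave
	// m1 leave
}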
/scheduled.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"strings"
6 | 	"time"
7 | )
8 |
9 | type scheduledWorker struct {
10 | 	opts Options
11 | 	done chan bool
12 | }
13 |
14 | func (s *scheduledWorker) run() {
15 | 	for {
16 | 		select {
17 | 		case <-s.done:
18 | 			return
19 | 		default:
20 | 		}
21 |
22 | 		s.poll()
23 |
24 | 		time.Sleep(s.opts.PollInterval)
25 | 	}
26 | }
27 |
28 | func (s *scheduledWorker) quit() {
29 | 	close(s.done)
30 | }
31 |
32 | func (s *scheduledWorker) poll() {
33 | 	now := nowToSecondsWithNanoPrecision()
34 |
35 | 	for {
36 | 		rawMessage, err := s.opts.store.DequeueScheduledMessage(context.Background(), now)
37 |
38 | 		if err != nil {
39 | 			break
40 | 		}
41 |
42 | 		message, _ := NewMsg(rawMessage)
43 | 		queue, _ := message.Get("queue").String()
44 | 		queue = strings.TrimPrefix(queue, s.opts.Namespace)
45 | 		message.Set("enqueued_at", nowToSecondsWithNanoPrecision())
46 |
47 | 		s.opts.store.EnqueueMessageNow(context.Background(), queue, message.ToJson())
48 | 	}
49 |
50 | 	for {
51 | 		rawMessage, err := s.opts.store.DequeueRetriedMessage(context.Background(), now)
52 |
53 | 		if err != nil {
54 | 			break
55 | 		}
56 |
57 | 		message, _ := NewMsg(rawMessage)
58 | 		queue, _ := message.Get("queue").String()
59 | 		queue = strings.TrimPrefix(queue, s.opts.Namespace)
60 | 		message.Set("enqueued_at", nowToSecondsWithNanoPrecision())
61 |
62 | 		s.opts.store.EnqueueMessageNow(context.Background(), queue, message.ToJson())
63 | 	}
64 | }
65 |
66 | func newScheduledWorker(opts Options) *scheduledWorker {
67 | 	return &scheduledWorker{
68 | 		opts: opts,
69 | 		done: make(chan bool),
70 | 	}
71 | }
--------------------------------------------------------------------------------
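poll drains every scheduled or retried entry whose sorted-set score is at or before "now"; the score is an epoch timestamp expressed as fractional seconds, as the ZAdd scores in TestScheduled show. nowToSecondsWithNanoPrecision is defined elsewhere in the package; this standalone mirror of it is an assumption based on its name and the test's score arithmetic:

package main

import (
	"fmt"
	"time"
)

// Assumed mirror of the package helper: the current time as fractional
// epoch seconds, suitable as a Redis sorted-set score.
func nowToSecondsWithNanoPrecision() float64 {
	return float64(time.Now().UnixNano()) / float64(time.Second)
}

func main() {
	now := nowToSecondsWithNanoPrecision()
	// A job scored at now-60 is already due; one at now+60 stays pending,
	// matching the three cases exercised by TestScheduled.
	fmt.Printf("due:     %.9f\n", now-60)
	fmt.Printf("now:     %.9f\n", now)
	fmt.Printf("pending: %.9f\n", now+60)
}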
/api_retries.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"encoding/json"
5 | 	"fmt"
6 | 	"net/http"
7 | 	"strconv"
8 | )
9 |
10 | func (s *apiServer) Retries(w http.ResponseWriter, req *http.Request) {
11 | 	page, pageSizeVal, query, err := parseURLQuery(req)
12 | 	if err != nil {
13 | 		s.logger.Println("couldn't retrieve retries filtering query:", err)
14 | 	}
15 |
16 | 	allRetries := []Retries{}
17 | 	for _, m := range s.managers {
18 | 		r, err := m.GetRetries(page, pageSizeVal, query)
19 | 		if err != nil {
20 | 			s.logger.Println("couldn't retrieve retries for manager:", err)
21 | 		} else {
22 | 			allRetries = append(allRetries, r)
23 | 		}
24 | 	}
25 |
26 | 	w.Header().Set("Content-Type", "application/json; charset=utf-8")
27 | 	w.Header().Set("Access-Control-Allow-Origin", "*")
28 |
29 | 	enc := json.NewEncoder(w)
30 | 	enc.SetIndent("", " ")
31 | 	enc.Encode(allRetries)
32 | }
33 |
34 | // Retries stores retry information
35 | type Retries struct {
36 | 	TotalRetryCount int64  `json:"total_retry_count"`
37 | 	RetryJobs       []*Msg `json:"retry_jobs"`
38 | }
39 |
40 | // parseURLQuery extracts the q, page, and page_size parameters, defaulting
41 | // to the first page and a page size of 10.
42 | func parseURLQuery(req *http.Request) (uint64, int64, string, error) {
43 | 	query := req.URL.Query().Get("q")
44 | 	if len(query) > 0 {
45 | 		query = fmt.Sprintf("*%s*", query)
46 | 	} else {
47 | 		return 0, 10, query, nil
48 | 	}
49 |
50 | 	var pageVal uint64
51 | 	page := req.URL.Query().Get("page")
52 | 	if len(page) > 0 {
53 | 		parsed, err := strconv.ParseUint(page, 10, 64)
54 | 		if err != nil {
55 | 			return 0, 10, query, err
56 | 		}
57 | 		pageVal = parsed
58 | 	} else {
59 | 		return 0, 10, query, nil
60 | 	}
61 |
62 | 	var pageSizeVal int64
63 | 	pageSize := req.URL.Query().Get("page_size")
64 | 	if len(pageSize) > 0 {
65 | 		parsed, err := strconv.ParseInt(pageSize, 10, 64)
66 | 		if err != nil {
67 | 			return pageVal, 10, query, err
68 | 		}
69 | 		pageSizeVal = parsed
70 | 	} else {
71 | 		return pageVal, 10, query, nil
72 | 	}
73 |
74 | 	return pageVal, pageSizeVal, query, nil
75 | }
--------------------------------------------------------------------------------
/task_runner.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"fmt"
5 | 	"log"
6 | 	"math/rand"
7 | 	"sync"
8 | 	"time"
9 | )
10 |
11 | type taskRunner struct {
12 | 	stop       chan bool
13 | 	handler    JobFunc
14 | 	currentMsg *Msg
15 | 	lock       sync.RWMutex
16 | 	logger     *log.Logger
17 | 	tid        string
18 | }
19 |
20 | func (w *taskRunner) quit() {
21 | 	close(w.stop)
22 | }
23 |
24 | var alphaNumericRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
25 |
26 | func init() {
27 | 	rand.Seed(time.Now().UnixNano())
28 | }
29 |
30 | func randSeq(n int) string {
31 | 	b := make([]rune, n)
32 | 	for i := range b {
33 | 		b[i] = alphaNumericRunes[rand.Intn(len(alphaNumericRunes))]
34 | 	}
35 | 	return string(b)
36 | }
37 |
38 | func (w *taskRunner) work(messages <-chan *Msg, done chan<- *Msg, ready chan<- bool) {
39 | 	for {
40 | 		select {
41 | 		case msg := <-messages:
42 | 			msg.startedAt = time.Now().UTC().Unix()
43 |
44 | 			w.lock.Lock()
45 | 			w.currentMsg = msg
46 | 			w.lock.Unlock()
47 |
48 | 			if err := w.process(msg); err != nil {
49 | 				w.logger.Println("ERR:", err)
50 | 			}
51 |
52 | 			w.lock.Lock()
53 | 			w.currentMsg = nil
54 | 			w.lock.Unlock()
55 |
56 | 			done <- msg
57 |
58 | 		case ready <- true:
59 | 			// Signaled to fetcher that we're
60 | 			// ready to accept a message
61 | 		case <-w.stop:
62 | 			return
63 | 		}
64 | 	}
65 | }
66 |
67 | func (w *taskRunner) process(message *Msg) (err error) {
68 | 	defer func() {
69 | 		if e := recover(); e != nil {
70 | 			var ok bool
71 | 			if err, ok = e.(error); !ok {
72 | 				err = fmt.Errorf("%v", e)
73 | 			}
74 | 		}
75 | 	}()
76 |
77 | 	return w.handler(message)
78 | }
79 |
80 | func (w *taskRunner) inProgressMessage() *Msg {
81 | 	w.lock.RLock()
82 | 	defer w.lock.RUnlock()
83 | 	return w.currentMsg
84 | }
85 |
86 | func newTaskRunner(logger *log.Logger, handler JobFunc) *taskRunner {
87 | 	return &taskRunner{
88 | 		handler: handler,
89 | 		stop:    make(chan bool),
90 | 		logger:  logger,
91 | 		tid:     randSeq(3),
92 | 	}
93 | }
--------------------------------------------------------------------------------
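taskRunner.process above, like the logging, stats, and retry middlewares earlier in this repo, converts panics into ordinary errors with a deferred recover. A standalone sketch of that pattern in isolation:

package main

import "fmt"

// runJob mirrors the recover pattern used by process and the middlewares:
// a panic inside the job surfaces as a normal error return.
func runJob(job func() error) (err error) {
	defer func() {
		if e := recover(); e != nil {
			var ok bool
			// Panics with an error value are returned directly; any
			// other panic value is wrapped via fmt.Errorf.
			if err, ok = e.(error); !ok {
				err = fmt.Errorf("%v", e)
			}
		}
	}()
	return job()
}

func main() {
	fmt.Println(runJob(func() error { panic("boom") })) // boom
	fmt.Println(runJob(func() error { return nil }))    // <nil>
}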
/msg.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"log"
5 | 	"os"
6 | 	"reflect"
7 |
8 | 	"github.com/bitly/go-simplejson"
9 | )
10 |
11 | // Logger is the default go-workers2 logger, only used here in this file.
12 | // TODO: remove this
13 | var Logger = log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds)
14 |
15 | type data struct {
16 | 	*simplejson.Json
17 | }
18 |
19 | // Msg is the struct for job data (parameters and metadata)
20 | type Msg struct {
21 | 	*data
22 | 	original  string
23 | 	ack       bool
24 | 	startedAt int64
25 | }
26 |
27 | // Args is the set of parameters for a message
28 | type Args struct {
29 | 	*data
30 | }
31 |
32 | // Class returns class attribute of a message
33 | func (m *Msg) Class() string {
34 | 	return m.Get("class").MustString()
35 | }
36 |
37 | // Jid returns job id attribute of a message
38 | func (m *Msg) Jid() string {
39 | 	return m.Get("jid").MustString()
40 | }
41 |
42 | // Args returns arguments attribute of a message
43 | func (m *Msg) Args() *Args {
44 | 	if args, ok := m.CheckGet("args"); ok {
45 | 		return &Args{&data{args}}
46 | 	}
47 |
48 | 	d, _ := newData("[]")
49 | 	return &Args{d}
50 | }
51 |
52 | // OriginalJson returns the original JSON message
53 | func (m *Msg) OriginalJson() string {
54 | 	return m.original
55 | }
56 |
57 | // ToJson returns the message data in JSON format
58 | func (d *data) ToJson() string {
59 | 	json, err := d.Encode()
60 |
61 | 	if err != nil {
62 | 		Logger.Println("ERR: Couldn't generate json from", d, ":", err)
63 | 	}
64 |
65 | 	return string(json)
66 | }
67 |
68 | func (d *data) Equals(other interface{}) bool {
69 | 	otherJSON := reflect.ValueOf(other).MethodByName("ToJson").Call([]reflect.Value{})
70 | 	return d.ToJson() == otherJSON[0].String()
71 | }
72 |
73 | // NewMsg returns a new message
74 | func NewMsg(content string) (*Msg, error) {
75 | 	d, err := newData(content)
76 | 	if err != nil {
77 | 		return nil, err
78 | 	}
79 | 	return &Msg{
80 | 		data:      d,
81 | 		original:  content,
82 | 		ack:       true,
83 | 		startedAt: 0,
84 | 	}, nil
85 | }
86 |
87 | func newData(content string) (*data, error) {
88 | 	json, err := simplejson.NewJson([]byte(content))
89 | 	if err != nil {
90 | 		return nil, err
91 | 	}
92 | 	return &data{json}, nil
93 | }
--------------------------------------------------------------------------------
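Msg embeds simplejson's Json, so accessors like Get and GetIndex are available alongside the helpers above. A small usage sketch:

package main

import (
	"fmt"

	workers "github.com/digitalocean/go-workers2"
)

func main() {
	msg, err := workers.NewMsg(`{"class":"MyWorker","jid":"jid-123","args":["foo",42]}`)
	if err != nil {
		panic(err)
	}

	fmt.Println(msg.Class())         // MyWorker
	fmt.Println(msg.Jid())           // jid-123
	fmt.Println(msg.Args().ToJson()) // ["foo",42]

	// GetIndex comes from the embedded simplejson.Json.
	first, _ := msg.Args().GetIndex(0).String()
	fmt.Println(first) // foo
}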
/api_server.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | 	"log"
7 | 	"net/http"
8 | 	"os"
9 | 	"sync"
10 | )
11 |
12 | // APIOptions contains the set of configuration options for the global api
13 | type APIOptions struct {
14 | 	Logger *log.Logger
15 | 	Mux    *http.ServeMux
16 | }
17 |
18 | type apiServer struct {
19 | 	lock     sync.Mutex
20 | 	managers map[string]*Manager
21 | 	logger   *log.Logger
22 | 	mux      *http.ServeMux
23 | }
24 |
25 | func (s *apiServer) registerManager(m *Manager) {
26 | 	s.lock.Lock()
27 | 	defer s.lock.Unlock()
28 | 	if s.managers == nil {
29 | 		s.managers = make(map[string]*Manager)
30 | 	}
31 | 	s.managers[m.uuid] = m
32 | }
33 |
34 | func (s *apiServer) deregisterManager(m *Manager) {
35 | 	s.lock.Lock()
36 | 	defer s.lock.Unlock()
37 | 	delete(s.managers, m.uuid)
38 | }
39 |
40 | var globalHTTPServer *http.Server
41 |
42 | var globalAPIServer = &apiServer{
43 | 	managers: map[string]*Manager{},
44 | 	logger:   log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds),
45 | 	mux:      http.NewServeMux(),
46 | }
47 |
48 | // ConfigureAPIServer allows global API server configuration with the given options
49 | func ConfigureAPIServer(options APIOptions) {
50 | 	if options.Logger != nil {
51 | 		globalAPIServer.logger = options.Logger
52 | 	}
53 |
54 | 	if options.Mux != nil {
55 | 		globalAPIServer.mux = options.Mux
56 | 	}
57 | }
58 |
59 | // RegisterAPIEndpoints sets up API server endpoints
60 | func RegisterAPIEndpoints(mux *http.ServeMux) {
61 | 	mux.HandleFunc("/stats", globalAPIServer.Stats)
62 | 	mux.HandleFunc("/retries", globalAPIServer.Retries)
63 | }
64 |
65 | // StartAPIServer starts the API server
66 | func StartAPIServer(port int) {
67 | 	RegisterAPIEndpoints(globalAPIServer.mux)
68 |
69 | 	globalAPIServer.logger.Println("APIs are available at", fmt.Sprintf("http://localhost:%v/", port))
70 |
71 | 	globalHTTPServer = &http.Server{Addr: fmt.Sprint(":", port), Handler: globalAPIServer.mux}
72 | 	if err := globalHTTPServer.ListenAndServe(); err != nil {
73 | 		globalAPIServer.logger.Println(err)
74 | 	}
75 | }
76 |
77 | // StopAPIServer stops the API server
78 | func StopAPIServer() {
79 | 	if globalHTTPServer != nil {
80 | 		globalHTTPServer.Shutdown(context.Background())
81 | 	}
82 | }
--------------------------------------------------------------------------------
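StartAPIServer blocks in ListenAndServe, so callers typically run it on its own goroutine; managers register themselves with the global server elsewhere in the package (manager.go, not shown in this excerpt). A minimal sketch under those assumptions:

package main

import (
	"time"

	workers "github.com/digitalocean/go-workers2"
)

func main() {
	// Serve /stats and /retries on :8080 in the background.
	go workers.StartAPIServer(8080)

	// ... build and run managers here; each registered manager is
	// reported by the API endpoints ...
	time.Sleep(time.Minute) // placeholder for real work

	workers.StopAPIServer()
}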
/heartbeat_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"encoding/json"
5 | 	"log"
6 | 	"os"
7 | 	"testing"
8 | 	"time"
9 |
10 | 	"github.com/stretchr/testify/assert"
11 | )
12 |
13 | func TestBuildHeartbeat(t *testing.T) {
14 | 	namespace := "prod"
15 | 	opts := testOptionsWithNamespace(namespace)
16 | 	mgr, err := newTestManager(opts, true)
17 | 	assert.NoError(t, err)
18 |
19 | 	mgr.AddWorker("somequeue", 5, func(m *Msg) error {
20 | 		return nil
21 | 	})
22 |
23 | 	mgr.AddWorker("second_queue", 10, func(m *Msg) error {
24 | 		return nil
25 | 	})
26 |
27 | 	heartbeat, err := mgr.buildHeartbeat(time.Now().UTC(), time.Second)
28 | 	assert.Nil(t, err)
29 |
30 | 	hostname, _ := os.Hostname()
31 |
32 | 	info := &HeartbeatInfo{}
33 |
34 | 	err = json.Unmarshal([]byte(heartbeat.Info), info)
35 | 	assert.Nil(t, err)
36 |
37 | 	assert.Equal(t, hostname, info.Hostname)
38 | 	assert.Equal(t, "prod", info.Tag)
39 | 	assert.ElementsMatch(t, []string{"somequeue", "second_queue"}, info.Queues)
40 | 	assert.Equal(t, 15, info.Concurrency)
41 | 	assert.Equal(t, []string{}, info.Labels)
42 |
43 | 	assert.Equal(t, false, heartbeat.Quiet)
44 | }
45 |
46 | func TestBuildHeartbeatWorkerMessage(t *testing.T) {
47 | 	namespace := "prod"
48 | 	opts := testOptionsWithNamespace(namespace)
49 | 	mgr, err := newTestManager(opts, true)
50 | 	assert.NoError(t, err)
51 |
52 | 	mgr.AddWorker("somequeue", 1, func(m *Msg) error {
53 | 		return nil
54 | 	})
55 | 	msg, err := NewMsg("{\"class\":\"MyWorker\",\"jid\":\"jid-123\"}")
56 | 	assert.NoError(t, err)
57 |
58 | 	testLogger := log.New(os.Stdout, "test-go-workers2: ", log.Ldate|log.Lmicroseconds)
59 |
60 | 	tr := newTaskRunner(testLogger, func(m *Msg) error {
61 | 		return nil
62 | 	})
63 |
64 | 	tr.currentMsg = msg
65 |
66 | 	firstWorker := mgr.workers[0]
67 | 	firstWorker.inProgressQueue = "testinprogressqueue"
68 | 	firstWorker.runners = []*taskRunner{tr}
69 |
70 | 	heartbeat, err := mgr.buildHeartbeat(time.Now().UTC(), time.Second)
71 | 	assert.Nil(t, err)
72 |
73 | 	assert.Equal(t, 1, len(heartbeat.WorkerHeartbeats))
74 | 	for _, v := range heartbeat.WorkerHeartbeats {
75 | 		assert.Equal(t, firstWorker.queue, v.Queue)
76 | 		assert.Equal(t, firstWorker.inProgressQueue, v.InProgressQueue)
77 | 		assert.Equal(t, tr.tid, v.Tid)
78 | 		assert.Nil(t, err)
79 | 	}
80 | }
--------------------------------------------------------------------------------
/middleware_stats_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"errors"
6 | 	"strconv"
7 | 	"testing"
8 | 	"time"
9 |
10 | 	"github.com/stretchr/testify/assert"
11 | )
12 |
13 | func TestProcessedStats(t *testing.T) {
14 | 	ctx := context.Background()
15 |
16 | 	namespace := "prod"
17 | 	opts, err := SetupDefaultTestOptionsWithNamespace(namespace)
18 | 	assert.NoError(t, err)
19 |
20 | 	mgr := &Manager{opts: opts}
21 |
22 | 	rc := opts.client
23 |
24 | 	count, _ := rc.Get(ctx, "prod:stat:processed").Result()
25 | 	countInt, _ := strconv.ParseInt(count, 10, 64)
26 | 	assert.Equal(t, int64(0), countInt)
27 |
28 | 	layout := "2006-01-02"
29 | 	dayCount, _ := rc.Get(ctx, "prod:stat:processed:"+time.Now().UTC().Format(layout)).Result()
30 | 	dayCountInt, _ := strconv.ParseInt(dayCount, 10, 64)
31 | 	assert.Equal(t, int64(0), dayCountInt)
32 |
33 | 	message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true}")
34 | 	NewMiddlewares(StatsMiddleware).build("myqueue", mgr, func(m *Msg) error {
35 | 		// noop
36 | 		return nil
37 | 	})(message)
38 |
39 | 	count, _ = rc.Get(ctx, "prod:stat:processed").Result()
40 | 	countInt, _ = strconv.ParseInt(count, 10, 64)
41 | 	assert.Equal(t, int64(1), countInt)
42 |
43 | 	dayCount, _ = rc.Get(ctx, "prod:stat:processed:"+time.Now().UTC().Format(layout)).Result()
44 | 	dayCountInt, _ = strconv.ParseInt(dayCount, 10, 64)
45 | 	assert.Equal(t, int64(1), dayCountInt)
46 | }
47 |
48 | func TestFailedStats(t *testing.T) {
49 | 	ctx := context.Background()
50 |
51 | 	namespace := "prod"
52 | 	opts, err := SetupDefaultTestOptionsWithNamespace(namespace)
53 | 	assert.NoError(t, err)
54 |
55 | 	mgr := &Manager{opts: opts}
56 |
57 | 	rc := opts.client
58 |
59 | 	layout := "2006-01-02"
60 |
61 | 	count, _ := rc.Get(ctx, "prod:stat:failed").Result()
62 | 	countInt, _ := strconv.ParseInt(count, 10, 64)
63 | 	assert.Equal(t, int64(0), countInt)
64 |
65 | 	dayCount, _ := rc.Get(ctx, "prod:stat:failed:"+time.Now().UTC().Format(layout)).Result()
66 | 	dayCountInt, _ := strconv.ParseInt(dayCount, 10, 64)
67 | 	assert.Equal(t, int64(0), dayCountInt)
68 |
69 | 	message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true}")
70 |
71 | 	var job = func(message *Msg) error {
72 | 		panic(errors.New("AHHHH"))
73 | 	}
74 |
75 | 	NewMiddlewares(StatsMiddleware).build("myqueue", mgr, job)(message)
76 |
77 | 	count, _ = rc.Get(ctx, "prod:stat:failed").Result()
78 | 	countInt, _ = strconv.ParseInt(count, 10, 64)
79 | 	assert.Equal(t, int64(1), countInt)
80 |
81 | 	dayCount, _ = rc.Get(ctx, "prod:stat:failed:"+time.Now().UTC().Format(layout)).Result()
82 | 	dayCountInt, _ = strconv.ParseInt(dayCount, 10, 64)
83 | 	assert.Equal(t, int64(1), dayCountInt)
84 | }
--------------------------------------------------------------------------------
/api_retries_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"encoding/json"
6 | 	"errors"
7 | 	"log"
8 | 	"net/http/httptest"
9 | 	"os"
10 | 	"testing"
11 |
12 | 	"github.com/stretchr/testify/assert"
13 | )
14 |
15 | func TestRetries_Empty(t *testing.T) {
16 | 	a := apiServer{}
17 |
18 | 	recorder := httptest.NewRecorder()
19 | 	request := httptest.NewRequest("GET", "/retries", nil)
20 | 	a.Retries(recorder, request)
21 |
22 | 	assert.Equal(t, "[]\n", recorder.Body.String())
23 | }
24 |
25 | func TestRetries_NotEmpty(t *testing.T) {
26 | 	a := &apiServer{
27 | 		logger: log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds),
28 | 	}
29 |
30 | 	// test API replies without registered workers
31 | 	recorder := httptest.NewRecorder()
32 | 	request := httptest.NewRequest("GET", "/retries", nil)
33 | 	a.Retries(recorder, request)
34 |
35 | 	assert.Equal(t, "[]\n", recorder.Body.String())
36 |
37 | 	// test API replies with registered workers
38 | 	opts, err := SetupDefaultTestOptionsWithNamespace("prod")
39 | 	assert.NoError(t, err)
40 |
41 | 	mgr := &Manager{opts: opts}
42 | 	a.registerManager(mgr)
43 |
44 | 	recorder = httptest.NewRecorder()
45 | 	request = httptest.NewRequest("GET", "/retries", nil)
46 | 	a.Retries(recorder, request)
47 |
48 | 	actualWithManagerBytes := recorder.Body.Bytes()
49 | 	actualReplyParsed := []*Retries{}
50 | 	err = json.Unmarshal(actualWithManagerBytes, &actualReplyParsed)
51 | 	assert.NoError(t, err)
52 | 	assert.Equal(t, []*Retries{{}}, actualReplyParsed)
53 |
54 | 	//puts messages in retry queue when they fail
55 | 	message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true}")
56 |
57 | 	tests := []struct {
58 | 		name string
59 | 		f    JobFunc
60 | 	}{
61 | 		{
62 | 			name: "retry on panic",
63 | 			f:    panickingFunc,
64 | 		},
65 | 		{
66 | 			name: "retry on error",
67 | 			f: func(m *Msg) error {
68 | 				return errors.New("ERROR")
69 | 			},
70 | 		},
71 | 	}
72 |
73 | 	var messages []string
74 | 	for index, test := range tests {
75 | 		// Test panic
76 | 		wares.build("myqueue", mgr, test.f)(message)
77 |
78 | 		// retries order is not guaranteed
79 | 		retries, err := opts.client.ZRange(context.Background(), retryQueue(opts.Namespace), 0, -1).Result()
80 | 		assert.NoError(t, err)
81 | 		assert.Len(t, retries, index+1)
82 | 		messages = append(messages, message.ToJson())
83 | 		assert.ElementsMatch(t, messages, retries)
84 | 	}
85 |
86 | 	recorder = httptest.NewRecorder()
87 | 	request = httptest.NewRequest("GET", "/retries", nil)
88 | 	a.Retries(recorder, request)
89 | 	assert.NoError(t, err)
90 | 	assert.NotEqual(t, "[]\n", recorder.Body.String())
91 | 	assert.NotEqual(t, string(actualWithManagerBytes), recorder.Body.String())
92 | }
--------------------------------------------------------------------------------
/worker.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"log"
5 | 	"sync"
6 | )
7 |
8 | type worker struct {
9 | 	queue           string
10 | 	inProgressQueue string
11 | 	handler         JobFunc
12 | 	concurrency     int
13 | 	runners         []*taskRunner
14 | 	runnersLock     sync.Mutex
15 | 	stop            chan bool
16 | 	running         bool
17 | 	fetcher         Fetcher
18 | 	logger          *log.Logger
19 | }
20 |
21 | func newWorker(logger *log.Logger, queue string, concurrency int, handler JobFunc) *worker {
22 | 	if concurrency <= 0 {
23 | 		concurrency = 1
24 | 	}
25 | 	w := &worker{
26 | 		queue:       queue,
27 | 		handler:     handler,
28 | 		concurrency: concurrency,
29 | 		stop:        make(chan bool),
30 | 		logger:      logger,
31 | 	}
32 | 	return w
33 | }
34 |
35 | func (w *worker) start(fetcher Fetcher) {
36 | 	w.runnersLock.Lock()
37 | 	if w.running {
38 | 		w.runnersLock.Unlock()
39 | 		return
40 | 	}
41 | 	w.running = true
42 | 	w.fetcher = fetcher
43 | 	w.inProgressQueue = fetcher.InProgressQueue()
44 | 	defer func() {
45 | 		w.runnersLock.Lock()
46 | 		w.running = false
47 | 		w.runnersLock.Unlock()
48 | 	}()
49 |
50 | 	var wg sync.WaitGroup
51 | 	wg.Add(w.concurrency)
52 |
53 | 	go fetcher.Fetch()
54 |
55 | 	done := make(chan *Msg)
56 | 	w.runners = make([]*taskRunner, w.concurrency)
57 | 	for i := 0; i < w.concurrency; i++ {
58 | 		r := newTaskRunner(w.logger, w.handler)
59 | 		w.runners[i] = r
60 | 		go func() {
61 | 			r.work(fetcher.Messages(), done, fetcher.Ready())
62 | 			wg.Done()
63 | 		}()
64 | 	}
65 | 	exit := make(chan bool)
66 | 	go func() {
67 | 		wg.Wait()
68 | 		close(exit)
69 | 	}()
70 |
71 | 	// Now that we're all set up, unlock so that stats can check.
72 | 	w.runnersLock.Unlock()
73 |
74 | 	for {
75 | 		select {
76 | 		case msg := <-done:
77 | 			if msg.ack {
78 | 				fetcher.Acknowledge(msg)
79 | 			}
80 | 		case <-w.stop:
81 | 			if !fetcher.Closed() {
82 | 				fetcher.Close()
83 |
84 | 				// we need to relock the runners so we can shut this down
85 | 				w.runnersLock.Lock()
86 | 				for _, r := range w.runners {
87 | 					r.quit()
88 | 				}
89 | 				w.runnersLock.Unlock()
90 | 			}
91 | 		case <-exit:
92 | 			return
93 | 		}
94 | 	}
95 | }
96 |
97 | func (w *worker) quit() {
98 | 	w.runnersLock.Lock()
99 | 	defer w.runnersLock.Unlock()
100 | 	if w.running {
101 | 		w.stop <- true
102 | 	}
103 | }
104 |
105 | func (w *worker) inProgressMessages() []*Msg {
106 | 	w.runnersLock.Lock()
107 | 	defer w.runnersLock.Unlock()
108 | 	var res []*Msg
109 | 	for _, r := range w.runners {
110 | 		if m := r.inProgressMessage(); m != nil {
111 | 			res = append(res, m)
112 | 		}
113 | 	}
114 | 	return res
115 | }
--------------------------------------------------------------------------------
/task_runner_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"errors"
5 | 	"log"
6 | 	"os"
7 | 	"sync"
8 | 	"testing"
9 |
10 | 	"github.com/stretchr/testify/assert"
11 | )
12 |
13 | func TestTaskRunner_process(t *testing.T) {
14 | 	testLogger := log.New(os.Stdout, "test-go-workers2: ", log.Ldate|log.Lmicroseconds)
15 |
16 | 	msg, _ := NewMsg(`{}`)
17 |
18 | 	t.Run("handles-panic", func(t *testing.T) {
19 | 		tr := newTaskRunner(testLogger, func(m *Msg) error {
20 | 			panic("task-test-panic")
21 | 		})
22 | 		err := tr.process(msg)
23 | 		assert.EqualError(t, err, "task-test-panic")
24 |
25 | 	})
26 |
27 | 	t.Run("returns-error", func(t *testing.T) {
28 | 		var errorToRet error
29 | 		tr := newTaskRunner(testLogger, func(m *Msg) error {
30 | 			return errorToRet
31 | 		})
32 | 		err := tr.process(msg)
33 | 		assert.NoError(t, err)
34 |
35 | 		errorToRet = errors.New("ret me")
36 | 		err = tr.process(msg)
37 | 		assert.EqualError(t, err, errorToRet.Error())
38 | 	})
39 | }
40 |
41 | func TestTaskRunner(t *testing.T) {
42 | 	msgCh := make(chan *Msg)
43 | 	doneCh := make(chan *Msg)
44 | 	readyCh := make(chan bool)
45 |
46 | 	syncCh := make(chan bool)
47 | 	noSyncMsg := func() *Msg {
48 | 		m, _ := NewMsg(`{}`)
49 | 		return m
50 | 	}
51 | 	syncMsg := func() *Msg {
52 | 		m, _ := NewMsg(`{"sync": true}`)
53 | 		return m
54 | 	}
55 |
56 | 	tr := newTaskRunner(Logger, func(m *Msg) error {
57 | 		if m.Get("sync").MustBool() {
58 | 			syncCh <- true
59 | 			<-syncCh
60 | 		}
61 | 		return nil
62 | 	})
63 |
64 | 	var wg sync.WaitGroup
65 | 	wg.Add(1)
66 | 	go func() {
67 | 		tr.work(msgCh, doneCh, readyCh)
68 | 		wg.Done()
69 | 	}()
70 |
71 | 	t.Run("consumes-messages", func(t *testing.T) {
72 | 		msgCh <- noSyncMsg()
73 | 		doneMsg := <-doneCh
74 | 		assert.NotNil(t, doneMsg)
75 | 		assert.NotZero(t, doneMsg.startedAt)
76 |
77 | 		msgCh <- noSyncMsg()
78 | 		doneMsg = <-doneCh
79 | 		assert.NotNil(t, doneMsg)
80 | 		assert.NotZero(t, doneMsg.startedAt)
81 | 	})
82 |
83 | 	t.Run("sends-to-ready-when-no-message", func(t *testing.T) {
84 | 		<-readyCh
85 | 	})
86 |
87 | 	t.Run(".inProgressMessage", func(t *testing.T) {
88 | 		msgCh <- syncMsg()
89 | 		<-syncCh
90 | 		ipm := tr.inProgressMessage()
91 | 		assert.NotNil(t, ipm)
92 | 		assert.NotZero(t, ipm.startedAt)
93 |
94 | 		syncCh <- true
95 | 		doneMsg := <-doneCh
96 | 		assert.NotNil(t, doneMsg)
97 | 		assert.NotZero(t, doneMsg.startedAt)
98 |
99 | 		ipm = tr.inProgressMessage()
100 | 		assert.Nil(t, ipm)
101 | 	})
102 |
103 | 	t.Run(".quit", func(t *testing.T) {
104 | 		tr.quit()
105 | 		// wg.Wait will cause the test to timeout if tr.quit() doesn't shut down the taskRunner
106 | 		wg.Wait()
107 | 	})
108 |
109 | }
--------------------------------------------------------------------------------
/test_utils.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"time"
6 | )
7 |
8 | const (
9 | 	testServerAddr = "localhost:6379"
10 | 	testDatabase   = 15
11 | )
12 |
13 | // SetupDefaultTestOptions will setup processed default test options without a namespace
14 | func SetupDefaultTestOptions() (Options, error) {
15 | 	return SetupDefaultTestOptionsWithNamespace("")
16 | }
17 |
18 | // SetupDefaultTestOptionsWithNamespace sets up processed default test options with namespace and flushes redis
19 | // if client is configured
20 | func SetupDefaultTestOptionsWithNamespace(namespace string) (Options, error) {
21 | 	opts, err := processOptions(testOptionsWithNamespace(namespace))
22 | 	if opts.client != nil {
23 | 		_, err = opts.client.FlushDB(context.Background()).Result()
24 | 		if err != nil {
25 | 			return Options{}, err
26 | 		}
27 | 	}
28 | 	return opts, err
29 | }
30 |
31 | func testOptionsWithNamespace(namespace string) Options {
32 | 	return Options{
33 | 		ServerAddr: testServerAddr,
34 | 		ProcessID:  "1",
35 | 		Database:   testDatabase,
36 | 		PoolSize:   1,
37 | 		Namespace:  namespace,
38 | 	}
39 | }
40 |
41 | // SetupDefaultTestOptionsWithHeartbeat creates default options for testing heartbeat related features
42 | func SetupDefaultTestOptionsWithHeartbeat(namespace, processID string) Options {
43 | 	return Options{
44 | 		ServerAddr: testServerAddr,
45 | 		ProcessID:  processID,
46 | 		Database:   testDatabase,
47 | 		PoolSize:   1,
48 | 		Namespace:  namespace,
49 | 		Heartbeat: &HeartbeatOptions{
50 | 			Interval:     2 * time.Second,
51 | 			HeartbeatTTL: 6 * time.Second,
52 | 		},
53 | 	}
54 | }
55 |
56 | // CallCounter counts and synchronizes calls
57 | type CallCounter struct {
58 | 	count     int
59 | 	syncCh    chan *Msg
60 | 	ackSyncCh chan bool
61 | }
62 |
63 | // NewCallCounter returns a new CallCounter
64 | func NewCallCounter() *CallCounter {
65 | 	return &CallCounter{
66 | 		syncCh:    make(chan *Msg),
67 | 		ackSyncCh: make(chan bool),
68 | 	}
69 | }
70 |
71 | func (j *CallCounter) getOpt(m *Msg, opt string) bool {
72 | 	if m == nil {
73 | 		return false
74 | 	}
75 | 	return m.Args().GetIndex(0).Get(opt).MustBool()
76 | }
77 |
78 | // F is the counting job function; it honors the "sync" and "noack" args options
79 | func (j *CallCounter) F(m *Msg) error {
80 | 	j.count++
81 | 	if m != nil {
82 | 		if j.getOpt(m, "sync") {
83 | 			j.syncCh <- m
84 | 			<-j.ackSyncCh
85 | 		}
86 | 		m.ack = !j.getOpt(m, "noack")
87 | 	}
88 | 	return nil
89 | }
90 |
91 | func (j *CallCounter) syncMsg() *Msg {
92 | 	m, _ := NewMsg(`{"args": [{"sync": true}]}`)
93 | 	return m
94 | }
95 |
96 | func (j *CallCounter) msg() *Msg {
97 | 	m, _ := NewMsg(`{"args": []}`)
98 | 	return m
99 | }
100 |
101 | func (j *CallCounter) noAckMsg() *Msg {
102 | 	m, _ := NewMsg(`{"args": [{"noack": true}]}`)
103 | 	return m
104 | }
--------------------------------------------------------------------------------
/middleware_test.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"testing"
5 |
6 | 	"github.com/stretchr/testify/assert"
7 | )
8 |
9 | func arrayCompare(a1, a2 []string) bool {
10 | 	if len(a1) != len(a2) {
11 | 		return false
12 | 	}
13 |
14 | 	for i := 0; i < len(a1); i++ {
15 | 		if a1[i] != a2[i] {
16 | 			return false
17 | 		}
18 | 	}
19 |
20 | 	return true
21 | }
22 |
23 | type orderMiddleware struct {
24 | 	name  string
25 | 	order *[]string
26 | }
27 |
28 | func (m *orderMiddleware) f() MiddlewareFunc {
29 | 	return func(queue string, mgr *Manager, next JobFunc) JobFunc {
30 | 		return func(message *Msg) (result error) {
31 | 			*m.order = append(*m.order, m.name+" enter")
32 | 			result = next(message)
33 | 			*m.order = append(*m.order, m.name+" leave")
34 | 			return
35 | 		}
36 | 	}
37 | }
38 |
39 | func TestNewMiddlewares(t *testing.T) {
40 | 	//no middleware
41 | 	middlewares := NewMiddlewares()
42 | 	assert.Equal(t, 0, len(middlewares))
43 |
44 | 	//middleware set when initializing
45 | 	order := make([]string, 0)
46 | 	first := orderMiddleware{"m1", &order}
47 | 	second := orderMiddleware{"m2", &order}
48 | 	middlewares = NewMiddlewares(first.f(), second.f())
49 |
50 | 	message, _ := NewMsg("{\"foo\":\"bar\"}")
51 | 	middlewares.build("myqueue", nil, func(message *Msg) error {
52 | 		order = append(order, "job")
53 | 		return nil
54 | 	})(message)
55 |
56 | 	expectedOrder := []string{
57 | 		"m1 enter",
58 | 		"m2 enter",
59 | 		"job",
60 | 		"m2 leave",
61 | 		"m1 leave",
62 | 	}
63 |
64 | 	assert.Equal(t, expectedOrder, order)
65 | }
66 |
67 | func TestAppendMiddleware(t *testing.T) {
68 | 	order := make([]string, 0)
69 | 	first := orderMiddleware{"m1", &order}
70 | 	second := orderMiddleware{"m2", &order}
71 | 	middleware := NewMiddlewares().Append(first.f()).Append(second.f())
72 |
73 | 	message, _ := NewMsg("{\"foo\":\"bar\"}")
74 | 	middleware.build("myqueue", nil, func(message *Msg) error {
75 | 		order = append(order, "job")
76 | 		return nil
77 | 	})(message)
78 |
79 | 	expectedOrder := []string{
80 | 		"m1 enter",
81 | 		"m2 enter",
82 | 		"job",
83 | 		"m2 leave",
84 | 		"m1 leave",
85 | 	}
86 |
87 | 	assert.Equal(t, expectedOrder, order)
88 | }
89 |
90 | func TestPrependMiddleware(t *testing.T) {
91 | 	order := make([]string, 0)
92 | 	first := orderMiddleware{"m1", &order}
93 | 	second := orderMiddleware{"m2", &order}
94 |
95 | 	middleware := NewMiddlewares().Prepend(first.f()).Prepend(second.f())
96 |
97 | 	message, _ := NewMsg("{\"foo\":\"bar\"}")
98 | 	middleware.build("myqueue", nil, func(message *Msg) error {
99 | 		order = append(order, "job")
100 | 		return nil
101 | 	})(message)
102 |
103 | 	expectedOrder := []string{
104 | 		"m2 enter",
105 | 		"m1 enter",
106 | 		"job",
107 | 		"m1 leave",
108 | 		"m2 leave",
109 | 	}
110 |
111 | 	assert.Equal(t, expectedOrder, order)
112 | }
--------------------------------------------------------------------------------
/middleware_retry.go:
--------------------------------------------------------------------------------
1 | package workers
2 |
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | 	"math"
7 | 	"math/rand"
8 | 	"time"
9 | )
10 |
11 | // RetriesExhaustedFunc gets executed when retry attempts have been exhausted.
12 | type RetriesExhaustedFunc func(queue string, message *Msg, err error)
13 |
14 | const (
15 | 	// DefaultRetryMax is default for max number of retries for a job
16 | 	DefaultRetryMax = 25
17 |
18 | 	// RetryTimeFormat is default for retry time format
19 | 	RetryTimeFormat = "2006-01-02 15:04:05 MST"
20 | )
21 |
22 | func retryProcessError(queue string, mgr *Manager, message *Msg, err error) error {
23 | 	if !retry(message) {
24 | 		return err
25 | 	}
26 | 	if retryCount(message) < retryMax(message) {
27 | 		message.Set("queue", queue)
28 | 		message.Set("error_message", fmt.Sprintf("%v", err))
29 | 		retryCount := incrementRetry(message)
30 |
31 | 		waitDuration := durationToSecondsWithNanoPrecision(
32 | 			time.Duration(
33 | 				secondsToDelay(retryCount),
34 | 			) * time.Second,
35 | 		)
36 |
37 | 		err = mgr.opts.store.EnqueueRetriedMessage(context.Background(), nowToSecondsWithNanoPrecision()+waitDuration, message.ToJson())
38 |
39 | 		// If we can't add the job to the retry queue,
40 | 		// then we shouldn't acknowledge the job, otherwise
41 | 		// it'll disappear into the void.
42 | 		if err != nil {
43 | 			message.ack = false
44 | 		}
45 | 	} else {
46 | 		for _, retriesExhaustedHandler := range mgr.retriesExhaustedHandlers {
47 | 			retriesExhaustedHandler(queue, message, err)
48 | 		}
49 | 	}
50 | 	return err
51 | }
52 |
53 | // RetryMiddleware middleware that allows retries for job failures
54 | func RetryMiddleware(queue string, mgr *Manager, next JobFunc) JobFunc {
55 | 	return func(message *Msg) (err error) {
56 | 		defer func() {
57 | 			if e := recover(); e != nil {
58 | 				var ok bool
59 | 				if err, ok = e.(error); !ok {
60 | 					err = fmt.Errorf("%v", e)
61 | 				}
62 |
63 | 				if err != nil {
64 | 					err = retryProcessError(queue, mgr, message, err)
65 | 				}
66 | 			}
67 |
68 | 		}()
69 |
70 | 		err = next(message)
71 | 		if err != nil {
72 | 			err = retryProcessError(queue, mgr, message, err)
73 | 		}
74 |
75 | 		return
76 | 	}
77 | }
78 |
79 | func retry(message *Msg) bool {
80 | 	retry := false
81 |
82 | 	if param, err := message.Get("retry").Bool(); err == nil {
83 | 		retry = param
84 | 	}
85 |
86 | 	return retry
87 | }
88 |
89 | func retryCount(message *Msg) int {
90 | 	count, _ := message.Get("retry_count").Int()
91 | 	return count
92 | }
93 |
94 | func retryMax(message *Msg) int {
95 | 	max := DefaultRetryMax
96 | 	if messageRetryMax, err := message.Get("retry_max").Int(); err == nil && messageRetryMax >= 0 {
97 | 		max = messageRetryMax
98 | 	}
99 | 	return max
100 | }
101 |
102 | func incrementRetry(message *Msg) (retryCount int) {
103 | 	retryCount = 0
104 |
105 | 	if count, err := message.Get("retry_count").Int(); err != nil {
106 | 		message.Set("failed_at", time.Now().UTC().Format(RetryTimeFormat))
107 | 	} else {
108 | 		message.Set("retried_at", time.Now().UTC().Format(RetryTimeFormat))
109 | 		retryCount = count + 1
110 | 	}
111 |
112 | 	message.Set("retry_count", retryCount)
113 |
114 | 	return
115 | }
116 |
117 | func secondsToDelay(count int) int {
118 | 	power := math.Pow(float64(count), 4)
119 | 	return int(power) + 15 + (rand.Intn(30) * (count + 1))
120 | }
--------------------------------------------------------------------------------
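secondsToDelay grows with the fourth power of the retry count, plus a flat 15 seconds and up to 30*(count+1) seconds of random jitter. A quick illustration of the deterministic part of the curve:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Base delay from secondsToDelay, with the rand.Intn(30)*(count+1)
	// jitter term omitted: count^4 + 15.
	for count := 0; count <= 5; count++ {
		base := int(math.Pow(float64(count), 4)) + 15
		fmt.Printf("retry %d: at least %d seconds\n", count, base)
	}
	// Prints 15, 16, 31, 96, 271, and 640 for retries 0 through 5.
}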
/storage/storage.go:
--------------------------------------------------------------------------------
1 | package storage
2 |
3 | import (
4 | 	"context"
5 | 	"time"
6 | )
7 |
8 | // TODO(wtlangford): Check if the value of these keys are Sidekiq-compatible
9 | const (
10 | 	RetryKey         = "goretry"
11 | 	ScheduledJobsKey = "schedule"
12 | )
13 |
14 | // StorageError is used to return errors from the storage layer
15 | type StorageError string
16 |
17 | func (e StorageError) Error() string { return string(e) }
18 |
19 | // list of known errors
20 | const (
21 | 	NoMessage = StorageError("no message")
22 | )
23 |
24 | // Stats has all the stats related to a manager
25 | type Stats struct {
26 | 	Processed  int64
27 | 	Failed     int64
28 | 	RetryCount int64
29 | 	Enqueued   map[string]int64
30 | }
31 |
32 | // Retries has the list of messages in the retry queue
33 | type Retries struct {
34 | 	TotalRetryCount int64
35 | 	RetryJobs       []string
36 | }
37 |
38 | // Heartbeat is used for the ruby sidekiq web ui
39 | type Heartbeat struct {
40 | 	Identity string `json:"identity"`
41 |
42 | 	Beat            int64  `json:"beat,string"`
43 | 	Quiet           bool   `json:"quiet,string"`
44 | 	Busy            int    `json:"busy,string"`
45 | 	RttUS           int    `json:"rtt_us,string"`
46 | 	RSS             int64  `json:"rss,string"`
47 | 	Info            string `json:"info"`
48 | 	Pid             int    `json:"pid,string"`
49 | 	ManagerPriority int    `json:"manager_priority,string"`
50 | 	ActiveManager   bool   `json:"active_manager,string"`
51 |
52 | 	Ttl time.Duration
53 |
54 | 	WorkerHeartbeats []WorkerHeartbeat `json:"-"`
55 | }
56 |
57 | // WorkerHeartbeat is the per-task-runner state reported with a heartbeat
58 | type WorkerHeartbeat struct {
59 | 	Pid             int    `json:"pid,string"`
60 | 	Tid             string `json:"tid,string"`
61 | 	Queue           string `json:"queue,string"`
62 | 	InProgressQueue string `json:"in_progress_queue,string"`
63 | }
64 |
65 | // Store is the interface for storing and retrieving data
66 | type Store interface {
67 |
68 | 	// General queue operations
69 | 	CreateQueue(ctx context.Context, queue string) error
70 | 	ListMessages(ctx context.Context, queue string) ([]string, error)
71 | 	AcknowledgeMessage(ctx context.Context, queue string, message string) error
72 | 	EnqueueMessage(ctx context.Context, queue string, priority float64, message string) error
73 | 	EnqueueMessageNow(ctx context.Context, queue string, message string) error
74 | 	DequeueMessage(ctx context.Context, queue string, inprogressQueue string, timeout time.Duration) (string, error)
75 | 	RequeueMessagesFromInProgressQueue(ctx context.Context, inprogressQueue, queue string) ([]string, error)
76 |
77 | 	// Special purpose queue operations
78 | 	EnqueueScheduledMessage(ctx context.Context, priority float64, message string) error
79 | 	DequeueScheduledMessage(ctx context.Context, priority float64) (string, error)
80 |
81 | 	EnqueueRetriedMessage(ctx context.Context, priority float64, message string) error
82 | 	DequeueRetriedMessage(ctx context.Context, priority float64) (string, error)
83 |
84 | 	// Stats
85 | 	IncrementStats(ctx context.Context, metric string) error
86 | 	GetAllStats(ctx context.Context, queues []string) (*Stats, error)
87 |
88 | 	// Heartbeat
89 | 	GetAllHeartbeats(ctx context.Context) ([]*Heartbeat, error)
90 | 	SendHeartbeat(ctx context.Context, heartbeat *Heartbeat) error
91 | 	RemoveHeartbeat(ctx context.Context, heartbeatID string) error
92 |
93 | 	// Retries
94 | 	GetAllRetries(ctx context.Context) (*Retries, error)
95 |
96 | 	// Storage Server Time
97 | 	GetTime(ctx context.Context) (time.Time, error)
98 | }
--------------------------------------------------------------------------------
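Store implementations report an empty queue with the NoMessage sentinel, an ordinary comparable constant (the polling loops in scheduled.go simply break on any error). A sketch of an explicit check:

package main

import (
	"fmt"

	"github.com/digitalocean/go-workers2/storage"
)

func classify(err error) string {
	// NoMessage is a plain StorageError constant, so direct comparison works.
	if err == storage.NoMessage {
		return "queue drained"
	}
	if err != nil {
		return "storage error: " + err.Error()
	}
	return "ok"
}

func main() {
	fmt.Println(classify(storage.NoMessage)) // queue drained
	fmt.Println(classify(nil))               // ok
}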
`json:"tag"` 20 | Concurrency int `json:"concurrency"` 21 | Queues []string `json:"queues"` 22 | Labels []string `json:"labels"` 23 | Identity string `json:"identity"` 24 | } 25 | 26 | type HeartbeatWorkerMsgWrapper struct { 27 | Queue string `json:"Queue"` 28 | Payload string `json:"payload"` 29 | RunAt int64 `json:"run_at"` 30 | Tid string `json:"Tid"` 31 | } 32 | 33 | type HeartbeatWorkerMsg struct { 34 | Retry int `json:"retry"` 35 | Queue string `json:"Queue"` 36 | Backtrace bool `json:"backtrace"` 37 | Class string `json:"class"` 38 | Args *Args `json:"args"` 39 | Jid string `json:"jid"` 40 | CreatedAt int64 `json:"created_at"` 41 | EnqueuedAt int64 `json:"enqueued_at"` 42 | } 43 | 44 | type afterHeartbeatFunc func(heartbeat *storage.Heartbeat, manager *Manager, staleMessageUpdates []*staleMessageUpdate) error 45 | 46 | func GenerateProcessNonce() (string, error) { 47 | bytes := make([]byte, 12) 48 | if _, err := rand.Read(bytes); err != nil { 49 | return "", err 50 | } 51 | return hex.EncodeToString(bytes), nil 52 | } 53 | 54 | func (m *Manager) buildHeartbeat(heartbeatTime time.Time, ttl time.Duration) (*storage.Heartbeat, error) { 55 | queues := []string{} 56 | 57 | concurrency := 0 58 | busy := 0 59 | pid := os.Getpid() 60 | 61 | var workerHeartbeats []storage.WorkerHeartbeat 62 | 63 | for _, w := range m.workers { 64 | queues = append(queues, w.queue) 65 | concurrency += w.concurrency // add up all concurrency here because it can be specified on a per-worker basis. 66 | busy += len(w.inProgressMessages()) 67 | 68 | w.runnersLock.Lock() 69 | for _, r := range w.runners { 70 | workerHeartbeat := storage.WorkerHeartbeat{ 71 | Pid: pid, 72 | Tid: r.tid, 73 | Queue: w.queue, 74 | InProgressQueue: w.inProgressQueue, 75 | } 76 | workerHeartbeats = append(workerHeartbeats, workerHeartbeat) 77 | } 78 | w.runnersLock.Unlock() 79 | } 80 | 81 | hostname, err := os.Hostname() 82 | if err != nil { 83 | return nil, err 84 | } 85 | 86 | if m.opts.ManagerDisplayName != "" { 87 | hostname = hostname + ":" + m.opts.ManagerDisplayName 88 | } 89 | 90 | tag := "default" 91 | 92 | if m.opts.Namespace != "" { 93 | tag = strings.ReplaceAll(m.opts.Namespace, ":", "") 94 | } 95 | 96 | heartbeatID, err := m.getHeartbeatID() 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | heartbeatInfo := &HeartbeatInfo{ 102 | Hostname: hostname, 103 | StartedAt: m.startedAt.UTC().Unix(), 104 | Pid: pid, 105 | Tag: tag, 106 | Concurrency: concurrency, 107 | Queues: queues, 108 | Labels: []string{}, 109 | Identity: heartbeatID, 110 | } 111 | heartbeatInfoJson, err := json.Marshal(heartbeatInfo) 112 | 113 | if err != nil { 114 | return nil, err 115 | } 116 | 117 | heartbeat := &storage.Heartbeat{ 118 | Identity: heartbeatID, 119 | Beat: heartbeatTime.UTC().Unix(), 120 | Quiet: false, 121 | Busy: busy, 122 | RSS: 0, // rss is not currently supported 123 | Info: string(heartbeatInfoJson), 124 | Pid: pid, 125 | ActiveManager: m.IsActive(), 126 | WorkerHeartbeats: workerHeartbeats, 127 | Ttl: ttl, 128 | } 129 | if m.opts.Heartbeat != nil && m.opts.Heartbeat.PrioritizedManager != nil { 130 | heartbeat.ManagerPriority = m.opts.Heartbeat.PrioritizedManager.ManagerPriority 131 | } 132 | 133 | return heartbeat, nil 134 | } 135 | 136 | func (m *Manager) getHeartbeatID() (string, error) { 137 | hostname, err := os.Hostname() 138 | if err != nil { 139 | return "", err 140 | } 141 | pid := os.Getpid() 142 | return fmt.Sprintf("%s:%d:%s", hostname, pid, m.processNonce), nil 143 | } 144 | 
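Aside: the heartbeat identity built by getHeartbeatID above keys every manager entry in Redis and in the sidekiq web UI. A minimal sketch of the resulting shape, assuming a hypothetical host and PID (the nonce is 12 random bytes hex-encoded, as in GenerateProcessNonce):

```go
package main

import "fmt"

func main() {
	// hypothetical values for illustration only
	hostname := "worker-host-1"
	pid := 4242
	nonce := "a1b2c3d4e5f60718293a4b5c" // 24 hex chars = 12 random bytes

	// same "<hostname>:<pid>:<nonce>" layout as getHeartbeatID
	id := fmt.Sprintf("%s:%d:%s", hostname, pid, nonce)
	fmt.Println(id) // worker-host-1:4242:a1b2c3d4e5f60718293a4b5c
}
```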
-------------------------------------------------------------------------------- /fetcher.go:
1 | package workers
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "log"
7 | "os"
8 | "sync"
9 | "time"
10 |
11 | "github.com/digitalocean/go-workers2/storage"
12 | )
13 |
14 | // Fetcher is an interface for managing work messages
15 | type Fetcher interface {
16 | Queue() string
17 | InProgressQueue() string
18 | Fetch()
19 | Acknowledge(*Msg)
20 | SetActive(bool)
21 | IsActive() bool
22 | Ready() chan bool
23 | Messages() chan *Msg
24 | Close()
25 | Closed() bool
26 | }
27 |
28 | type simpleFetcher struct {
29 | store storage.Store
30 | processID string
31 | queue string
32 | lock sync.Mutex
33 | isActive bool
34 |
35 | ready chan bool
36 | messages chan *Msg
37 | stop chan bool
38 | exit chan bool
39 | closed chan bool
40 | logger *log.Logger
41 | }
42 |
43 | func newSimpleFetcher(queue string, opts Options, isActive bool) *simpleFetcher {
44 | logger := opts.Logger
45 | if logger == nil {
46 | logger = log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds)
47 | }
48 |
49 | return &simpleFetcher{
50 | store: opts.store,
51 | processID: opts.ProcessID,
52 | queue: queue,
53 | isActive: isActive,
54 | ready: make(chan bool),
55 | messages: make(chan *Msg),
56 | stop: make(chan bool),
57 | exit: make(chan bool),
58 | closed: make(chan bool),
59 | logger: logger,
60 | }
61 | }
62 |
63 | func (f *simpleFetcher) Queue() string {
64 | return f.queue
65 | }
66 |
67 | func (f *simpleFetcher) processOldMessages() {
68 | messages := f.inprogressMessages()
69 |
70 | for _, message := range messages {
71 | <-f.Ready()
72 | f.sendMessage(message)
73 | }
74 | }
75 |
76 | func (f *simpleFetcher) Fetch() {
77 | for !f.IsActive() { // use the locked accessor to avoid racing SetActive
78 | select {
79 | case <-f.stop:
80 | close(f.closed)
81 | close(f.exit)
82 | return
83 | }
84 | }
85 | f.processOldMessages()
86 |
87 | go func() {
88 | for {
89 | // f.Close() has been called
90 | if f.Closed() {
91 | break
92 | }
93 | <-f.Ready()
94 | if f.IsActive() {
95 | f.tryFetchMessage()
96 | }
97 | }
98 | }()
99 |
100 | for {
101 | select {
102 | case <-f.stop:
103 | // Stop the redis-polling goroutine
104 | close(f.closed)
105 | // Signal to Close() that the fetcher has stopped
106 | close(f.exit)
107 | return // not break: a bare break only exits the select, leaving this loop blocked forever
108 | }
109 | }
110 | }
111 |
112 | func (f *simpleFetcher) tryFetchMessage() {
113 | message, err := f.store.DequeueMessage(context.Background(), f.queue, f.InProgressQueue(), 1*time.Second)
114 | if err != nil {
115 | // If redis returns null, the queue is empty.
116 | // Just ignore empty queue errors; print all other errors.
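// NOTE: storage.NoMessage is the sentinel error the redis store returns when
// the blocking pop times out on an empty queue (see DequeueMessage in storage/redis.go).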
117 | if err != storage.NoMessage { 118 | f.logger.Println("ERR: ", f.queue, err) 119 | } 120 | } else { 121 | f.sendMessage(message) 122 | } 123 | } 124 | 125 | func (f *simpleFetcher) sendMessage(message string) { 126 | msg, err := NewMsg(message) 127 | 128 | if err != nil { 129 | f.logger.Println("ERR: Couldn't create message from", message, ":", err) 130 | return 131 | } 132 | 133 | f.Messages() <- msg 134 | } 135 | 136 | func (f *simpleFetcher) Acknowledge(message *Msg) { 137 | f.store.AcknowledgeMessage(context.Background(), f.InProgressQueue(), message.OriginalJson()) 138 | } 139 | 140 | func (f *simpleFetcher) Messages() chan *Msg { 141 | return f.messages 142 | } 143 | 144 | func (f *simpleFetcher) SetActive(active bool) { 145 | f.lock.Lock() 146 | defer f.lock.Unlock() 147 | f.isActive = active 148 | } 149 | 150 | func (f *simpleFetcher) IsActive() bool { 151 | f.lock.Lock() 152 | defer f.lock.Unlock() 153 | return f.isActive 154 | } 155 | 156 | func (f *simpleFetcher) Ready() chan bool { 157 | return f.ready 158 | } 159 | 160 | func (f *simpleFetcher) Close() { 161 | f.stop <- true 162 | <-f.exit 163 | } 164 | 165 | func (f *simpleFetcher) Closed() bool { 166 | select { 167 | case <-f.closed: 168 | return true 169 | default: 170 | return false 171 | } 172 | } 173 | 174 | func (f *simpleFetcher) inprogressMessages() []string { 175 | messages, err := f.store.ListMessages(context.Background(), f.InProgressQueue()) 176 | if err != nil { 177 | f.logger.Println("ERR: ", err) 178 | } 179 | 180 | return messages 181 | } 182 | 183 | func (f *simpleFetcher) InProgressQueue() string { 184 | return fmt.Sprint(f.queue, ":", f.processID, ":inprogress") 185 | } 186 | -------------------------------------------------------------------------------- /options_test.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "crypto/tls" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestRedisPoolConfig(t *testing.T) { 12 | // Tests redis pool size which defaults to 1 13 | opts, err := processOptions(Options{ 14 | ServerAddr: "localhost:6379", 15 | ProcessID: "2", 16 | }) 17 | 18 | assert.NoError(t, err) 19 | assert.Equal(t, 1, opts.client.Options().PoolSize) 20 | 21 | opts, err = processOptions(Options{ 22 | ServerAddr: "localhost:6379", 23 | ProcessID: "1", 24 | PoolSize: 20, 25 | }) 26 | 27 | assert.NoError(t, err) 28 | assert.Equal(t, 20, opts.client.Options().PoolSize) 29 | } 30 | 31 | func TestRedisPoolConfigTLS(t *testing.T) { 32 | opts, err := processOptions(Options{ 33 | ServerAddr: "localhost:6379", 34 | ProcessID: "1", 35 | PoolSize: 20, 36 | }) 37 | 38 | assert.NoError(t, err) 39 | assert.Nil(t, opts.client.Options().TLSConfig) 40 | 41 | opts, err = processOptions(Options{ 42 | ServerAddr: "localhost:6379", 43 | ProcessID: "1", 44 | PoolSize: 20, 45 | RedisTLSConfig: &tls.Config{ServerName: "test_tls"}, 46 | }) 47 | 48 | assert.NoError(t, err) 49 | assert.NotNil(t, opts.client.Options().TLSConfig) 50 | assert.Equal(t, "test_tls", opts.client.Options().TLSConfig.ServerName) 51 | } 52 | 53 | func TestCustomProcessConfig(t *testing.T) { 54 | opts, err := processOptions(Options{ 55 | ServerAddr: "localhost:6379", 56 | ProcessID: "1", 57 | }) 58 | 59 | assert.NoError(t, err) 60 | assert.Equal(t, "1", opts.ProcessID) 61 | 62 | opts, err = processOptions(Options{ 63 | ServerAddr: "localhost:6379", 64 | ProcessID: "2", 65 | }) 66 | 67 | assert.NoError(t, err) 68 | assert.Equal(t, "2", 
opts.ProcessID) 69 | } 70 | 71 | func TestRequiresRedisConfig(t *testing.T) { 72 | _, err := processOptions(Options{ProcessID: "2"}) 73 | 74 | assert.Error(t, err, "Configure requires either the Server or Sentinels option") 75 | } 76 | 77 | func TestRequiresProcessConfig(t *testing.T) { 78 | _, err := processOptions(Options{ServerAddr: "localhost:6379"}) 79 | 80 | assert.Error(t, err, "Configure requires a ProcessID, which uniquely identifies this instance") 81 | } 82 | 83 | func TestAddsColonToNamespace(t *testing.T) { 84 | opts, err := processOptions(Options{ 85 | ServerAddr: "localhost:6379", 86 | ProcessID: "1", 87 | }) 88 | 89 | assert.NoError(t, err) 90 | assert.Equal(t, "", opts.Namespace) 91 | 92 | opts, err = processOptions(Options{ 93 | ServerAddr: "localhost:6379", 94 | ProcessID: "1", 95 | Namespace: "prod", 96 | }) 97 | 98 | assert.NoError(t, err) 99 | assert.Equal(t, "prod:", opts.Namespace) 100 | } 101 | 102 | func TestDefaultPollIntervalConfig(t *testing.T) { 103 | opts, err := processOptions(Options{ 104 | ServerAddr: "localhost:6379", 105 | ProcessID: "1", 106 | }) 107 | 108 | assert.NoError(t, err) 109 | assert.Equal(t, 15*time.Second, opts.PollInterval) 110 | 111 | opts, err = processOptions(Options{ 112 | ServerAddr: "localhost:6379", 113 | ProcessID: "1", 114 | PollInterval: time.Second, 115 | }) 116 | 117 | assert.NoError(t, err) 118 | assert.Equal(t, time.Second, opts.PollInterval) 119 | } 120 | 121 | func TestSentinelConfigGood(t *testing.T) { 122 | opts, err := processOptions(Options{ 123 | SentinelAddrs: "localhost:26379,localhost:46379", 124 | RedisMasterName: "123", 125 | ProcessID: "1", 126 | PollInterval: time.Second, 127 | }) 128 | 129 | assert.NoError(t, err) 130 | assert.Equal(t, "FailoverClient", opts.client.Options().Addr) 131 | assert.Nil(t, opts.client.Options().TLSConfig) 132 | } 133 | 134 | func TestSentinelConfigGoodTLS(t *testing.T) { 135 | opts, err := processOptions(Options{ 136 | SentinelAddrs: "localhost:26379,localhost:46379", 137 | RedisMasterName: "123", 138 | ProcessID: "1", 139 | PollInterval: time.Second, 140 | RedisTLSConfig: &tls.Config{ServerName: "test_tls"}, 141 | }) 142 | 143 | assert.NoError(t, err) 144 | assert.Equal(t, "FailoverClient", opts.client.Options().Addr) 145 | assert.NotNil(t, opts.client.Options().TLSConfig) 146 | assert.Equal(t, "test_tls", opts.client.Options().TLSConfig.ServerName) 147 | } 148 | 149 | func TestSentinelConfigNoMaster(t *testing.T) { 150 | _, err := processOptions(Options{ 151 | SentinelAddrs: "localhost:26379,localhost:46379", 152 | ProcessID: "1", 153 | PollInterval: time.Second, 154 | }) 155 | 156 | assert.Error(t, err) 157 | } 158 | -------------------------------------------------------------------------------- /fetcher_test.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func buildFetch(queue string, opts Options) Fetcher { 11 | fetch := newSimpleFetcher(queue, opts, true) 12 | go fetch.Fetch() 13 | return fetch 14 | } 15 | 16 | func TestFetchConfig(t *testing.T) { 17 | opts, err := SetupDefaultTestOptions() 18 | assert.NoError(t, err) 19 | fetch := buildFetch("fetchQueue1", opts) 20 | assert.Equal(t, "fetchQueue1", fetch.Queue()) 21 | fetch.Close() 22 | } 23 | 24 | func TestGetMessagesToChannel(t *testing.T) { 25 | ctx := context.Background() 26 | 27 | opts, err := SetupDefaultTestOptions() 28 | assert.NoError(t, err) 29 | 30 | 
message, _ := NewMsg("{\"foo\":\"bar\"}") 31 | fetch := buildFetch("fetchQueue2", opts) 32 | 33 | rc := opts.client 34 | 35 | rc.LPush(ctx, "queue:fetchQueue2", message.ToJson()).Result() 36 | 37 | fetch.Ready() <- true 38 | fetchedMessage := <-fetch.Messages() 39 | 40 | assert.Equal(t, message, fetchedMessage) 41 | 42 | len, err := rc.LLen(ctx, "queue:fetchQueue2").Result() 43 | assert.NoError(t, err) 44 | assert.Equal(t, int64(0), len) 45 | 46 | fetch.Close() 47 | } 48 | 49 | func TestMoveProgressMessageToPrivateQueue(t *testing.T) { 50 | ctx := context.Background() 51 | 52 | opts, err := SetupDefaultTestOptions() 53 | assert.NoError(t, err) 54 | message, _ := NewMsg("{\"foo\":\"bar\"}") 55 | 56 | fetch := buildFetch("fetchQueue3", opts) 57 | 58 | rc := opts.client 59 | 60 | rc.LPush(ctx, "queue:fetchQueue3", message.ToJson()) 61 | 62 | fetch.Ready() <- true 63 | <-fetch.Messages() 64 | 65 | len, err := rc.LLen(ctx, "queue:fetchQueue3:1:inprogress").Result() 66 | assert.NoError(t, err) 67 | assert.Equal(t, int64(1), len) 68 | 69 | messages, err := rc.LRange(ctx, "queue:fetchQueue3:1:inprogress", 0, -1).Result() 70 | assert.NoError(t, err) 71 | assert.Equal(t, message.ToJson(), messages[0]) 72 | 73 | fetch.Close() 74 | } 75 | 76 | func TestRemoveProgressMessageWhenAcked(t *testing.T) { 77 | ctx := context.Background() 78 | 79 | opts, err := SetupDefaultTestOptions() 80 | assert.NoError(t, err) 81 | message, _ := NewMsg("{\"foo\":\"bar\"}") 82 | 83 | fetch := buildFetch("fetchQueue4", opts) 84 | 85 | rc := opts.client 86 | 87 | rc.LPush(ctx, "queue:fetchQueue4", message.ToJson()).Result() 88 | 89 | fetch.Ready() <- true 90 | <-fetch.Messages() 91 | 92 | fetch.Acknowledge(message) 93 | 94 | len, err := rc.LLen(ctx, "queue:fetchQueue4:1:inprogress").Result() 95 | assert.NoError(t, err) 96 | assert.Equal(t, int64(0), len) 97 | 98 | fetch.Close() 99 | } 100 | 101 | func TestRemoveProgressMessageDifferentSerialization(t *testing.T) { 102 | ctx := context.Background() 103 | 104 | opts, err := SetupDefaultTestOptions() 105 | assert.NoError(t, err) 106 | 107 | json := "{\"foo\":\"bar\",\"args\":[]}" 108 | message, _ := NewMsg(json) 109 | 110 | assert.NotEqual(t, message.ToJson(), json) 111 | 112 | fetch := buildFetch("fetchQueue5", opts) 113 | 114 | rc := opts.client 115 | 116 | rc.LPush(ctx, "queue:fetchQueue5", json).Result() 117 | 118 | fetch.Ready() <- true 119 | <-fetch.Messages() 120 | 121 | fetch.Acknowledge(message) 122 | 123 | len, err := rc.LLen(ctx, "queue:fetchQueue5:1:inprogress").Result() 124 | assert.NoError(t, err) 125 | assert.Equal(t, int64(0), len) 126 | 127 | fetch.Close() 128 | } 129 | 130 | func TestRetryInprogressMessages(t *testing.T) { 131 | ctx := context.Background() 132 | 133 | opts, err := SetupDefaultTestOptions() 134 | assert.NoError(t, err) 135 | 136 | message, _ := NewMsg("{\"foo\":\"bar\"}") 137 | message2, _ := NewMsg("{\"foo\":\"bar2\"}") 138 | message3, _ := NewMsg("{\"foo\":\"bar3\"}") 139 | 140 | rc := opts.client 141 | 142 | rc.LPush(ctx, "queue:fetchQueue6:1:inprogress", message.ToJson()).Result() 143 | rc.LPush(ctx, "queue:fetchQueue6:1:inprogress", message2.ToJson()).Result() 144 | rc.LPush(ctx, "queue:fetchQueue6", message3.ToJson()).Result() 145 | 146 | fetch := buildFetch("fetchQueue6", opts) 147 | 148 | fetch.Ready() <- true 149 | assert.Equal(t, message2, <-fetch.Messages()) 150 | fetch.Ready() <- true 151 | assert.Equal(t, message, <-fetch.Messages()) 152 | fetch.Ready() <- true 153 | assert.Equal(t, message3, <-fetch.Messages()) 154 | 155 | 
fetch.Acknowledge(message)
156 | fetch.Acknowledge(message2)
157 | fetch.Acknowledge(message3)
158 |
159 | len, err := rc.LLen(ctx, "queue:fetchQueue6:1:inprogress").Result()
160 | assert.NoError(t, err)
161 | assert.Equal(t, int64(0), len)
162 |
163 | fetch.Close()
164 | }
165 |
-------------------------------------------------------------------------------- /README.md:
1 | [![Build Status](https://travis-ci.org/digitalocean/go-workers2.png)](https://travis-ci.org/digitalocean/go-workers2)
2 | [![GoDoc](https://godoc.org/github.com/digitalocean/go-workers2?status.png)](https://godoc.org/github.com/digitalocean/go-workers2)
3 |
4 | [Sidekiq](http://sidekiq.org/)-compatible
5 | background workers in [golang](http://golang.org/).
6 |
7 | - reliable queueing for all queues using [brpoplpush](http://redis.io/commands/brpoplpush)
8 | - handles retries
9 | - supports custom middleware
10 | - customizable concurrency per queue
11 | - responds to Unix signals to safely wait for jobs to finish before exiting
12 | - provides stats on what jobs are currently running
13 | - redis sentinel support
14 | - well tested
15 |
16 | Example usage:
17 |
18 | ```go
19 | package main
20 |
21 | import (
22 | "context"
23 | "fmt"
24 | workers "github.com/digitalocean/go-workers2"
25 | )
26 |
27 | func myJob(message *workers.Msg) error {
28 | // do something with your message
29 | // message.Jid()
30 | // message.Args() is a wrapper around go-simplejson (http://godoc.org/github.com/bitly/go-simplejson)
31 | return nil
32 | }
33 |
34 | func myMiddleware(queue string, mgr *workers.Manager, next workers.JobFunc) workers.JobFunc {
35 | return func(message *workers.Msg) (err error) {
36 | // do something before each message is processed
37 | err = next(message)
38 | // do something after each message is processed
39 | return
40 | }
41 | }
42 |
43 | func main() {
44 | // Create a manager, which manages workers
45 | manager, err := workers.NewManager(workers.Options{
46 | // location of redis instance
47 | ServerAddr: "localhost:6379",
48 | // instance of the database
49 | Database: 0,
50 | // number of connections to keep open with redis
51 | PoolSize: 30,
52 | // unique process id for this instance of workers (for proper recovery of inprogress jobs on crash)
53 | ProcessID: "1",
54 | })
55 |
56 | if err != nil {
57 | fmt.Println(err)
58 | }
59 |
60 | // create a middleware chain with the default middlewares, and append myMiddleware
61 | mids := workers.DefaultMiddlewares().Append(myMiddleware)
62 |
63 | // pull messages from "myqueue" with concurrency of 10
64 | // this worker will not run myMiddleware, but will run the default middlewares
65 | manager.AddWorker("myqueue", 10, myJob)
66 |
67 | // pull messages from "myqueue2" with concurrency of 20
68 | // this worker will run the default middlewares and myMiddleware
69 | manager.AddWorker("myqueue2", 20, myJob, mids...)
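	// The producer created below can also schedule work for later; an
	// illustrative sketch using EnqueueIn / EnqueueAt from producer.go
	// (EnqueueAt would additionally need the "time" import):
	//   producer.EnqueueIn("myqueue3", "Add", 60, []int{1, 2})                       // ~60 seconds from now
	//   producer.EnqueueAt("myqueue3", "Add", time.Now().Add(time.Hour), []int{1, 2})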
70 |
71 | // pull messages from "myqueue3" with concurrency of 20
72 | // this worker will only run myMiddleware
73 | manager.AddWorker("myqueue3", 20, myJob, myMiddleware)
74 |
75 | // If you already have a manager and want to enqueue
76 | // to the same place:
77 | producer := manager.Producer()
78 |
79 | // Alternatively, if you want to create a producer to enqueue messages
80 | // producer, err := workers.NewProducer(Options{
81 | // // location of redis instance
82 | // ServerAddr: "localhost:6379",
83 | // // instance of the database
84 | // Database: 0,
85 | // // number of connections to keep open with redis
86 | // PoolSize: 30,
87 | // // unique process id for this instance of workers (for proper recovery of inprogress jobs on crash)
88 | // ProcessID: "1",
89 | // })
90 |
91 | // Add a job to a queue
92 | producer.Enqueue("myqueue3", "Add", []int{1, 2})
93 |
94 | // Add a job to a queue with retry
95 | producer.EnqueueWithOptions("myqueue3", "Add", []int{1, 2}, workers.EnqueueOptions{Retry: true})
96 |
97 | // Add a job to a queue passing the context to redis
98 | producer.EnqueueWithContext(context.Background(), "myqueue3", "Add", []int{1, 2}, workers.EnqueueOptions{Retry: true})
99 |
100 | // stats will be available at http://localhost:8080/stats
101 | go workers.StartAPIServer(8080)
102 |
103 | // Blocks until process is told to exit via unix signal
104 | manager.Run()
105 | }
106 | ```
107 |
108 | Running the example above produces output like the following at `localhost:8080/stats`:
109 |
110 | ```json
111 | [
112 | {
113 | "manager_name": "",
114 | "processed": 5,
115 | "failed": 57,
116 | "jobs": {
117 | "myqueue": null,
118 | "myqueue2": null,
119 | "myqueue3": null
120 | },
121 | "enqueued": {
122 | "myqueue": 0,
123 | "myqueue2": 0,
124 | "myqueue3": 0
125 | },
126 | "retry_count": 4
127 | }
128 | ]
129 | ```
130 |
131 | Development sponsored by DigitalOcean. Code forked from [github/jrallison/go-workers](https://github.com/jrallison/go-workers). Initial development sponsored by [Customer.io](http://customer.io).
132 | -------------------------------------------------------------------------------- /producer.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "context" 5 | "crypto/rand" 6 | "encoding/json" 7 | "fmt" 8 | "io" 9 | "time" 10 | 11 | "github.com/go-redis/redis/v8" 12 | ) 13 | 14 | const ( 15 | // NanoSecondPrecision is a constant for the number of nanoseconds in a second 16 | NanoSecondPrecision = 1000000000.0 17 | ) 18 | 19 | // Producer is used to enqueue new work 20 | type Producer struct { 21 | opts Options 22 | } 23 | 24 | // EnqueueData stores data and configuration for new work 25 | type EnqueueData struct { 26 | Queue string `json:"queue,omitempty"` 27 | Class string `json:"class"` 28 | Args interface{} `json:"args"` 29 | Jid string `json:"jid"` 30 | EnqueuedAt float64 `json:"enqueued_at"` 31 | EnqueueOptions 32 | } 33 | 34 | // EnqueueOptions stores configuration for new work 35 | type EnqueueOptions struct { 36 | RetryCount int `json:"retry_count,omitempty"` 37 | RetryMax int `json:"retry_max,omitempty"` 38 | Retry bool `json:"retry,omitempty"` 39 | At float64 `json:"at,omitempty"` 40 | } 41 | 42 | // NewProducer creates a new producer with the given options 43 | func NewProducer(options Options) (*Producer, error) { 44 | options, err := processOptions(options) 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | return &Producer{ 50 | opts: options, 51 | }, nil 52 | } 53 | 54 | // NewProducerWithRedisClient creates a new producer with the given options and Redis client 55 | func NewProducerWithRedisClient(options Options, client *redis.Client) (*Producer, error) { 56 | options, err := processOptionsWithRedisClient(options, client) 57 | if err != nil { 58 | return nil, err 59 | } 60 | 61 | return &Producer{ 62 | opts: options, 63 | }, nil 64 | } 65 | 66 | // GetRedisClient returns the Redis client used by the producer 67 | // Deprecated: the Redis client is an internal implementation and access will be removed 68 | func (p *Producer) GetRedisClient() *redis.Client { 69 | return p.opts.client 70 | } 71 | 72 | // Enqueue enqueues new work for immediate processing 73 | func (p *Producer) Enqueue(queue, class string, args interface{}) (string, error) { 74 | return p.EnqueueWithOptions(queue, class, args, EnqueueOptions{At: nowToSecondsWithNanoPrecision()}) 75 | } 76 | 77 | // EnqueueIn enqueues new work for delayed processing 78 | func (p *Producer) EnqueueIn(queue, class string, in float64, args interface{}) (string, error) { 79 | return p.EnqueueWithOptions(queue, class, args, EnqueueOptions{At: nowToSecondsWithNanoPrecision() + in}) 80 | } 81 | 82 | // EnqueueAt enqueues new work for processing at a specific time 83 | func (p *Producer) EnqueueAt(queue, class string, at time.Time, args interface{}) (string, error) { 84 | return p.EnqueueWithOptions(queue, class, args, EnqueueOptions{At: timeToSecondsWithNanoPrecision(at)}) 85 | } 86 | 87 | // EnqueueWithOptions enqueues new work for processing with the given options 88 | func (p *Producer) EnqueueWithOptions(queue, class string, args interface{}, opts EnqueueOptions) (string, error) { 89 | return p.EnqueueWithContext(context.Background(), queue, class, args, opts) 90 | } 91 | 92 | // EnqueueWithContext enqueues new work for processing with the given options and context 93 | func (p *Producer) EnqueueWithContext(ctx context.Context, queue, class string, args interface{}, opts EnqueueOptions) (string, error) { 94 | now := 
nowToSecondsWithNanoPrecision() 95 | data := EnqueueData{ 96 | Queue: queue, 97 | Class: class, 98 | Args: args, 99 | Jid: generateJid(), 100 | EnqueuedAt: now, 101 | EnqueueOptions: opts, 102 | } 103 | 104 | bytes, err := json.Marshal(data) 105 | if err != nil { 106 | return "", err 107 | } 108 | 109 | if now < opts.At { 110 | err = p.opts.store.EnqueueScheduledMessage(ctx, data.At, string(bytes)) 111 | return data.Jid, err 112 | } 113 | 114 | err = p.opts.store.CreateQueue(ctx, queue) 115 | if err != nil { 116 | return "", err 117 | } 118 | 119 | err = p.opts.store.EnqueueMessageNow(ctx, queue, string(bytes)) 120 | if err != nil { 121 | return "", err 122 | } 123 | 124 | return data.Jid, nil 125 | } 126 | 127 | func timeToSecondsWithNanoPrecision(t time.Time) float64 { 128 | return float64(t.UnixNano()) / NanoSecondPrecision 129 | } 130 | 131 | func durationToSecondsWithNanoPrecision(d time.Duration) float64 { 132 | return float64(d.Nanoseconds()) / NanoSecondPrecision 133 | } 134 | 135 | func nowToSecondsWithNanoPrecision() float64 { 136 | return timeToSecondsWithNanoPrecision(time.Now()) 137 | } 138 | 139 | func generateJid() string { 140 | // Return 12 random bytes as 24 character hex 141 | b := make([]byte, 12) 142 | _, err := io.ReadFull(rand.Reader, b) 143 | if err != nil { 144 | return "" 145 | } 146 | return fmt.Sprintf("%x", b) 147 | } 148 | -------------------------------------------------------------------------------- /options.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "log" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/digitalocean/go-workers2/storage" 12 | "github.com/go-redis/redis/v8" 13 | ) 14 | 15 | const ( 16 | defaultHeartbeatInterval = 5 * time.Second 17 | 18 | defaultHeartbeatTTL = 60 * time.Second 19 | ) 20 | 21 | // Options contains the set of configuration options for a manager and/or producer 22 | type Options struct { 23 | ProcessID string 24 | Namespace string 25 | PollInterval time.Duration 26 | Database int 27 | Password string 28 | PoolSize int 29 | 30 | // Provide one of ServerAddr or (SentinelAddrs + RedisMasterName) 31 | ServerAddr string 32 | SentinelAddrs string 33 | RedisMasterName string 34 | RedisTLSConfig *tls.Config 35 | 36 | // Optional display name used when displaying manager stats 37 | ManagerDisplayName string 38 | ManagerStartInactive bool 39 | 40 | // Define Heartbeat to enable heartbeat 41 | Heartbeat *HeartbeatOptions 42 | 43 | // Log 44 | Logger *log.Logger 45 | 46 | client *redis.Client 47 | store storage.Store 48 | } 49 | 50 | func (o *Options) Client() *redis.Client { 51 | return o.client 52 | } 53 | 54 | type HeartbeatOptions struct { 55 | // Optional heartbeat interval config 56 | Interval time.Duration 57 | 58 | // redis eviction ttl config 59 | HeartbeatTTL time.Duration 60 | 61 | PrioritizedManager *PrioritizedManagerOptions 62 | } 63 | 64 | type PrioritizedManagerOptions struct { 65 | ManagerPriority int 66 | TotalActiveManagers int 67 | } 68 | 69 | func processOptions(options Options) (Options, error) { 70 | options, err := validateGeneralOptions(options) 71 | if err != nil { 72 | return Options{}, err 73 | } 74 | 75 | //redis options 76 | if options.PoolSize == 0 { 77 | options.PoolSize = 1 78 | } 79 | redisIdleTimeout := 240 * time.Second 80 | 81 | if options.ServerAddr != "" { 82 | options.client = redis.NewClient(&redis.Options{ 83 | IdleTimeout: redisIdleTimeout, 84 | Password: options.Password, 85 | 
DB: options.Database,
86 | PoolSize: options.PoolSize,
87 | Addr: options.ServerAddr,
88 | TLSConfig: options.RedisTLSConfig,
89 | })
90 | } else if options.SentinelAddrs != "" {
91 | if options.RedisMasterName == "" {
92 | return Options{}, errors.New("Sentinel configuration requires a master name")
93 | }
94 |
95 | options.client = redis.NewFailoverClient(&redis.FailoverOptions{
96 | IdleTimeout: redisIdleTimeout,
97 | Password: options.Password,
98 | DB: options.Database,
99 | PoolSize: options.PoolSize,
100 | SentinelAddrs: strings.Split(options.SentinelAddrs, ","),
101 | MasterName: options.RedisMasterName,
102 | TLSConfig: options.RedisTLSConfig,
103 | })
104 | } else {
105 | return Options{}, errors.New("options requires either the ServerAddr or SentinelAddrs option")
106 | }
107 |
108 | if options.Logger == nil {
109 | options.Logger = log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds)
110 | }
111 |
112 | redisStore := storage.NewRedisStore(options.Namespace, options.client, options.Logger)
113 | options.store = redisStore
114 |
115 | if options.Heartbeat != nil {
116 | if options.Heartbeat.Interval <= 0 {
117 | options.Heartbeat.Interval = defaultHeartbeatInterval
118 | }
119 | if options.Heartbeat.HeartbeatTTL <= 0 {
120 | options.Heartbeat.HeartbeatTTL = defaultHeartbeatTTL
121 | }
122 | }
123 |
124 | return options, nil
125 | }
126 |
127 | func processOptionsWithRedisClient(options Options, client *redis.Client) (Options, error) {
128 | options, err := validateGeneralOptions(options)
129 | if err != nil {
130 | return Options{}, err
131 | }
132 |
133 | if client == nil {
134 | return Options{}, errors.New("redis client is nil; a configured Redis client is required")
135 | }
136 |
137 | options.client = client
138 |
139 | if options.Logger == nil {
140 | options.Logger = log.New(os.Stdout, "go-workers2: ", log.Ldate|log.Lmicroseconds)
141 | }
142 |
143 | redisStore := storage.NewRedisStore(options.Namespace, options.client, options.Logger)
144 | options.store = redisStore
145 |
146 | return options, nil
147 | }
148 |
149 | func validateGeneralOptions(options Options) (Options, error) {
150 | if options.ProcessID == "" {
151 | return Options{}, errors.New("options requires a ProcessID, which uniquely identifies this instance")
152 | }
153 |
154 | if options.Namespace != "" {
155 | options.Namespace += ":"
156 | }
157 |
158 | if options.PollInterval <= 0 {
159 | options.PollInterval = 15 * time.Second
160 | }
161 |
162 | if options.Heartbeat != nil &&
163 | options.Heartbeat.Interval >= options.Heartbeat.HeartbeatTTL {
164 | return Options{}, errors.New("invalid heartbeat configuration: heartbeat interval must be shorter than the heartbeat TTL")
165 | }
166 |
167 | return options, nil
168 | }
169 |
-------------------------------------------------------------------------------- /worker_test.go:
1 | package workers
2 |
3 | import (
4 | "log"
5 | "os"
6 | "sync"
7 | "testing"
8 |
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | type dummyFetcher struct {
13 | lock sync.Mutex
14 | isActive bool
15 |
16 | inProgressQueue func() string
17 | queue func() string
18 | fetch func()
19 | acknowledge func(*Msg)
20 | ready func() chan bool
21 | messages func() chan *Msg
22 | close func()
23 | closed func() bool
24 | }
25 |
26 | func (d *dummyFetcher) Queue() string { return d.queue() }
27 | func (d *dummyFetcher) InProgressQueue() string { return d.inProgressQueue() }
28 | func (d *dummyFetcher) Fetch() {
d.fetch() } 29 | func (d *dummyFetcher) Acknowledge(m *Msg) { d.acknowledge(m) } 30 | func (d *dummyFetcher) Ready() chan bool { return d.ready() } 31 | func (d *dummyFetcher) Messages() chan *Msg { return d.messages() } 32 | func (d *dummyFetcher) Close() { d.close() } 33 | func (d *dummyFetcher) Closed() bool { return d.closed() } 34 | 35 | func (d *dummyFetcher) SetActive(active bool) { 36 | d.lock.Lock() 37 | defer d.lock.Unlock() 38 | d.isActive = active 39 | } 40 | func (d *dummyFetcher) IsActive() bool { 41 | d.lock.Lock() 42 | defer d.lock.Unlock() 43 | return d.isActive 44 | } 45 | 46 | func TestNewWorker(t *testing.T) { 47 | testLogger := log.New(os.Stdout, "test-go-workers2: ", log.Ldate|log.Lmicroseconds) 48 | 49 | cc := NewCallCounter() 50 | w := newWorker(testLogger, "q", 0, cc.F) 51 | assert.Equal(t, "q", w.queue) 52 | assert.Equal(t, 1, w.concurrency) 53 | assert.NotNil(t, w.stop) 54 | 55 | assert.NotNil(t, w.handler) 56 | w.handler(nil) 57 | assert.Equal(t, 1, cc.count) 58 | 59 | w = newWorker(testLogger, "q", -5, cc.F) 60 | assert.Equal(t, 1, w.concurrency) 61 | 62 | w = newWorker(testLogger, "q", 10, cc.F) 63 | assert.Equal(t, 10, w.concurrency) 64 | } 65 | 66 | func TestWorker(t *testing.T) { 67 | testLogger := log.New(os.Stdout, "test-go-workers2: ", log.Ldate|log.Lmicroseconds) 68 | 69 | readyCh := make(chan bool) 70 | msgCh := make(chan *Msg) 71 | ackCh := make(chan *Msg) 72 | fetchCh := make(chan bool) 73 | 74 | var dfClosedLock sync.Mutex 75 | var dfClosed bool 76 | df := dummyFetcher{ 77 | inProgressQueue: func() string { return "inprog-q" }, 78 | queue: func() string { return "q" }, 79 | fetch: func() { close(fetchCh) }, 80 | acknowledge: func(m *Msg) { ackCh <- m }, 81 | ready: func() chan bool { return readyCh }, 82 | messages: func() chan *Msg { return msgCh }, 83 | close: func() { 84 | dfClosedLock.Lock() 85 | defer dfClosedLock.Unlock() 86 | dfClosed = true 87 | }, 88 | closed: func() bool { 89 | dfClosedLock.Lock() 90 | defer dfClosedLock.Unlock() 91 | return dfClosed 92 | }, 93 | } 94 | 95 | cc := NewCallCounter() 96 | 97 | w := newWorker(testLogger, "q", 2, cc.F) 98 | 99 | var wg sync.WaitGroup 100 | wg.Add(1) 101 | go func() { 102 | w.start(&df) 103 | wg.Done() 104 | }() 105 | 106 | // This block delays until the entire worker is started. 107 | // In order for a message to be consumed, at least one task runner 108 | // must be started. We consume the message off of ackCh for sanity. 
109 | // Acquiring and then releasing the runnersLock ensures that start
110 | // has finished its setup work
111 |
112 | <-fetchCh // We should be sure that Fetch got called before providing any messages
113 | msgCh <- cc.msg()
114 | <-ackCh
115 | w.runnersLock.Lock()
116 | w.runnersLock.Unlock()
117 |
118 | assert.True(t, w.running)
119 | assert.Len(t, w.runners, 2)
120 | assert.Equal(t, w.inProgressQueue, df.InProgressQueue())
121 |
122 | t.Run("cannot start while running", func(t *testing.T) {
123 | w.start(&df)
124 | // This test would time out if w.start doesn't return immediately
125 | })
126 |
127 | t.Run(".inProgressMessages", func(t *testing.T) {
128 |
129 | // None running
130 | msgs := w.inProgressMessages()
131 | assert.Empty(t, msgs)
132 |
133 | // Enqueue one
134 | msgCh <- cc.syncMsg()
135 | <-cc.syncCh
136 | msgs = w.inProgressMessages()
137 | assert.Len(t, msgs, 1)
138 |
139 | // Enqueue another
140 | msgCh <- cc.syncMsg()
141 | <-cc.syncCh
142 | msgs = w.inProgressMessages()
143 | assert.Len(t, msgs, 2)
144 |
145 | // allow one to finish
146 | cc.ackSyncCh <- true
147 | <-ackCh
148 | msgs = w.inProgressMessages()
149 | assert.Len(t, msgs, 1)
150 |
151 | // allow the other to finish
152 | cc.ackSyncCh <- true
153 | <-ackCh
154 | msgs = w.inProgressMessages()
155 | assert.Empty(t, msgs)
156 | })
157 |
158 | w.quit()
159 | wg.Wait()
160 |
161 | }
162 |
163 | func TestWorkerProcessesAndAcksMessages(t *testing.T) {
164 | testLogger := log.New(os.Stdout, "test-go-workers2: ", log.Ldate|log.Lmicroseconds)
165 | readyCh := make(chan bool)
166 | msgCh := make(chan *Msg)
167 | ackCh := make(chan *Msg)
168 | closeCh := make(chan bool)
169 |
170 | df := dummyFetcher{
171 | queue: func() string { return "q" },
172 | inProgressQueue: func() string { return "inprog-q" },
173 | fetch: func() { <-closeCh },
174 | acknowledge: func(m *Msg) { ackCh <- m },
175 | ready: func() chan bool { return readyCh },
176 | messages: func() chan *Msg { return msgCh },
177 | close: func() { close(closeCh) },
178 | closed: func() bool {
179 | select {
180 | case <-closeCh:
181 | return true
182 | default:
183 | return false
184 | }
185 | },
186 | }
187 |
188 | cc := NewCallCounter()
189 | w := newWorker(testLogger, "q", 1, cc.F)
190 |
191 | var wg sync.WaitGroup
192 | wg.Add(1)
193 | go func() {
194 | w.start(&df)
195 | wg.Done()
196 | }()
197 |
198 | // since we have concurrency 1, messages _must_ be processed in order
199 |
200 | msgCh <- cc.msg()
201 | ackedMsg := <-ackCh
202 | assert.True(t, ackedMsg.ack)
203 | assert.NotZero(t, ackedMsg.startedAt)
204 | assert.Equal(t, 1, cc.count)
205 |
206 | noAck := cc.noAckMsg()
207 | msgCh <- noAck
208 | msgCh <- cc.msg()
209 | ackedMsg = <-ackCh
210 | assert.False(t, noAck.ack)
211 | assert.NotZero(t, noAck.startedAt)
212 | assert.True(t, ackedMsg.ack)
213 | assert.NotZero(t, ackedMsg.startedAt)
214 | assert.Equal(t, 3, cc.count)
215 |
216 | w.quit()
217 | wg.Wait()
218 | }
219 |
-------------------------------------------------------------------------------- /producer_test.go:
1 | package workers
2 |
3 | import (
4 | "context"
5 | "crypto/tls"
6 | "encoding/json"
7 | "testing"
8 |
9 | "github.com/digitalocean/go-workers2/storage"
10 | "github.com/go-redis/redis/v8"
11 | "github.com/stretchr/testify/assert"
12 | )
13 |
14 | func TestProducer_Enqueue(t *testing.T) {
15 | ctx := context.Background()
16 |
17 | namespace := "prod"
18 | opts, err :=
SetupDefaultTestOptionsWithNamespace(namespace) 19 | assert.NoError(t, err) 20 | rc := opts.client 21 | 22 | p := &Producer{opts: opts} 23 | 24 | //makes the queue available 25 | p.Enqueue("enqueue1", "Add", []int{1, 2}) 26 | 27 | found, _ := rc.SIsMember(ctx, "prod:queues", "enqueue1").Result() 28 | assert.True(t, found) 29 | 30 | // adds a job to the queue 31 | nb, _ := rc.LLen(ctx, "prod:queue:enqueue2").Result() 32 | assert.Equal(t, int64(0), nb) 33 | 34 | p.Enqueue("enqueue2", "Add", []int{1, 2}) 35 | 36 | nb, _ = rc.LLen(ctx, "prod:queue:enqueue2").Result() 37 | assert.Equal(t, int64(1), nb) 38 | 39 | //saves the arguments 40 | p.Enqueue("enqueue3", "Compare", []string{"foo", "bar"}) 41 | 42 | bytes, _ := rc.LPop(ctx, "prod:queue:enqueue3").Result() 43 | var result map[string]interface{} 44 | err = json.Unmarshal([]byte(bytes), &result) 45 | assert.NoError(t, err) 46 | assert.Equal(t, "Compare", result["class"]) 47 | 48 | args := result["args"].([]interface{}) 49 | assert.Len(t, args, 2) 50 | assert.Equal(t, "foo", args[0]) 51 | assert.Equal(t, "bar", args[1]) 52 | 53 | //has a jid 54 | p.Enqueue("enqueue4", "Compare", []string{"foo", "bar"}) 55 | 56 | bytes, _ = rc.LPop(ctx, "prod:queue:enqueue4").Result() 57 | err = json.Unmarshal([]byte(bytes), &result) 58 | assert.NoError(t, err) 59 | assert.Equal(t, "Compare", result["class"]) 60 | 61 | jid := result["jid"].(string) 62 | assert.Len(t, jid, 24) 63 | 64 | //has enqueued_at that is close to now 65 | p.Enqueue("enqueue5", "Compare", []string{"foo", "bar"}) 66 | 67 | bytes, _ = rc.LPop(ctx, "prod:queue:enqueue5").Result() 68 | err = json.Unmarshal([]byte(bytes), &result) 69 | assert.NoError(t, err) 70 | assert.Equal(t, "Compare", result["class"]) 71 | 72 | ea := result["enqueued_at"].(float64) 73 | assert.InDelta(t, nowToSecondsWithNanoPrecision(), ea, 0.1) 74 | 75 | // has retry and retry_count when set 76 | p.EnqueueWithOptions("enqueue6", "Compare", []string{"foo", "bar"}, EnqueueOptions{RetryCount: 10, Retry: true, RetryMax: 21}) 77 | 78 | bytes, _ = rc.LPop(ctx, "prod:queue:enqueue6").Result() 79 | err = json.Unmarshal([]byte(bytes), &result) 80 | assert.NoError(t, err) 81 | assert.Equal(t, "Compare", result["class"]) 82 | 83 | retry := result["retry"].(bool) 84 | assert.True(t, retry) 85 | 86 | retryCount := int(result["retry_count"].(float64)) 87 | assert.Equal(t, 10, retryCount) 88 | 89 | retryMax := int(result["retry_max"].(float64)) 90 | assert.Equal(t, 21, retryMax) 91 | } 92 | 93 | func TestProducer_EnqueueIn(t *testing.T) { 94 | ctx := context.Background() 95 | 96 | namespace := "prod" 97 | opts, err := SetupDefaultTestOptionsWithNamespace(namespace) 98 | assert.NoError(t, err) 99 | rc := opts.client 100 | 101 | p := &Producer{opts: opts} 102 | 103 | scheduleQueue := namespace + ":" + storage.ScheduledJobsKey 104 | 105 | //has added a job in the scheduled queue 106 | _, err = p.EnqueueIn("enqueuein1", "Compare", 10, map[string]interface{}{"foo": "bar"}) 107 | assert.NoError(t, err) 108 | 109 | scheduledCount, err := rc.ZCard(ctx, scheduleQueue).Result() 110 | assert.NoError(t, err) 111 | assert.Equal(t, int64(1), scheduledCount) 112 | 113 | rc.Del(ctx, scheduleQueue) 114 | 115 | //has the correct 'queue' 116 | _, err = p.EnqueueIn("enqueuein2", "Compare", 10, map[string]interface{}{"foo": "bar"}) 117 | assert.NoError(t, err) 118 | 119 | var data EnqueueData 120 | elem, err := rc.ZRange(ctx, scheduleQueue, 0, -1).Result() 121 | assert.NoError(t, err) 122 | bytes := elem[0] 123 | err = json.Unmarshal([]byte(bytes), &data) 
124 | assert.NoError(t, err) 125 | 126 | assert.Equal(t, "enqueuein2", data.Queue) 127 | 128 | rc.Del(ctx, scheduleQueue) 129 | } 130 | 131 | func TestMultipleEnqueueOrder(t *testing.T) { 132 | ctx := context.Background() 133 | 134 | namespace := "prod" 135 | opts, err := SetupDefaultTestOptionsWithNamespace(namespace) 136 | assert.NoError(t, err) 137 | rc := opts.client 138 | 139 | p := &Producer{opts: opts} 140 | 141 | var msg1, _ = NewMsg("{\"key\":\"1\"}") 142 | _, err = p.Enqueue("testq1", "Compare", msg1.ToJson()) 143 | assert.NoError(t, err) 144 | 145 | var msg2, _ = NewMsg("{\"key\":\"2\"}") 146 | _, err = p.Enqueue("testq1", "Compare", msg2.ToJson()) 147 | assert.NoError(t, err) 148 | 149 | len, err := rc.LLen(ctx, "prod:queue:testq1").Result() 150 | assert.NoError(t, err) 151 | assert.Equal(t, int64(2), len) 152 | 153 | bytesMsg, err := rc.RPop(ctx, "prod:queue:testq1").Result() 154 | assert.NoError(t, err) 155 | var data EnqueueData 156 | err = json.Unmarshal([]byte(bytesMsg), &data) 157 | assert.NoError(t, err) 158 | actualMsg, err := NewMsg(data.Args.(string)) 159 | assert.NoError(t, err) 160 | assert.Equal(t, msg1.Get("key"), actualMsg.Get("key")) 161 | 162 | bytesMsg, err = rc.RPop(ctx, "prod:queue:testq1").Result() 163 | assert.NoError(t, err) 164 | err = json.Unmarshal([]byte(bytesMsg), &data) 165 | assert.NoError(t, err) 166 | actualMsg, err = NewMsg(data.Args.(string)) 167 | assert.NoError(t, err) 168 | assert.Equal(t, msg2.Get("key"), actualMsg.Get("key")) 169 | 170 | len, err = rc.LLen(ctx, "prod:queue:testq1").Result() 171 | assert.NoError(t, err) 172 | assert.Equal(t, int64(0), len) 173 | } 174 | 175 | func TestNewProducerWithRedisClient(t *testing.T) { 176 | namespace := "prod" 177 | opts := Options{ 178 | ProcessID: "1", 179 | Namespace: namespace, 180 | } 181 | 182 | client := redis.NewClient(&redis.Options{ 183 | IdleTimeout: 1, 184 | Password: "ab", 185 | DB: 2, 186 | TLSConfig: &tls.Config{ServerName: "test_tls3"}, 187 | }) 188 | 189 | producer, err := NewProducerWithRedisClient(opts, client) 190 | 191 | assert.NoError(t, err) 192 | assert.Equal(t, namespace+":", producer.opts.Namespace) 193 | 194 | assert.NotNil(t, producer.GetRedisClient()) 195 | assert.NotNil(t, producer.GetRedisClient().Options().TLSConfig) 196 | assert.Equal(t, "test_tls3", producer.GetRedisClient().Options().TLSConfig.ServerName) 197 | } 198 | 199 | func TestNewProducerWithRedisClientNoProcessID(t *testing.T) { 200 | namespace := "prod" 201 | opts := Options{ 202 | Namespace: namespace, 203 | } 204 | 205 | client := redis.NewClient(&redis.Options{ 206 | IdleTimeout: 1, 207 | Password: "ab", 208 | DB: 2, 209 | TLSConfig: &tls.Config{ServerName: "test_tls2"}, 210 | }) 211 | 212 | mgr, err := NewProducerWithRedisClient(opts, client) 213 | 214 | assert.Error(t, err) 215 | assert.Nil(t, mgr) 216 | } 217 | -------------------------------------------------------------------------------- /middleware_retry_test.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | const errorText = "AHHHH" 13 | 14 | var panickingFunc = func(message *Msg) error { 15 | panic(errors.New(errorText)) 16 | } 17 | 18 | var wares = NewMiddlewares(RetryMiddleware) 19 | 20 | func TestRetryQueue(t *testing.T) { 21 | ctx := context.Background() 22 | 23 | //puts messages in retry queue when they fail 24 | message, _ := 
NewMsg("{\"jid\":\"2\",\"retry\":true}") 25 | 26 | tests := []struct { 27 | name string 28 | f JobFunc 29 | }{ 30 | { 31 | name: "retry on panic", 32 | f: panickingFunc, 33 | }, 34 | { 35 | name: "retry on error", 36 | f: func(m *Msg) error { 37 | return errors.New("ERROR") 38 | }, 39 | }, 40 | } 41 | for _, tt := range tests { 42 | t.Run(tt.name, func(t *testing.T) { 43 | opts, err := SetupDefaultTestOptionsWithNamespace("prod") 44 | assert.NoError(t, err) 45 | 46 | mgr := &Manager{opts: opts} 47 | 48 | // Test panic 49 | wares.build("myqueue", mgr, tt.f)(message) 50 | 51 | retries, _ := opts.client.ZRange(ctx, retryQueue(opts.Namespace), 0, 1).Result() 52 | assert.Len(t, retries, 1) 53 | assert.Equal(t, message.ToJson(), retries[0]) 54 | }) 55 | } 56 | } 57 | 58 | func TestDisableRetries(t *testing.T) { 59 | ctx := context.Background() 60 | 61 | opts, err := SetupDefaultTestOptionsWithNamespace("prod") 62 | assert.NoError(t, err) 63 | 64 | mgr := &Manager{opts: opts} 65 | 66 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":false}") 67 | 68 | wares.build("myqueue", mgr, panickingFunc)(message) 69 | 70 | count, _ := opts.client.ZCard(ctx, retryQueue(opts.Namespace)).Result() 71 | assert.Equal(t, int64(0), count) 72 | } 73 | 74 | func TestNoDefaultRetry(t *testing.T) { 75 | ctx := context.Background() 76 | 77 | opts, err := SetupDefaultTestOptionsWithNamespace("prod") 78 | assert.NoError(t, err) 79 | 80 | mgr := &Manager{opts: opts} 81 | 82 | //puts messages in retry queue when they fail 83 | message, _ := NewMsg("{\"jid\":\"2\"}") 84 | 85 | wares.build("myqueue", mgr, panickingFunc)(message) 86 | 87 | count, _ := opts.client.ZCard(ctx, retryQueue(opts.Namespace)).Result() 88 | assert.Equal(t, int64(0), count) 89 | } 90 | 91 | func TestNumericRetries(t *testing.T) { 92 | ctx := context.Background() 93 | 94 | opts, err := SetupDefaultTestOptionsWithNamespace("prod") 95 | assert.NoError(t, err) 96 | 97 | mgr := &Manager{opts: opts} 98 | 99 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true}") 100 | 101 | wares.build("myqueue", mgr, panickingFunc)(message) 102 | 103 | retries, _ := opts.client.ZRange(ctx, retryQueue(opts.Namespace), 0, 1).Result() 104 | assert.Equal(t, message.ToJson(), retries[0]) 105 | } 106 | 107 | func TestHandleNewFailedMessages(t *testing.T) { 108 | ctx := context.Background() 109 | 110 | opts, err := SetupDefaultTestOptionsWithNamespace("prod") 111 | assert.NoError(t, err) 112 | 113 | mgr := &Manager{opts: opts} 114 | 115 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true}") 116 | 117 | wares.build("prod:myqueue", mgr, panickingFunc)(message) 118 | 119 | retries, _ := opts.client.ZRange(ctx, retryQueue(opts.Namespace), 0, 1).Result() 120 | message, _ = NewMsg(retries[0]) 121 | 122 | queue, _ := message.Get("queue").String() 123 | errorMessage, _ := message.Get("error_message").String() 124 | errorClass, _ := message.Get("error_class").String() 125 | retryCount, _ := message.Get("retry_count").Int() 126 | errorBacktrace, _ := message.Get("error_backtrace").String() 127 | failedAt, _ := message.Get("failed_at").String() 128 | 129 | assert.Equal(t, "prod:myqueue", queue) 130 | assert.Equal(t, errorText, errorMessage) 131 | assert.Equal(t, "", errorClass) 132 | assert.Equal(t, 0, retryCount) 133 | assert.Equal(t, "", errorBacktrace) 134 | 135 | layout := "2006-01-02 15:04:05 MST" 136 | assert.Equal(t, time.Now().UTC().Format(layout), failedAt) 137 | } 138 | 139 | func TestRecurringFailedMessages(t *testing.T) { 140 | ctx := context.Background() 141 | 142 | opts, err 
:= SetupDefaultTestOptionsWithNamespace("prod")
143 | assert.NoError(t, err)
144 |
145 | mgr := &Manager{opts: opts}
146 |
147 | layout := "2006-01-02 15:04:05 MST"
148 |
149 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true,\"queue\":\"default\",\"error_message\":\"bam\",\"failed_at\":\"2013-07-20 14:03:42 UTC\",\"retry_count\":10}")
150 |
151 | wares.build("prod:myqueue", mgr, panickingFunc)(message)
152 |
153 | retries, _ := opts.client.ZRange(ctx, retryQueue(opts.Namespace), 0, 1).Result()
154 | message, _ = NewMsg(retries[0])
155 |
156 | queue, _ := message.Get("queue").String()
157 | errorMessage, _ := message.Get("error_message").String()
158 | retryCount, _ := message.Get("retry_count").Int()
159 | failedAt, _ := message.Get("failed_at").String()
160 | retriedAt, _ := message.Get("retried_at").String()
161 |
162 | assert.Equal(t, "prod:myqueue", queue)
163 | assert.Equal(t, errorText, errorMessage)
164 | assert.Equal(t, 11, retryCount)
165 | assert.Equal(t, "2013-07-20 14:03:42 UTC", failedAt)
166 | assert.Equal(t, time.Now().UTC().Format(layout), retriedAt)
167 | }
168 |
169 | func TestRecurringFailedMessagesWithMax(t *testing.T) {
170 | ctx := context.Background()
171 |
172 | opts, err := SetupDefaultTestOptionsWithNamespace("prod")
173 | assert.NoError(t, err)
174 |
175 | mgr := &Manager{opts: opts}
176 |
177 | layout := "2006-01-02 15:04:05 MST"
178 |
179 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true,\"retry_max\":10,\"queue\":\"default\",\"error_message\":\"bam\",\"failed_at\":\"2013-07-20 14:03:42 UTC\",\"retry_count\":8}")
180 |
181 | wares.build("prod:myqueue", mgr, panickingFunc)(message)
182 |
183 | retries, _ := opts.client.ZRange(ctx, retryQueue(opts.Namespace), 0, 1).Result()
184 | message, _ = NewMsg(retries[0])
185 |
186 | queue, _ := message.Get("queue").String()
187 | errorMessage, _ := message.Get("error_message").String()
188 | retryCount, _ := message.Get("retry_count").Int()
189 | failedAt, _ := message.Get("failed_at").String()
190 | retriedAt, _ := message.Get("retried_at").String()
191 |
192 | assert.Equal(t, "prod:myqueue", queue)
193 | assert.Equal(t, errorText, errorMessage)
194 | assert.Equal(t, 9, retryCount)
195 | assert.Equal(t, "2013-07-20 14:03:42 UTC", failedAt)
196 | assert.Equal(t, time.Now().UTC().Format(layout), retriedAt)
197 | }
198 |
199 | func TestRetryOnlyToMax(t *testing.T) {
200 | ctx := context.Background()
201 |
202 | opts, err := SetupDefaultTestOptionsWithNamespace("prod")
203 | assert.NoError(t, err)
204 |
205 | mgr := &Manager{opts: opts}
206 |
207 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true,\"retry_count\":25}")
208 |
209 | wares.build("prod:myqueue", mgr, panickingFunc)(message)
210 |
211 | count, _ := opts.client.ZCard(ctx, retryQueue(opts.Namespace)).Result()
212 | assert.Equal(t, int64(0), count)
213 | }
214 |
215 | func TestRetryMaxCallsRetryExhaustionHandler(t *testing.T) {
216 | ctx := context.Background()
217 |
218 | opts, err := SetupDefaultTestOptionsWithNamespace("prod")
219 | assert.NoError(t, err)
220 |
221 | mgr := &Manager{opts: opts}
222 | var resultQueue string
223 | var resultError error
224 | var resultMessage *Msg
225 | mgr.SetRetriesExhaustedHandlers(func(queue string, message *Msg, err error) {
226 | resultQueue = queue
227 | resultError = err
228 | resultMessage = message
229 | })
230 |
231 | message, _ := NewMsg("{\"class\":\"clazz\",\"jid\":\"2\",\"retry\":true,\"retry_count\":25}")
232 |
233 | wares.build("prod:myqueue", mgr, panickingFunc)(message)
234 |
235 | count, _ := opts.client.ZCard(ctx,
retryQueue(opts.Namespace)).Result()
236 | assert.Equal(t, int64(0), count)
237 | assert.Equal(t, "prod:myqueue", resultQueue)
238 | assert.Equal(t, errorText, resultError.Error())
239 | assert.Equal(t, "clazz", resultMessage.Class())
240 | assert.Equal(t, "2", resultMessage.Jid())
241 | assert.NotNil(t, resultMessage.Args())
242 | }
243 |
244 | func TestRetryOnlyToCustomMax(t *testing.T) {
245 | ctx := context.Background()
246 |
247 | opts, err := SetupDefaultTestOptionsWithNamespace("prod")
248 | assert.NoError(t, err)
249 |
250 | mgr := &Manager{opts: opts}
251 |
252 | message, _ := NewMsg("{\"jid\":\"2\",\"retry\":true,\"retry_max\":3,\"retry_count\":3}") // retry_max (not max_retry) is the key the middleware reads
253 |
254 | wares.build("prod:myqueue", mgr, panickingFunc)(message)
255 |
256 | count, _ := opts.client.ZCard(ctx, retryQueue(opts.Namespace)).Result()
257 | assert.Equal(t, int64(0), count)
258 | }
259 |
-------------------------------------------------------------------------------- /storage/redis.go:
1 | package storage
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "log"
8 | "strconv"
9 | "time"
10 |
11 | "github.com/go-redis/redis/v8"
12 | )
13 |
14 | type redisStore struct {
15 | namespace string
16 |
17 | client *redis.Client
18 | logger *log.Logger
19 | }
20 |
21 | // Compile-time check to ensure that Redis store does in fact implement the Store interface
22 | var _ Store = &redisStore{}
23 |
24 | // NewRedisStore returns a new Redis store with the given namespace and preconfigured client
25 | func NewRedisStore(namespace string, client *redis.Client, logger *log.Logger) Store {
26 | return &redisStore{
27 | namespace: namespace,
28 | client: client,
29 | logger: logger,
30 | }
31 | }
32 |
33 | func (r *redisStore) DequeueMessage(ctx context.Context, queue string, inprogressQueue string, timeout time.Duration) (string, error) {
34 | message, err := r.client.BRPopLPush(ctx, r.getQueueName(queue), r.getQueueName(inprogressQueue), timeout).Result()
35 |
36 | if err != nil {
37 | // If redis returns null, the queue is empty.
38 | // Just ignore empty queue errors; print all other errors.
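// redis.Nil from BRPopLPush means the blocking pop timed out with nothing to
// dequeue; it is mapped to the package-level NoMessage sentinel below so that
// callers can tell an empty queue apart from a real Redis failure.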
39 | if err != redis.Nil {
40 | r.logger.Println("ERR: ", queue, err)
41 | } else {
42 | err = NoMessage
43 | }
44 |
45 | time.Sleep(1 * time.Second)
46 |
47 | return "", err
48 | }
49 |
50 | return message, nil
51 | }
52 |
53 | func (r *redisStore) CheckRtt(ctx context.Context) int64 {
54 | start := time.Now()
55 | r.client.Ping(ctx)
56 | elapsed := time.Since(start)
57 |
58 | return elapsed.Microseconds()
59 | }
60 |
61 | func (r *redisStore) getHeartbeat(ctx context.Context, heartbeatID string) (*Heartbeat, error) {
62 | heartbeatProperties := []string{"beat", "quiet", "busy", "rtt_us", "rss", "info", "manager_priority", "active_manager", "worker_heartbeats"}
63 | booleanProperties := []string{"quiet", "active_manager"}
64 | managerKey := GetManagerKey(r.namespace, heartbeatID)
65 | heartbeatPropertyValues, err := r.client.HMGet(ctx, managerKey, heartbeatProperties...).Result()
66 | if err != nil {
67 | return nil, err
68 | }
69 |
70 | heartbeatMap := make(map[string]interface{})
71 | hasPropertyValue := false
72 | for i, heartbeatProperty := range heartbeatProperties {
73 | if heartbeatPropertyValues[i] != nil {
74 | heartbeatMap[heartbeatProperty] = heartbeatPropertyValues[i]
75 | hasPropertyValue = true
76 | }
77 | }
78 |
79 | for _, booleanProperty := range booleanProperties {
80 | if heartbeatMap[booleanProperty] == "1" {
81 | heartbeatMap[booleanProperty] = "true"
82 | } else {
83 | heartbeatMap[booleanProperty] = "false"
84 | }
85 | }
86 |
87 | workerHeartbeats := []WorkerHeartbeat{}
88 | err = json.Unmarshal([]byte(fmt.Sprintf("%v", heartbeatMap["worker_heartbeats"])), &workerHeartbeats)
89 | if err != nil {
90 | return nil, err
91 | }
92 | delete(heartbeatMap, "worker_heartbeats")
93 |
94 | if !hasPropertyValue {
95 | return nil, nil
96 | }
97 |
98 | heartbeatJson, err := json.Marshal(heartbeatMap)
99 | if err != nil {
100 | return nil, err
101 | }
102 |
103 | heartbeat := Heartbeat{}
104 | err = json.Unmarshal(heartbeatJson, &heartbeat)
105 | if err != nil {
106 | return nil, err
107 | }
108 | heartbeat.Identity = heartbeatID
109 | heartbeat.WorkerHeartbeats = workerHeartbeats
110 | return &heartbeat, nil
111 | }
112 |
113 | func (r *redisStore) GetAllHeartbeats(ctx context.Context) ([]*Heartbeat, error) {
114 | var heartbeats []*Heartbeat
115 |
116 | heartbeatIDs, err := r.getHeartbeatIDs(ctx)
117 | if len(heartbeatIDs) == 0 {
118 | return nil, err
119 | }
120 | for _, heartbeatID := range heartbeatIDs {
121 | heartbeat, err := r.getHeartbeat(ctx, heartbeatID)
122 | if err != nil {
123 | return nil, err
124 | }
125 | if heartbeat != nil {
126 | heartbeats = append(heartbeats, heartbeat)
127 | }
128 | }
129 | return heartbeats, nil
130 | }
131 |
132 | func (r *redisStore) getHeartbeatIDs(ctx context.Context) ([]string, error) {
133 | heartbeatIDs, err := r.client.SMembers(ctx, GetProcessesKey(r.namespace)).Result()
134 | if err != nil && err != redis.Nil {
135 | return nil, err
136 | }
137 | return heartbeatIDs, nil
138 | }
139 |
140 | func (r *redisStore) SendHeartbeat(ctx context.Context, heartbeat *Heartbeat) error {
141 | pipe := r.client.Pipeline()
142 | rtt := r.CheckRtt(ctx)
143 |
144 | managerKey := GetManagerKey(r.namespace, heartbeat.Identity)
145 | pipe.SAdd(ctx, GetProcessesKey(r.namespace), heartbeat.Identity) // register this manager's identity in the sidekiq processes set (the key is namespaced; the identity is not)
146 |
147 | workerHeartbeats, err := json.Marshal(heartbeat.WorkerHeartbeats)
148 | if err != nil {
149 | return err
150 | }
151 |
152 | pipe.HMSet(ctx, managerKey,
153 | "beat", heartbeat.Beat,
154
| "quiet", heartbeat.Quiet, 155 | "busy", heartbeat.Busy, 156 | "rtt_us", rtt, 157 | "rss", heartbeat.RSS, 158 | "info", heartbeat.Info, 159 | "manager_priority", heartbeat.ManagerPriority, 160 | "active_manager", heartbeat.ActiveManager, 161 | "worker_heartbeats", workerHeartbeats) 162 | 163 | _, err = pipe.Exec(ctx) 164 | if err != nil && err != redis.Nil { 165 | return err 166 | } 167 | 168 | return nil 169 | } 170 | 171 | func (r *redisStore) getTaskRunnerID(pid int, tid string) string { 172 | return fmt.Sprintf("%d-%s", pid, tid) 173 | } 174 | 175 | func (r *redisStore) RequeueMessagesFromInProgressQueue(ctx context.Context, inprogressQueue, queue string) ([]string, error) { 176 | var requeuedMsgs []string 177 | for { 178 | msg, err := r.client.BRPopLPush(ctx, r.getQueueName(inprogressQueue), r.getQueueName(queue), 1*time.Second).Result() 179 | 180 | if err != nil { 181 | if err == redis.Nil { 182 | break 183 | } 184 | return requeuedMsgs, err 185 | } 186 | requeuedMsgs = append(requeuedMsgs, msg) 187 | } 188 | return requeuedMsgs, nil 189 | } 190 | 191 | func (r *redisStore) RemoveHeartbeat(ctx context.Context, heartbeatID string) error { 192 | managerKey := GetManagerKey(r.namespace, heartbeatID) 193 | 194 | pipe := r.client.Pipeline() 195 | pipe.Del(ctx, managerKey) 196 | 197 | workersKey := GetWorkersKey(managerKey) 198 | pipe.Del(ctx, workersKey) 199 | 200 | pipe.SRem(ctx, GetProcessesKey(r.namespace), heartbeatID) 201 | 202 | _, err := pipe.Exec(ctx) 203 | if err != nil && err != redis.Nil { 204 | return err 205 | } 206 | 207 | return nil 208 | } 209 | 210 | func (r *redisStore) EnqueueMessage(ctx context.Context, queue string, priority float64, message string) error { 211 | _, err := r.client.ZAdd(ctx, r.getQueueName(queue), &redis.Z{ 212 | Score: priority, 213 | Member: message, 214 | }).Result() 215 | 216 | return err 217 | } 218 | 219 | func (r *redisStore) EnqueueScheduledMessage(ctx context.Context, priority float64, message string) error { 220 | _, err := r.client.ZAdd(ctx, r.namespace+ScheduledJobsKey, &redis.Z{ 221 | Score: priority, 222 | Member: message, 223 | }).Result() 224 | 225 | return err 226 | } 227 | 228 | func (r *redisStore) DequeueScheduledMessage(ctx context.Context, priority float64) (string, error) { 229 | key := r.namespace + ScheduledJobsKey 230 | 231 | messages, err := r.client.ZRangeByScore(ctx, key, &redis.ZRangeBy{ 232 | Min: "-inf", 233 | Max: strconv.FormatFloat(priority, 'f', -1, 64), 234 | Offset: 0, 235 | Count: 1, 236 | }).Result() 237 | 238 | if err != nil { 239 | return "", err 240 | } 241 | 242 | if len(messages) == 0 { 243 | return "", NoMessage 244 | } 245 | 246 | removed, err := r.client.ZRem(ctx, key, messages[0]).Result() 247 | if err != nil { 248 | return "", err 249 | } 250 | 251 | if removed == 0 { 252 | return "", NoMessage 253 | } 254 | 255 | return messages[0], nil 256 | } 257 | 258 | func (r *redisStore) EnqueueRetriedMessage(ctx context.Context, priority float64, message string) error { 259 | _, err := r.client.ZAdd(ctx, r.namespace+RetryKey, &redis.Z{ 260 | Score: priority, 261 | Member: message, 262 | }).Result() 263 | 264 | return err 265 | } 266 | 267 | func (r *redisStore) DequeueRetriedMessage(ctx context.Context, priority float64) (string, error) { 268 | key := r.namespace + RetryKey 269 | 270 | messages, err := r.client.ZRangeByScore(ctx, key, &redis.ZRangeBy{ 271 | Min: "-inf", 272 | Max: strconv.FormatFloat(priority, 'f', -1, 64), 273 | Offset: 0, 274 | Count: 1, 275 | }).Result() 276 | 277 | if err != nil { 278 | 
return "", err 279 | } 280 | 281 | if len(messages) == 0 { 282 | return "", NoMessage 283 | } 284 | 285 | removed, err := r.client.ZRem(ctx, key, messages[0]).Result() 286 | if err != nil { 287 | return "", err 288 | } 289 | 290 | if removed == 0 { 291 | return "", NoMessage 292 | } 293 | 294 | return messages[0], nil 295 | } 296 | 297 | func (r *redisStore) EnqueueMessageNow(ctx context.Context, queue string, message string) error { 298 | queue = r.namespace + "queue:" + queue 299 | _, err := r.client.LPush(ctx, queue, message).Result() 300 | return err 301 | } 302 | 303 | func (r *redisStore) GetAllRetries(ctx context.Context) (*Retries, error) { 304 | pipe := r.client.Pipeline() 305 | 306 | retryCountGet := pipe.ZCard(ctx, r.namespace+RetryKey) 307 | retryJobsGet := pipe.ZRange(ctx, r.namespace+RetryKey, 0, -1) 308 | 309 | _, err := pipe.Exec(ctx) 310 | if err != nil && err != redis.Nil { 311 | return nil, err 312 | } 313 | 314 | return &Retries{ 315 | RetryJobs: retryJobsGet.Val(), 316 | TotalRetryCount: retryCountGet.Val(), 317 | }, nil 318 | } 319 | 320 | func (r *redisStore) GetAllStats(ctx context.Context, queues []string) (*Stats, error) { 321 | pipe := r.client.Pipeline() 322 | 323 | pGet := pipe.Get(ctx, r.namespace+"stat:processed") 324 | fGet := pipe.Get(ctx, r.namespace+"stat:failed") 325 | rGet := pipe.ZCard(ctx, r.namespace+RetryKey) 326 | qLen := map[string]*redis.IntCmd{} 327 | 328 | for _, queue := range queues { 329 | qLen[r.namespace+queue] = pipe.LLen(ctx, fmt.Sprintf("%squeue:%s", r.namespace, queue)) 330 | } 331 | 332 | _, err := pipe.Exec(ctx) 333 | if err != nil && err != redis.Nil { 334 | return nil, err 335 | } 336 | 337 | stats := &Stats{ 338 | Enqueued: make(map[string]int64), 339 | } 340 | 341 | stats.Processed, _ = strconv.ParseInt(pGet.Val(), 10, 64) 342 | stats.Failed, _ = strconv.ParseInt(fGet.Val(), 10, 64) 343 | stats.RetryCount = rGet.Val() 344 | 345 | for q, l := range qLen { 346 | stats.Enqueued[q] = l.Val() 347 | } 348 | 349 | return stats, nil 350 | } 351 | 352 | func (r *redisStore) AcknowledgeMessage(ctx context.Context, queue string, message string) error { 353 | _, err := r.client.LRem(ctx, r.getQueueName(queue), -1, message).Result() 354 | 355 | return err 356 | } 357 | 358 | func (r *redisStore) CreateQueue(ctx context.Context, queue string) error { 359 | _, err := r.client.SAdd(ctx, r.namespace+"queues", queue).Result() 360 | return err 361 | } 362 | 363 | func (r *redisStore) ListMessages(ctx context.Context, queue string) ([]string, error) { 364 | messages, err := r.client.LRange(ctx, r.getQueueName(queue), 0, -1).Result() 365 | if err != nil { 366 | return nil, err 367 | } 368 | 369 | return messages, nil 370 | } 371 | 372 | func (r *redisStore) IncrementStats(ctx context.Context, metric string) error { 373 | rc := r.client 374 | 375 | today := time.Now().UTC().Format("2006-01-02") 376 | 377 | pipe := rc.Pipeline() 378 | pipe.Incr(ctx, r.namespace+"stat:"+metric) 379 | pipe.Incr(ctx, r.namespace+"stat:"+metric+":"+today) 380 | 381 | if _, err := pipe.Exec(ctx); err != nil { 382 | return err 383 | } 384 | 385 | return nil 386 | } 387 | 388 | func (r *redisStore) getQueueName(queue string) string { 389 | return r.namespace + "queue:" + queue 390 | } 391 | 392 | func (r *redisStore) GetTime(ctx context.Context) (time.Time, error) { 393 | return r.client.Time(ctx).Result() 394 | } 395 | -------------------------------------------------------------------------------- /manager.go: 
-------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "sort" 8 | "sync" 9 | "time" 10 | 11 | "github.com/digitalocean/go-workers2/storage" 12 | "github.com/go-redis/redis/v8" 13 | "github.com/google/uuid" 14 | ) 15 | 16 | // Manager coordinates work, workers, and signaling needed for job processing 17 | type Manager struct { 18 | uuid string 19 | opts Options 20 | schedule *scheduledWorker 21 | workers []*worker 22 | lock sync.Mutex 23 | signal chan os.Signal 24 | running bool 25 | stop chan bool 26 | active bool 27 | logger *log.Logger 28 | startedAt time.Time 29 | processNonce string 30 | heartbeatChannel chan bool 31 | 32 | beforeStartHooks []func() 33 | duringDrainHooks []func() 34 | afterActiveChangeHooks []AfterActiveChangeFunc 35 | 36 | afterHeartbeatHooks []afterHeartbeatFunc 37 | 38 | retriesExhaustedHandlers []RetriesExhaustedFunc 39 | } 40 | 41 | type staleMessageUpdate struct { 42 | Queue string 43 | InprogressQueue string 44 | RequeuedMsgs []string 45 | } 46 | 47 | type AfterActiveChangeFunc func(manager *Manager, activateManager, deactivateManager bool) 48 | 49 | type UpdateActiveManager struct { 50 | ActivateManager bool 51 | DeactivateManager bool 52 | } 53 | 54 | // NewManager creates a new manager with the provided options 55 | func NewManager(options Options) (*Manager, error) { 56 | options, err := processOptions(options) 57 | if err != nil { 58 | return nil, err 59 | } 60 | return newManager(options) 61 | } 62 | 63 | // GetRedisClient returns the Redis client used by the manager 64 | func (m *Manager) GetRedisClient() *redis.Client { 65 | return m.opts.client 66 | } 67 | 68 | // NewManagerWithRedisClient creates a new manager with the provided options and a pre-configured Redis client 69 | func NewManagerWithRedisClient(options Options, client *redis.Client) (*Manager, error) { 70 | options, err := processOptionsWithRedisClient(options, client) 71 | if err != nil { 72 | return nil, err 73 | } 74 | return newManager(options) 75 | } 76 | 77 | func newManager(processedOptions Options) (*Manager, error) { 78 | processNonce, err := GenerateProcessNonce() 79 | if err != nil { 80 | return nil, err 81 | } 82 | 83 | manager := &Manager{ 84 | uuid: uuid.New().String(), 85 | logger: processedOptions.Logger, 86 | opts: processedOptions, 87 | processNonce: processNonce, 88 | active: !processedOptions.ManagerStartInactive, 89 | } 90 | if processedOptions.Heartbeat != nil && processedOptions.Heartbeat.PrioritizedManager != nil { 91 | manager.addAfterHeartbeatHooks(activateManagerByPriority) 92 | } 93 | return manager, nil 94 | } 95 | 96 | // AddWorker adds a new job processing worker 97 | func (m *Manager) AddWorker(queue string, concurrency int, job JobFunc, mids ...MiddlewareFunc) { 98 | m.lock.Lock() 99 | defer m.lock.Unlock() 100 | 101 | middlewareQueueName := m.opts.Namespace + queue 102 | if len(mids) == 0 { 103 | job = DefaultMiddlewares().build(middlewareQueueName, m, job) 104 | } else { 105 | job = NewMiddlewares(mids...).build(middlewareQueueName, m, job) 106 | } 107 | m.workers = append(m.workers, newWorker(m.logger, queue, concurrency, job)) 108 | } 109 | 110 | // AddBeforeStartHooks adds functions to be executed before the manager starts 111 | func (m *Manager) AddBeforeStartHooks(hooks ...func()) { 112 | m.lock.Lock() 113 | defer m.lock.Unlock() 114 | m.beforeStartHooks = append(m.beforeStartHooks, hooks...)
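// hooks registered here run on every subsequent call to Run, before the
// workers and the scheduled worker are started (see Run below)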
115 | } 116 | 117 | // AddDuringDrainHooks adds functions to be executed during a drain operation 118 | func (m *Manager) AddDuringDrainHooks(hooks ...func()) { 119 | m.lock.Lock() 120 | defer m.lock.Unlock() 121 | m.duringDrainHooks = append(m.duringDrainHooks, hooks...) 122 | } 123 | 124 | func (m *Manager) addAfterHeartbeatHooks(hooks ...afterHeartbeatFunc) { 125 | m.lock.Lock() 126 | defer m.lock.Unlock() 127 | m.afterHeartbeatHooks = append(m.afterHeartbeatHooks, hooks...) 128 | } 129 | 130 | func (m *Manager) AddAfterActiveChangeHooks(hooks ...AfterActiveChangeFunc) { 131 | m.lock.Lock() 132 | defer m.lock.Unlock() 133 | m.afterActiveChangeHooks = append(m.afterActiveChangeHooks, hooks...) 134 | } 135 | 136 | // SetRetriesExhaustedHandlers sets function(s) that will be sequentially executed when retries are exhausted for a job. 137 | func (m *Manager) SetRetriesExhaustedHandlers(handlers ...RetriesExhaustedFunc) { 138 | m.lock.Lock() 139 | defer m.lock.Unlock() 140 | m.retriesExhaustedHandlers = handlers 141 | } 142 | 143 | // AddRetriesExhaustedHandlers adds function(s) to be executed when retries are exhausted for a job. 144 | func (m *Manager) AddRetriesExhaustedHandlers(handlers ...RetriesExhaustedFunc) { 145 | m.lock.Lock() 146 | defer m.lock.Unlock() 147 | m.retriesExhaustedHandlers = append(m.retriesExhaustedHandlers, handlers...) 148 | } 149 | 150 | // Run starts all workers under this Manager and blocks until they exit. 151 | func (m *Manager) Run() { 152 | m.startedAt = time.Now() 153 | 154 | m.lock.Lock() 155 | defer m.lock.Unlock() 156 | if m.running { 157 | return // Can't start if we're already running! 158 | } 159 | m.running = true 160 | 161 | for _, h := range m.beforeStartHooks { 162 | h() 163 | } 164 | 165 | globalAPIServer.registerManager(m) 166 | 167 | var wg sync.WaitGroup 168 | 169 | wg.Add(1) 170 | m.signal = make(chan os.Signal, 1) 171 | go func() { 172 | m.handleSignals() 173 | wg.Done() 174 | }() 175 | 176 | wg.Add(len(m.workers)) 177 | for i := range m.workers { 178 | w := m.workers[i] 179 | go func() { 180 | fetcher := newSimpleFetcher(w.queue, *m.Opts(), m.IsActive()) 181 | w.start(fetcher) 182 | wg.Done() 183 | }() 184 | } 185 | m.schedule = newScheduledWorker(m.opts) 186 | 187 | wg.Add(1) 188 | go func() { 189 | m.schedule.run() 190 | wg.Done() 191 | }() 192 | 193 | if m.opts.Heartbeat != nil { 194 | go m.startHeartbeat() 195 | } 196 | 197 | // Release the lock so that Stop can acquire it 198 | m.lock.Unlock() 199 | wg.Wait() 200 | // Regain the lock 201 | m.lock.Lock() 202 | globalAPIServer.deregisterManager(m) 203 | m.running = false 204 | } 205 | 206 | // Stop stops all workers under this Manager and returns immediately. 207 | func (m *Manager) Stop() { 208 | m.lock.Lock() 209 | defer m.lock.Unlock() 210 | if !m.running { 211 | return 212 | } 213 | if m.opts.Heartbeat != nil { 214 | m.stopHeartbeat() 215 | } 216 | for _, w := range m.workers { 217 | w.quit() 218 | } 219 | m.schedule.quit() 220 | for _, h := range m.duringDrainHooks { 221 | h() 222 | } 223 | m.stopSignalHandler() 224 | } 225 | 226 | func (m *Manager) Opts() *Options { 227 | return &m.opts 228 | } 229 | 230 | func (m *Manager) inProgressMessages() map[string][]*Msg { 231 | m.lock.Lock() 232 | defer m.lock.Unlock() 233 | res := map[string][]*Msg{} 234 | for _, w := range m.workers { 235 | res[w.queue] = append(res[w.queue], w.inProgressMessages()...)
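// in-flight messages are grouped by the worker's unnamespaced queue name;
// GetStats below re-applies the namespace prefix when it reports them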
236 | } 237 | return res 238 | } 239 | 240 | // Producer creates a new work producer with configuration identical to the manager 241 | func (m *Manager) Producer() *Producer { 242 | return &Producer{opts: m.opts} 243 | } 244 | 245 | // GetStats returns the set of stats for the manager 246 | func (m *Manager) GetStats() (Stats, error) { 247 | stats := Stats{ 248 | Jobs: map[string][]JobStatus{}, 249 | Enqueued: map[string]int64{}, 250 | Name: m.opts.ManagerDisplayName, 251 | } 252 | var q []string 253 | 254 | inProgress := m.inProgressMessages() 255 | ns := m.opts.Namespace 256 | 257 | for queue, msgs := range inProgress { 258 | var jobs []JobStatus 259 | for _, msg := range msgs { 260 | jobs = append(jobs, JobStatus{ 261 | Message: msg, 262 | StartedAt: msg.startedAt, 263 | }) 264 | } 265 | stats.Jobs[ns+queue] = jobs 266 | q = append(q, queue) 267 | } 268 | 269 | storeStats, err := m.opts.store.GetAllStats(context.Background(), q) 270 | 271 | if err != nil { 272 | return stats, err 273 | } 274 | 275 | stats.Processed = storeStats.Processed 276 | stats.Failed = storeStats.Failed 277 | stats.RetryCount = storeStats.RetryCount 278 | 279 | for q, l := range storeStats.Enqueued { 280 | stats.Enqueued[q] = l 281 | } 282 | 283 | return stats, nil 284 | } 285 | 286 | // GetRetries returns the set of retry jobs for the manager 287 | func (m *Manager) GetRetries(page uint64, pageSize int64, match string) (Retries, error) { 288 | // TODO: add back pagination and filtering 289 | 290 | storeRetries, err := m.opts.store.GetAllRetries(context.Background()) 291 | if err != nil { 292 | return Retries{}, err 293 | } 294 | 295 | var retryJobs []*Msg 296 | for _, r := range storeRetries.RetryJobs { 297 | // parse json from string of retry data 298 | retryJob, err := NewMsg(r) 299 | if err != nil { 300 | return Retries{}, err 301 | } 302 | 303 | retryJobs = append(retryJobs, retryJob) 304 | } 305 | 306 | return Retries{ 307 | TotalRetryCount: storeRetries.TotalRetryCount, 308 | RetryJobs: retryJobs, 309 | }, nil 310 | } 311 | 312 | func (m *Manager) startHeartbeat() error { 313 | heartbeatTicker := time.NewTicker(m.opts.Heartbeat.Interval) 314 | m.heartbeatChannel = make(chan bool, 1) 315 | defer heartbeatTicker.Stop() // release the ticker on every return path 316 | for { 317 | select { 318 | case <-heartbeatTicker.C: 319 | heartbeatTime, err := m.opts.store.GetTime(context.Background()) 320 | if err != nil { 321 | m.logger.Println("ERR: Failed to get heartbeat time", err) 322 | return err 323 | } 324 | heartbeat, err := m.sendHeartbeat(heartbeatTime) 325 | if err != nil { 326 | m.logger.Println("ERR: Failed to send heartbeat", err) 327 | return err 328 | } 329 | expireTS := heartbeatTime.Add(-m.opts.Heartbeat.HeartbeatTTL).Unix() 330 | staleMessageUpdates, err := m.handleAllExpiredHeartbeats(context.Background(), expireTS) 331 | if err != nil { 332 | m.logger.Println("ERR: error expiring heartbeat identities", err) 333 | return err 334 | } 335 | for _, afterHeartbeatHook := range m.afterHeartbeatHooks { 336 | err := afterHeartbeatHook(heartbeat, m, staleMessageUpdates) 337 | if err != nil { 338 | m.logger.Println("ERR: Failed to execute after heartbeat hook", err) 339 | return err 340 | } 341 | } 342 | case <-m.heartbeatChannel: 343 | return nil 344 | } 345 | } 346 | } 347 | 348 | func (m *Manager) handleAllExpiredHeartbeats(ctx context.Context, expireTS int64) ([]*staleMessageUpdate, error) { 349 | heartbeats, err := m.opts.store.GetAllHeartbeats(ctx) 350 | if err != nil && err != redis.Nil { 351 | return nil, err 352 | } 353 | 354 | var staleMessageUpdates []*staleMessageUpdate 355
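// For every heartbeat at or older than the expiry cutoff: requeue each
// worker's in-progress queue back onto its source queue (deduplicating
// in-progress queues shared by several workers), record anything that was
// requeued, and finally remove the dead manager's heartbeat keys.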
| for _, heartbeat := range heartbeats { 356 | if heartbeat.Beat > expireTS { 357 | continue 358 | } 359 | 360 | // requeue worker in-progress queues back to the queues 361 | requeuedInProgressQueues := make(map[string]bool) 362 | for _, workerHeartbeat := range heartbeat.WorkerHeartbeats { 363 | var requeuedMsgs []string 364 | if _, exists := requeuedInProgressQueues[workerHeartbeat.InProgressQueue]; exists { 365 | continue 366 | } 367 | requeuedMsgs, err = m.opts.store.RequeueMessagesFromInProgressQueue(ctx, workerHeartbeat.InProgressQueue, workerHeartbeat.Queue) 368 | if err != nil { 369 | return nil, err 370 | } 371 | requeuedInProgressQueues[workerHeartbeat.InProgressQueue] = true 372 | if len(requeuedMsgs) == 0 { 373 | continue 374 | } 375 | updatedStaleMessage := &staleMessageUpdate{ 376 | Queue: workerHeartbeat.Queue, 377 | InprogressQueue: workerHeartbeat.InProgressQueue, 378 | RequeuedMsgs: requeuedMsgs, 379 | } 380 | staleMessageUpdates = append(staleMessageUpdates, updatedStaleMessage) 381 | } 382 | err = m.opts.store.RemoveHeartbeat(ctx, heartbeat.Identity) 383 | if err != nil { 384 | return nil, err 385 | } 386 | 387 | } 388 | return staleMessageUpdates, nil 389 | } 390 | 391 | func (m *Manager) IsActive() bool { 392 | m.lock.Lock() 393 | defer m.lock.Unlock() 394 | return m.active 395 | } 396 | 397 | func (m *Manager) Active(active bool) { 398 | isActive := m.IsActive() 399 | activateManager := !isActive && active 400 | deactivateManager := isActive && !active 401 | if activateManager || deactivateManager { 402 | m.lock.Lock() 403 | m.active = active 404 | for _, worker := range m.workers { 405 | worker.fetcher.SetActive(active) 406 | } 407 | m.lock.Unlock() 408 | for _, hook := range m.afterActiveChangeHooks { 409 | hook(m, activateManager, deactivateManager) 410 | } 411 | } 412 | } 413 | 414 | func (m *Manager) stopHeartbeat() { 415 | m.heartbeatChannel <- true 416 | } 417 | 418 | func (m *Manager) sendHeartbeat(heartbeatTime time.Time) (*storage.Heartbeat, error) { 419 | heartbeat, err := m.buildHeartbeat(heartbeatTime, m.opts.Heartbeat.HeartbeatTTL) 420 | if err != nil { 421 | return heartbeat, err 422 | } 423 | 424 | err = m.opts.store.SendHeartbeat(context.Background(), heartbeat) 425 | return heartbeat, err 426 | } 427 | 428 | func activateManagerByPriority(heartbeat *storage.Heartbeat, manager *Manager, staleMessageUpdates []*staleMessageUpdate) error { 429 | ctx := context.Background() 430 | heartbeats, err := manager.opts.store.GetAllHeartbeats(ctx) 431 | if err != nil { 432 | return err 433 | } 434 | if len(heartbeats) == 0 { 435 | return nil 436 | } 437 | // order active heartbeats by manager priority descending 438 | sort.Slice(heartbeats, func(i, j int) bool { 439 | return heartbeats[i].ManagerPriority > heartbeats[j].ManagerPriority 440 | }) 441 | 442 | // if current manager's priority is high enough to be within total active manager threshold, set manager as active 443 | activeManager := false 444 | for i := 0; i < manager.opts.Heartbeat.PrioritizedManager.TotalActiveManagers && i < len(heartbeats); i++ { // bound by len(heartbeats) to avoid indexing past the known managers 445 | if heartbeats[i].Identity == heartbeat.Identity { 446 | activeManager = true 447 | break 448 | } 449 | } 450 | manager.Active(activeManager) 451 | return nil 452 | } 453 | -------------------------------------------------------------------------------- /manager_test.go: -------------------------------------------------------------------------------- 1 | package workers 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "fmt" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 |
"github.com/digitalocean/go-workers2/storage" 12 | "github.com/go-redis/redis/v8" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func newTestManager(opts Options, flushDB bool) (*Manager, error) { 17 | mgr, err := NewManager(opts) 18 | if mgr != nil && flushDB { 19 | mgr.opts.client.FlushDB(context.Background()).Result() 20 | } 21 | return mgr, err 22 | } 23 | 24 | func TestNewManager(t *testing.T) { 25 | namespace := "prod" 26 | opts := testOptionsWithNamespace(namespace) 27 | mgr, err := NewManager(opts) 28 | assert.NoError(t, err) 29 | assert.NotEmpty(t, mgr.uuid) 30 | assert.Equal(t, namespace+":", mgr.opts.Namespace) 31 | } 32 | 33 | func TestNewManagerWithRedisClient(t *testing.T) { 34 | namespace := "prod" 35 | opts := Options{ 36 | ProcessID: "1", 37 | Namespace: namespace, 38 | } 39 | 40 | client := redis.NewClient(&redis.Options{ 41 | IdleTimeout: 1, 42 | Password: "ab", 43 | DB: 2, 44 | TLSConfig: &tls.Config{ServerName: "test_tls2"}, 45 | }) 46 | 47 | mgr, err := NewManagerWithRedisClient(opts, client) 48 | 49 | assert.NoError(t, err) 50 | assert.NotEmpty(t, mgr.uuid) 51 | assert.Equal(t, namespace+":", mgr.opts.Namespace) 52 | 53 | assert.NotNil(t, mgr.GetRedisClient()) 54 | assert.NotNil(t, mgr.GetRedisClient().Options().TLSConfig) 55 | assert.Equal(t, "test_tls2", mgr.GetRedisClient().Options().TLSConfig.ServerName) 56 | } 57 | 58 | func TestNewManagerWithRedisClientNoProcessID(t *testing.T) { 59 | namespace := "prod" 60 | opts := Options{ 61 | Namespace: namespace, 62 | } 63 | 64 | client := redis.NewClient(&redis.Options{ 65 | IdleTimeout: 1, 66 | Password: "ab", 67 | DB: 2, 68 | TLSConfig: &tls.Config{ServerName: "test_tls2"}, 69 | }) 70 | 71 | mgr, err := NewManagerWithRedisClient(opts, client) 72 | 73 | assert.Error(t, err) 74 | assert.Nil(t, mgr) 75 | } 76 | 77 | func TestManager_AddBeforeStartHooks(t *testing.T) { 78 | namespace := "prod" 79 | opts := testOptionsWithNamespace(namespace) 80 | opts.PollInterval = time.Second 81 | mgr, err := newTestManager(opts, true) 82 | assert.NoError(t, err) 83 | var beforeStartCalled int 84 | mgr.AddBeforeStartHooks(func() { 85 | beforeStartCalled++ 86 | }) 87 | ch := make(chan bool) 88 | go func() { 89 | mgr.Run() 90 | ch <- true 91 | mgr.Run() 92 | ch <- true 93 | }() 94 | time.Sleep(time.Second) 95 | assert.Equal(t, 1, beforeStartCalled) 96 | mgr.Stop() 97 | <-ch 98 | time.Sleep(time.Second) 99 | assert.Equal(t, 2, beforeStartCalled) 100 | mgr.Stop() 101 | <-ch 102 | } 103 | 104 | func TestManager_AddDuringDrainHooks(t *testing.T) { 105 | namespace := "prod" 106 | opts := testOptionsWithNamespace(namespace) 107 | opts.PollInterval = time.Second 108 | mgr, err := newTestManager(opts, true) 109 | assert.NoError(t, err) 110 | var duringDrainCalled int 111 | mgr.AddDuringDrainHooks(func() { 112 | duringDrainCalled++ 113 | }) 114 | ch := make(chan bool) 115 | go func() { 116 | mgr.Run() 117 | ch <- true 118 | mgr.Run() 119 | ch <- true 120 | }() 121 | time.Sleep(time.Second) 122 | mgr.Stop() 123 | assert.Equal(t, 1, duringDrainCalled) 124 | <-ch 125 | time.Sleep(time.Second) 126 | mgr.Stop() 127 | assert.Equal(t, 2, duringDrainCalled) 128 | <-ch 129 | } 130 | 131 | func TestManager_AddWorker(t *testing.T) { 132 | namespace := "prod" 133 | opts := testOptionsWithNamespace(namespace) 134 | opts.PollInterval = time.Second 135 | mgr, err := NewManager(opts) 136 | assert.NoError(t, err) 137 | 138 | var handlerCalled bool 139 | var defaultMidCalled bool 140 | 141 | baseMids := defaultMiddlewares 142 | defaultMiddlewares = 
NewMiddlewares( 143 | func(queue string, mgr *Manager, next JobFunc) JobFunc { 144 | return func(message *Msg) (result error) { 145 | defaultMidCalled = true 146 | result = next(message) 147 | return 148 | } 149 | }, 150 | ) 151 | mgr.AddWorker("someq", 1, func(m *Msg) error { 152 | handlerCalled = true 153 | return nil 154 | }) 155 | assert.Len(t, mgr.workers, 1) 156 | assert.Equal(t, "someq", mgr.workers[0].queue) 157 | 158 | msg, _ := NewMsg("{}") 159 | 160 | mgr.workers[0].handler(msg) 161 | assert.True(t, defaultMidCalled) 162 | assert.True(t, handlerCalled) 163 | 164 | var midCalled bool 165 | 166 | mgr.workers = nil 167 | mgr.AddWorker("someq", 1, func(m *Msg) error { 168 | handlerCalled = true 169 | return nil 170 | }, func(queue string, mgr *Manager, next JobFunc) JobFunc { 171 | return func(message *Msg) (result error) { 172 | midCalled = true 173 | result = next(message) 174 | return 175 | } 176 | }) 177 | 178 | defaultMidCalled = false 179 | handlerCalled = false 180 | 181 | mgr.workers[0].handler(msg) 182 | assert.False(t, defaultMidCalled) 183 | assert.True(t, midCalled) 184 | assert.True(t, handlerCalled) 185 | 186 | defaultMiddlewares = baseMids 187 | } 188 | 189 | func TestManager_Run(t *testing.T) { 190 | namespace := "mgrruntest" 191 | opts := testOptionsWithNamespace(namespace) 192 | opts.PollInterval = time.Second 193 | mgr, err := newTestManager(opts, true) 194 | assert.NoError(t, err) 195 | prod := mgr.Producer() 196 | 197 | q1cc := NewCallCounter() 198 | q2cc := NewCallCounter() 199 | mgr.AddWorker("queue1", 1, q1cc.F, NopMiddleware) 200 | mgr.AddWorker("queue2", 2, q2cc.F, NopMiddleware) 201 | 202 | var wg sync.WaitGroup 203 | wg.Add(1) 204 | go func() { 205 | mgr.Run() 206 | wg.Done() 207 | }() 208 | 209 | // Test that messages process 210 | _, err = prod.Enqueue("queue1", "any", q1cc.syncMsg().Args().Interface()) 211 | assert.NoError(t, err) 212 | // This channel read will time out the test if messages don't process 213 | <-q1cc.syncCh 214 | q1cc.ackSyncCh <- true 215 | 216 | // Test that the manager is registered in the stats server 217 | assert.Contains(t, globalAPIServer.managers, mgr.uuid) 218 | 219 | // Test that it runs a scheduledWorker 220 | _, err = prod.EnqueueIn("queue1", "any", 2, q1cc.syncMsg().Args().Interface()) 221 | assert.NoError(t, err) 222 | // This channel read will time out the test if the scheduled message doesn't process 223 | <-q1cc.syncCh 224 | q1cc.ackSyncCh <- true 225 | 226 | mgr.Stop() 227 | wg.Wait() 228 | 229 | // Test that the manager is deregistered from the stats server 230 | assert.NotContains(t, globalAPIServer.managers, mgr.uuid) 231 | 232 | // Test that we can restart the manager 233 | wg.Add(1) 234 | go func() { 235 | mgr.Run() 236 | wg.Done() 237 | }() 238 | 239 | // Test that messages process 240 | _, err = prod.Enqueue("queue1", "any", q1cc.syncMsg().Args().Interface()) 241 | assert.NoError(t, err) 242 | // This channel read will time out the test if messages don't process, which 243 | // means the manager didn't restart 244 | <-q1cc.syncCh 245 | q1cc.ackSyncCh <- true 246 | 247 | // Test that we're back in the global stats server 248 | assert.Contains(t, globalAPIServer.managers, mgr.uuid) 249 | 250 | mgr.Stop() 251 | wg.Wait() 252 | 253 | } 254 | 255 | func TestManager_inProgressMessages(t *testing.T) { 256 | namespace := "mgrruntest" 257 | opts := testOptionsWithNamespace(namespace) 258 | opts.PollInterval = time.Second 259 | mgr, err := newTestManager(opts, true) 260 | assert.NoError(t, err) 261 | prod, err :=
NewProducer(opts) 262 | assert.NoError(t, err) 263 | 264 | q1cc := NewCallCounter() 265 | q2cc := NewCallCounter() 266 | mgr.AddWorker("ipm_test_queue1", 1, q1cc.F, NopMiddleware) 267 | mgr.AddWorker("ipm_test_queue2", 2, q2cc.F, NopMiddleware) 268 | 269 | var wg sync.WaitGroup 270 | wg.Add(1) 271 | go func() { 272 | mgr.Run() 273 | wg.Done() 274 | }() 275 | 276 | // None 277 | ipm := mgr.inProgressMessages() 278 | assert.Len(t, ipm, 2) 279 | assert.Contains(t, ipm, "ipm_test_queue1") 280 | assert.Contains(t, ipm, "ipm_test_queue2") 281 | assert.Empty(t, ipm["ipm_test_queue1"]) 282 | assert.Empty(t, ipm["ipm_test_queue2"]) 283 | 284 | // One in Queue1 285 | _, err = prod.Enqueue("ipm_test_queue1", "any", q1cc.syncMsg().Args().Interface()) 286 | assert.NoError(t, err) 287 | <-q1cc.syncCh 288 | ipm = mgr.inProgressMessages() 289 | assert.Len(t, ipm, 2) 290 | assert.Contains(t, ipm, "ipm_test_queue1") 291 | assert.Contains(t, ipm, "ipm_test_queue2") 292 | assert.Len(t, ipm["ipm_test_queue1"], 1) 293 | assert.Empty(t, ipm["ipm_test_queue2"]) 294 | 295 | // One in Queue2 296 | _, err = prod.Enqueue("ipm_test_queue2", "any", q2cc.syncMsg().Args().Interface()) 297 | assert.NoError(t, err) 298 | <-q2cc.syncCh 299 | ipm = mgr.inProgressMessages() 300 | assert.Len(t, ipm, 2) 301 | assert.Contains(t, ipm, "ipm_test_queue1") 302 | assert.Contains(t, ipm, "ipm_test_queue2") 303 | assert.Len(t, ipm["ipm_test_queue1"], 1) 304 | assert.Len(t, ipm["ipm_test_queue2"], 1) 305 | 306 | // Another in Queue2 307 | _, err = prod.Enqueue("ipm_test_queue2", "any", q2cc.syncMsg().Args().Interface()) 308 | assert.NoError(t, err) 309 | <-q2cc.syncCh 310 | ipm = mgr.inProgressMessages() 311 | assert.Len(t, ipm, 2) 312 | assert.Contains(t, ipm, "ipm_test_queue1") 313 | assert.Contains(t, ipm, "ipm_test_queue2") 314 | assert.Len(t, ipm["ipm_test_queue1"], 1) 315 | assert.Len(t, ipm["ipm_test_queue2"], 2) 316 | 317 | // Release two from Queue2 318 | q2cc.ackSyncCh <- true 319 | q2cc.ackSyncCh <- true 320 | 321 | time.Sleep(2 * time.Second) 322 | ipm = mgr.inProgressMessages() 323 | assert.Len(t, ipm, 2) 324 | assert.Contains(t, ipm, "ipm_test_queue1") 325 | assert.Contains(t, ipm, "ipm_test_queue2") 326 | assert.Len(t, ipm["ipm_test_queue1"], 1) 327 | assert.Len(t, ipm["ipm_test_queue2"], 0) 328 | 329 | // Release last from Queue1 - nothing should be left in progress in either queue 330 | q1cc.ackSyncCh <- true 331 | time.Sleep(2 * time.Second) 332 | ipm = mgr.inProgressMessages() 333 | assert.Len(t, ipm, 2) 334 | assert.Contains(t, ipm, "ipm_test_queue1") 335 | assert.Contains(t, ipm, "ipm_test_queue2") 336 | assert.Len(t, ipm["ipm_test_queue1"], 0) 337 | assert.Len(t, ipm["ipm_test_queue2"], 0) 338 | 339 | mgr.Stop() 340 | wg.Wait() 341 | } 342 | 343 | func TestManager_InactiveManagerNoMessageProcessing(t *testing.T) { 344 | namespace := "mgrruntest" 345 | opts := SetupDefaultTestOptionsWithHeartbeat(namespace, "1") 346 | mgr, err := newTestManager(opts, true) 347 | assert.NoError(t, err) 348 | mgr.active = false 349 | q1cc := NewCallCounter() 350 | mgr.AddWorker("queue1", 1, q1cc.F, NopMiddleware) 351 | 352 | var wg sync.WaitGroup 353 | wg.Add(1) 354 | go func() { 355 | mgr.Run() 356 | wg.Done() 357 | }() 358 | prod, err := NewProducer(opts) 359 | assert.NoError(t, err) 360 | _, err = prod.Enqueue("queue1", "any", q1cc.syncMsg().Args().Interface()) 361 | assert.NoError(t, err) 362 | time.Sleep(2 * time.Second) 363 | ipm := mgr.inProgressMessages() 364 | assert.Len(t, ipm, 1) 365 | assert.Contains(t, ipm, "queue1") 366 | // the message never reaches the
in-progress queue because the inactive manager never picks it up 367 | assert.Len(t, ipm["queue1"], 0) 368 | mgr.Stop() 369 | wg.Wait() 370 | } 371 | 372 | func TestManager_Run_HeartbeatHandlesStaleInProgressMessages(t *testing.T) { 373 | namespace := "mgrruntest" 374 | opts1 := SetupDefaultTestOptionsWithHeartbeat(namespace, "1") 375 | mgr1, err := newTestManager(opts1, true) 376 | assert.NoError(t, err) 377 | prod1 := mgr1.Producer() 378 | 379 | mgr1qcc := NewCallCounter() 380 | mgr1.AddWorker("testqueue", 3, mgr1qcc.F, NopMiddleware) 381 | 382 | assertMgr1HeartbeatTimeoutDuration := mgr1.opts.Heartbeat.Interval * 3 383 | pollMgr1StartTime, err := mgr1.opts.store.GetTime(context.Background()) 384 | assert.NoError(t, err) 385 | assertMgr1Heartbeat := false 386 | mgr1.addAfterHeartbeatHooks(func(heartbeat *storage.Heartbeat, manager *Manager, staleMessageUpdates []*staleMessageUpdate) error { 387 | heartbeatTime := time.Unix(heartbeat.Beat, 0) 388 | if !assertMgr1Heartbeat && heartbeatTime.Sub(pollMgr1StartTime) > assertMgr1HeartbeatTimeoutDuration { 389 | assert.Fail(t, "mgr1 heartbeat timed out") 390 | } 391 | if len(heartbeat.WorkerHeartbeats) > 0 { 392 | assertMgr1Heartbeat = true 393 | } 394 | if len(staleMessageUpdates) > 0 { 395 | assert.Fail(t, "expiring in manager 1") 396 | } 397 | return nil 398 | }) 399 | 400 | var wg1 sync.WaitGroup 401 | wg1.Add(1) 402 | go func() { 403 | mgr1.Run() 404 | wg1.Done() 405 | }() 406 | 407 | // put 3 messages in testqueue and into its in-progress queues 408 | _, err = prod1.Enqueue("testqueue", "any", mgr1qcc.syncMsg().Args().Interface()) 409 | assert.NoError(t, err) 410 | <-mgr1qcc.syncCh 411 | _, err = prod1.Enqueue("testqueue", "any", mgr1qcc.syncMsg().Args().Interface()) 412 | assert.NoError(t, err) 413 | <-mgr1qcc.syncCh 414 | _, err = prod1.Enqueue("testqueue", "any", mgr1qcc.syncMsg().Args().Interface()) 415 | assert.NoError(t, err) 416 | <-mgr1qcc.syncCh 417 | 418 | // release 2 of the messages from the in-progress queues 419 | mgr1qcc.ackSyncCh <- true 420 | mgr1qcc.ackSyncCh <- true 421 | 422 | // 1 left in the in-progress queue; wait for a heartbeat, then stop the manager 423 | for !assertMgr1Heartbeat { 424 | time.Sleep(time.Millisecond) 425 | } 426 | mgr1.Stop() 427 | 428 | // we update processID to guarantee using different in-progress queues from mgr1 429 | opts2 := SetupDefaultTestOptionsWithHeartbeat(namespace, "2") 430 | mgr2, err := newTestManager(opts2, false) 431 | assert.NoError(t, err) 432 | mgr2qcc := NewCallCounter() 433 | // demonstrate that the implementation tolerates different concurrency levels by changing concurrency from 3 to 2 434 | mgr2.AddWorker("testqueue", 2, mgr2qcc.F, NopMiddleware) 435 | pollMgr2StartTime, err := mgr2.opts.store.GetTime(context.Background()) 436 | assert.NoError(t, err) 437 | assertMessageRequeued := make(chan bool) 438 | mgr2.addAfterHeartbeatHooks(func(heartbeat *storage.Heartbeat, manager *Manager, staleMessageUpdates []*staleMessageUpdate) error { 439 | heartbeatTime := time.Unix(heartbeat.Beat, 0) 440 | if heartbeatTime.Sub(pollMgr2StartTime) > mgr2.opts.Heartbeat.HeartbeatTTL*2 { 441 | assert.Fail(t, "mgr2 timed out polling for requeued stale task runner") 442 | assertMessageRequeued <- false 443 | } 444 | if len(staleMessageUpdates) > 0 { 445 | for _, updatedStaleMessage := range staleMessageUpdates { 446 | assert.Equal(t, "testqueue", updatedStaleMessage.Queue) 447 | assert.Contains(t, updatedStaleMessage.InprogressQueue, "testqueue") 448 | assert.Contains(t, updatedStaleMessage.InprogressQueue,
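// the in-progress queue name embeds both the source queue name and an
// "inprogress" marker, asserted piecewise here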
"inprogress") 449 | if len(updatedStaleMessage.RequeuedMsgs) > 0 { 450 | // check if it has requeued messages, as heartbeat may have expired the other 2 task runners 451 | // without messages instead 452 | assert.Equal(t, 1, len(staleMessageUpdates[0].RequeuedMsgs)) 453 | <-mgr2qcc.syncCh 454 | ipm := mgr2.inProgressMessages() 455 | assert.Contains(t, ipm, "testqueue") 456 | requeuedMsg, err := NewMsg(staleMessageUpdates[0].RequeuedMsgs[0]) 457 | assert.NoError(t, err) 458 | // verify requeued message from manager 1 is now in progress of being processed by manager 2 459 | assert.Equal(t, requeuedMsg.Jid(), ipm["testqueue"][0].Jid()) 460 | assert.NoError(t, err) 461 | assertMessageRequeued <- true 462 | } 463 | } 464 | } 465 | return nil 466 | }) 467 | var wg2 sync.WaitGroup 468 | wg2.Add(1) 469 | go func() { 470 | mgr2.Run() 471 | wg2.Done() 472 | }() 473 | // process requeued message in manager2 474 | isRequeud := <-assertMessageRequeued 475 | assert.True(t, isRequeud) 476 | // testing complete as manager2 picked up the originally in-progress message from manager1 477 | // signal dummy acks for pending in-progress messages to satisfy waitgroups 478 | mgr1qcc.ackSyncCh <- true 479 | mgr2qcc.ackSyncCh <- true 480 | 481 | mgr2.Stop() 482 | wg1.Wait() 483 | wg2.Wait() 484 | } 485 | 486 | type testPrioritizedActiveManagerConfig struct { 487 | manager *Manager 488 | callCounter *CallCounter 489 | managerPriority int64 490 | waitGroup sync.WaitGroup 491 | assertHeartbeat chan bool 492 | assertedHeartbeat bool 493 | assertedActivate bool 494 | } 495 | 496 | func TestManager_Run_PrioritizedActiveManager(t *testing.T) { 497 | namespace := "mgrruntest" 498 | var managerConfigs []*testPrioritizedActiveManagerConfig 499 | totalManagers := 6 500 | totalActiveManagers := totalManagers / 2 501 | // initialize managers 502 | for i := 0; i < totalManagers; i++ { 503 | opts := SetupDefaultTestOptionsWithHeartbeat(namespace, fmt.Sprintf("process%d", i)) 504 | opts.ManagerStartInactive = true 505 | // half of the managers will be active based on priority 506 | opts.Heartbeat.PrioritizedManager = &PrioritizedManagerOptions{ 507 | TotalActiveManagers: totalActiveManagers, 508 | ManagerPriority: i, 509 | } 510 | flushDB := i == 0 511 | manager, err := newTestManager(opts, flushDB) 512 | assert.NoError(t, err) 513 | mgrqcc := NewCallCounter() 514 | manager.AddWorker("testqueue", 3, mgrqcc.F, NopMiddleware) 515 | 516 | managerConfig := &testPrioritizedActiveManagerConfig{ 517 | manager: manager, 518 | callCounter: mgrqcc, 519 | managerPriority: int64(i), 520 | assertHeartbeat: make(chan bool), 521 | assertedHeartbeat: false, 522 | } 523 | 524 | manager.addAfterHeartbeatHooks(func(heartbeat *storage.Heartbeat, manager *Manager, requeuedTaskRunnersStatus []*staleMessageUpdate) error { 525 | if !managerConfig.assertedHeartbeat { 526 | managerConfig.assertHeartbeat <- true 527 | } 528 | return nil 529 | }) 530 | 531 | managerConfig.waitGroup.Add(1) 532 | go func() { 533 | managerConfig.manager.Run() 534 | managerConfig.waitGroup.Done() 535 | }() 536 | managerConfigs = append(managerConfigs, managerConfig) 537 | } 538 | 539 | // synchronize all managers have had a heartbeat 540 | for i := 0; i < totalManagers; i++ { 541 | managerConfigs[i].assertedHeartbeat = <-managerConfigs[i].assertHeartbeat 542 | assert.True(t, managerConfigs[i].assertedHeartbeat) 543 | } 544 | 545 | time.Sleep(managerConfigs[0].manager.Opts().Heartbeat.Interval * 2) 546 | 547 | // verify managers 0 to 2 are inactive and 1 to 5 are active 548 | for 
i := 0; i < totalManagers; i++ { 549 | if i < totalManagers/2 { 550 | assert.False(t, managerConfigs[i].manager.IsActive()) 551 | } else { 552 | // higher priority managers are activated 553 | assert.True(t, managerConfigs[i].manager.IsActive()) 554 | } 555 | } 556 | 557 | // stop all the active highest priority managers 558 | for i := totalManagers / 2; i < totalManagers; i++ { 559 | managerConfigs[i].manager.Stop() 560 | managerConfigs[i].waitGroup.Wait() 561 | } 562 | 563 | time.Sleep(managerConfigs[0].manager.Opts().Heartbeat.HeartbeatTTL * 2) 564 | 565 | // the lowest priority managers will activate 566 | for i := 0; i < totalManagers/2; i++ { 567 | assert.True(t, managerConfigs[i].manager.IsActive()) 568 | } 569 | } 570 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= 4 | cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= 5 | cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= 6 | cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= 7 | cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= 8 | cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= 9 | cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= 10 | cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= 11 | cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 12 | cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= 13 | dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 14 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 15 | github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= 16 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 17 | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 18 | github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 19 | github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= 20 | github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= 21 | github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 22 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 23 | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 24 | github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 25 | github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y= 26 | github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= 27 | 
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= 28 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= 29 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= 30 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 31 | github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= 32 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 33 | github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= 34 | github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 35 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 36 | github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 37 | github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 38 | github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 39 | github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 40 | github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 41 | github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= 42 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 43 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 44 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 45 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 46 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 47 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 48 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 49 | github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 50 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 51 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 52 | github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 53 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 54 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= 55 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 56 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 57 | github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= 58 | github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 59 | github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 60 | github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 61 | github.com/go-redis/redis/v8 v8.4.4 
h1:fGqgxCTR1sydaKI00oQf3OmkU/DIe/I/fYXvGklCIuc= 62 | github.com/go-redis/redis/v8 v8.4.4/go.mod h1:nA0bQuF0i5JFx4Ta9RZxGKXFrQ8cRWntra97f0196iY= 63 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 64 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 65 | github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 66 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 67 | github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 68 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 69 | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 70 | github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= 71 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 72 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 73 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 74 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 75 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 76 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 77 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 78 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 79 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 80 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 81 | github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= 82 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 83 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 84 | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 85 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 86 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 87 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 88 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 89 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 90 | github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= 91 | github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 92 | github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= 93 | github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 94 | github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 95 | github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 96 | github.com/google/uuid v1.1.4 h1:0ecGp3skIrHWPNGPJDaBIghfA6Sp7Ruo2Io8eLKzWm0= 97 | github.com/google/uuid v1.1.4/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 98 | github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= 99 | github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= 100 | github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 101 | github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 102 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 103 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 104 | github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 105 | github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= 106 | github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= 107 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 108 | github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= 109 | github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 110 | github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 111 | github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= 112 | github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= 113 | github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= 114 | github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= 115 | github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 116 | github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 117 | github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 118 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 119 | github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 120 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 121 | github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= 122 | github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= 123 | github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= 124 | github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 125 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 126 | github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 127 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 128 | github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 129 | github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 130 | github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= 131 | github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 132 | github.com/julienschmidt/httprouter 
v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
133 | github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
134 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
135 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
136 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
137 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
138 | github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
139 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
140 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
141 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
142 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
143 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
144 | github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
145 | github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
146 | github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
147 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
148 | github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
149 | github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
150 | github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
151 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
152 | github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
153 | github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
154 | github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
155 | github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
156 | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
157 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
158 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
159 | github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
160 | github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
161 | github.com/nxadm/tail v1.4.6 h1:11TGpSHY7Esh/i/qnq02Jo5oVrI1Gue8Slbq0ujPZFQ=
162 | github.com/nxadm/tail v1.4.6/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
163 | github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
164 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
165 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
166 | github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M=
167 | github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
168 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
169 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
170 | github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U=
171 | github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ=
172 | github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
173 | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
174 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
175 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
176 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
177 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
178 | github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
179 | github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
180 | github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
181 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
182 | github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
183 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
184 | github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
185 | github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
186 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
187 | github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
188 | github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
189 | github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
190 | github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
191 | github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
192 | github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
193 | github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
194 | github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
195 | github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
196 | github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
197 | github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
198 | github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
199 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
200 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
201 | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
202 | github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
203 | github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
204 | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
205 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
206 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
207 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
208 | github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
209 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
210 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
211 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
212 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
213 | github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
214 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
215 | github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
216 | github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
217 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
218 | go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
219 | go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
220 | go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
221 | go.opentelemetry.io/otel v0.15.0 h1:CZFy2lPhxd4HlhZnYK8gRyDotksO3Ip9rBweY1vVYJw=
222 | go.opentelemetry.io/otel v0.15.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
223 | go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
224 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
225 | go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
226 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
227 | golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
228 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
229 | golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
230 | golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
231 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
232 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
233 | golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
234 | golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
235 | golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
236 | golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
237 | golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
238 | golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
239 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
240 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
241 | golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
242 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
243 | golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
244 | golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
245 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
246 | golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
247 | golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
248 | golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
249 | golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
250 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
251 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
252 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
253 | golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
254 | golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
255 | golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
256 | golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
257 | golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
258 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
259 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
260 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
261 | golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
262 | golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
263 | golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
264 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
265 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
266 | golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
267 | golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw=
268 | golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
269 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
270 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
271 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
272 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
273 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
274 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
275 | golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
276 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
277 | golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
278 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
279 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
280 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
281 | golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
282 | golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
283 | golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
284 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
285 | golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
286 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
287 | golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
288 | golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
289 | golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
290 | golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
291 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
292 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
293 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
294 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
295 | golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
296 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
297 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
298 | golang.org/x/sys v0.0.0-20210105210732-16f7687f5001 h1:/dSxr6gT0FNI1MO5WLJo8mTmItROeOKTkDn+7OwWBos=
299 | golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
300 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
301 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
302 | golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
303 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
304 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
305 | golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
306 | golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
307 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
308 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
309 | golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
310 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
311 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
312 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
313 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
314 | golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
315 | golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
316 | golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
317 | golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
318 | golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
319 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
320 | golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
321 | golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
322 | golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
323 | golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
324 | golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
325 | golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
326 | golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
327 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
328 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
329 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
330 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
331 | google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
332 | google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
333 | google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
334 | google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
335 | google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
336 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
337 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
338 | google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
339 | google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
340 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
341 | google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
342 | google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
343 | google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
344 | google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
345 | google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
346 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
347 | google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
348 | google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
349 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
350 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
351 | google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
352 | google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
353 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
354 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
355 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
356 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
357 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
358 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
359 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
360 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
361 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
362 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
363 | google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
364 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
365 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
366 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
367 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
368 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
369 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
370 | gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
371 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
372 | gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
373 | gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
374 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
375 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
376 | gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
377 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
378 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
379 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
380 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
381 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
382 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
383 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
384 | gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8 h1:tH9C0MON9YI3/KuD+u5+tQrQQ8px0MrcJ/avzeALw7o=
385 | gopkg.in/yaml.v3 v3.0.0-20210105161348-2e78108cf5f8/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
386 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
387 | honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
388 | honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
389 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
390 | honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
391 | rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
392 | --------------------------------------------------------------------------------