├── doc
│   ├── job-flow.png
│   ├── delay-fire.png
│   ├── fire-forget.png
│   ├── fire-wait.png
│   ├── images
│   │   └── logo.png
│   ├── lmstfy-internal.png
│   ├── throttler.cn.md
│   ├── Administration.md
│   ├── throttler.en.md
│   ├── administration.cn.md
│   ├── administration.en.md
│   └── Usage-Patterns.md
├── scripts
│   ├── teardown.sh
│   ├── run-test.sh
│   ├── setup.sh
│   ├── redis
│   │   └── docker-compose.yml
│   └── token-cli
├── version
│   └── version.go
├── .gitignore
├── engine
│   ├── redis
│   │   ├── constant.go
│   │   ├── utils.go
│   │   ├── info_test.go
│   │   ├── setup_test.go
│   │   ├── hooks
│   │   │   ├── init.go
│   │   │   └── metrics.go
│   │   ├── setup.go
│   │   ├── pool_test.go
│   │   ├── info.go
│   │   ├── meta.go
│   │   ├── pool.go
│   │   ├── timer_test.go
│   │   ├── deadletter_test.go
│   │   ├── timer.go
│   │   ├── queue_test.go
│   │   ├── metrics.go
│   │   ├── deadletter.go
│   │   ├── engine_test.go
│   │   ├── queue.go
│   │   └── engine.go
│   ├── errors.go
│   ├── migration
│   │   ├── README.md
│   │   ├── setup.go
│   │   ├── setup_test.go
│   │   ├── engine.go
│   │   └── engine_test.go
│   ├── engine.go
│   ├── pool.go
│   └── job.go
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── config.yml
│   │   ├── enhancement.yml
│   │   └── bug-report.yml
│   └── workflows
│       ├── lmstfy.yaml
│       ├── release.yaml
│       └── codeql.yml
├── client
│   ├── cmd
│   │   └── lmstfy
│   │       ├── README.md
│   │       └── main.go
│   ├── errors.go
│   ├── README.md
│   └── setup_test.go
├── Dockerfile
├── helper
│   ├── setup_test.go
│   ├── redis_test.go
│   └── redis.go
├── config
│   ├── config_test.go
│   ├── demo-conf.toml
│   ├── docker-image-conf.toml
│   ├── preset.go
│   └── config.go
├── uuid
│   ├── uuid_test.go
│   └── uuid.go
├── Changelog
├── Makefile
├── server
│   ├── handlers
│   │   ├── setup.go
│   │   ├── throttler.go
│   │   ├── metrics.go
│   │   ├── setup_test.go
│   │   ├── middleware.go
│   │   ├── throttler_test.go
│   │   └── admin_test.go
│   ├── middleware
│   │   └── middleware.go
│   ├── route.go
│   └── main.go
├── LICENSE
├── log
│   ├── backtrack_test.go
│   ├── backtrack.go
│   └── utils.go
├── go.mod
├── auth
│   ├── token.go
│   └── token_test.go
└── README.md
/doc/job-flow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/job-flow.png
--------------------------------------------------------------------------------
/doc/delay-fire.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/delay-fire.png
--------------------------------------------------------------------------------
/doc/fire-forget.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/fire-forget.png
--------------------------------------------------------------------------------
/doc/fire-wait.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/fire-wait.png
--------------------------------------------------------------------------------
/doc/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/images/logo.png
--------------------------------------------------------------------------------
/doc/lmstfy-internal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitleak/lmstfy/HEAD/doc/lmstfy-internal.png
--------------------------------------------------------------------------------
/scripts/teardown.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | cd scripts/redis && docker-compose -p lmstfy-test down -v --remove-orphans && cd ../..
4 |
--------------------------------------------------------------------------------
/version/version.go:
--------------------------------------------------------------------------------
1 | package version
2 |
3 | var (
4 | Version = "unknown"
5 | BuildCommit = "unknown"
6 | BuildDate = "unknown"
7 | )
8 |
--------------------------------------------------------------------------------
/scripts/run-test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e -x
3 | go test $(go list ./... | grep -v client) -race -v -covermode=atomic -coverprofile=coverage.out -p 1
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.exe
2 | *.exe~
3 | *.dll
4 | *.so
5 | *.dylib
6 | *.test
7 | *.out
8 | .idea/
9 | lmstfy-server
10 | _build/
11 | .DS_Store
12 | lint.log
13 | *.swp
14 | *.swo
15 | running.pid
16 |
--------------------------------------------------------------------------------
/engine/redis/constant.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | const (
4 | PoolPrefix = "j"
5 | QueuePrefix = "q"
6 | DeadLetterPrefix = "d"
7 | MetaPrefix = "m"
8 |
9 | BatchSize = int64(100)
10 | )
11 |
--------------------------------------------------------------------------------
/engine/errors.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import "errors"
4 |
5 | var (
6 | ErrNotFound = errors.New("job not found")
7 | ErrEmptyQueue = errors.New("the queue is empty")
8 | ErrWrongQueue = errors.New("wrong queue for the job")
9 | )
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: Ask a question or get support
4 | url: https://github.com/bitleak/lmstfy/discussions/categories/q-a
5 | about: Ask a question or request support for using LMSTFY
6 |
--------------------------------------------------------------------------------
/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | if [[ -n $(docker ps -q -f "name=lmstfy-test") ]];then
4 | cd scripts/redis && docker-compose -p lmstfy-test down -v --remove-orphans && cd ../..
5 | fi
6 |
7 | cd scripts/redis && docker-compose -p lmstfy-test up -d --remove-orphans && cd ../..
--------------------------------------------------------------------------------
/engine/redis/utils.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import "strings"
4 |
5 | func join(args ...string) string {
6 | return strings.Join(args, "/")
7 | }
8 |
9 | func splits(n int, s string) []string {
10 | return strings.SplitN(s, "/", n)
11 | }
12 |
13 | func isLuaScriptGone(err error) bool {
14 | return strings.HasPrefix(err.Error(), "NOSCRIPT")
15 | }
16 |
--------------------------------------------------------------------------------
/client/cmd/lmstfy/README.md:
--------------------------------------------------------------------------------
1 | # Lmstfy CLI
2 |
3 | ## Install
4 |
5 | ```
6 | go get -u github.com/bitleak/lmstfy/client/cmd/lmstfy
7 | ```
8 |
9 | ## Usage
10 |
11 | Set up your config file `~/.lmstfy.toml`:
12 | ```
13 | host = "172.16.200.10"
14 | port = 9999
15 | namespace = "FIXME"
16 | token = "FIXME"
17 | ```
18 |
19 | Run the following to get started:
20 | ```
21 | $ lmstfy help
22 | ```
23 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.17 AS builder
2 |
3 | WORKDIR /lmstfy
4 |
5 | ADD ./ /lmstfy
6 | RUN apt update -y && apt install -y netcat
7 | RUN cd /lmstfy && make
8 |
9 | FROM ubuntu:20.04
10 | COPY --from=builder /lmstfy /lmstfy
11 | RUN apt update -y && apt install -y netcat
12 | EXPOSE 7777:7777
13 | ENTRYPOINT ["/lmstfy/_build/lmstfy-server", "-c", "/lmstfy/config/docker-image-conf.toml"]
14 |
15 |
--------------------------------------------------------------------------------
/engine/redis/info_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import "testing"
4 |
5 | func TestGetRedisInfo(t *testing.T) {
6 | R.Conn.Set(dummyCtx, "info", 1, 0)
7 | info := GetRedisInfo(R)
8 | if info.NKeys < 1 {
9 | t.Fatalf("Expected NKeys is at least 1")
10 | }
11 | if info.MemUsed <= 0 {
12 | t.Fatalf("Expected MemUsed is non-zero")
13 | }
14 | if info.NClients < 1 {
15 | t.Fatalf("Expected NClients is at least 1")
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/helper/setup_test.go:
--------------------------------------------------------------------------------
1 | package helper
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 |
8 | "github.com/bitleak/lmstfy/config"
9 | )
10 |
11 | var (
12 | CONF *config.Config
13 | )
14 |
15 | func TestMain(m *testing.M) {
16 | presetConfig, err := config.CreatePresetForTest("")
17 | if err != nil {
18 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
19 | }
20 | defer presetConfig.Destroy()
21 | CONF = presetConfig.Config
22 | ret := m.Run()
23 | os.Exit(ret)
24 | }
25 |
--------------------------------------------------------------------------------
/engine/migration/README.md:
--------------------------------------------------------------------------------
1 | ## Migration Engine
2 | This engine is a proxy-like engine for the `redis` engine.
3 |
4 | Its main purpose is to migrate a redis engine to a larger new redis engine without
5 | breaking existing clients. The migration engine sends all PUBLISH operations to
6 | the new redis engine, and CONSUMEs from both engines.
7 |
8 | The authorization token used during the migration is the same as the old pool's. After
9 | the migration is finished, we can update the config, remove the old pool and rename the new
10 | pool to the old name. The user does NOT need to do anything.
11 |
--------------------------------------------------------------------------------
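The proxy behaviour described in `engine/migration/README.md` above can be sketched in a few lines of Go. This is only an illustration, not the real `engine/migration/engine.go` (which is not reproduced in this dump); it assumes the `engine.Engine` interface and `engine.Job` type shown elsewhere in this repository and implements just the two methods the README talks about.

```go
package migration

import "github.com/bitleak/lmstfy/engine"

// proxyEngine illustrates the migration idea: writes always go to the new pool,
// while reads drain the old pool before falling back to the new one.
type proxyEngine struct {
	oldEngine engine.Engine
	newEngine engine.Engine
}

// Publish sends every new job to the new engine so the old pool slowly drains.
func (p *proxyEngine) Publish(job engine.Job) (string, error) {
	return p.newEngine.Publish(job)
}

// Consume checks the old engine first (non-blocking); only when nothing is left
// there does it block on the new engine for up to timeoutSecond.
func (p *proxyEngine) Consume(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (engine.Job, error) {
	job, err := p.oldEngine.Consume(namespace, queues, ttrSecond, 0)
	if err == nil && job != nil {
		return job, nil
	}
	return p.newEngine.Consume(namespace, queues, ttrSecond, timeoutSecond)
}
```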
/config/config_test.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestRedisConfig_Validate(t *testing.T) {
8 | conf := &RedisConf{}
9 | if err := conf.validate(); err == nil {
10 | t.Fatal("validate addr error was expected, but got nil")
11 | }
12 | conf.Addr = "abc"
13 | if err := conf.validate(); err != nil {
14 | t.Fatalf("no error was expected, but got %v", err)
15 | }
16 | conf.DB = -1
17 | if err := conf.validate(); err == nil {
18 | t.Fatalf("validate db error was expected, but got nil")
19 | }
20 | conf.DB = 0
21 | conf.MasterName = "test"
22 | if err := conf.validate(); err != nil {
23 | t.Fatalf("no error was expected, but got %v", err)
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/client/errors.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import "fmt"
4 |
5 | // API error type. implements the Stringer interface.
6 | type ErrType int
7 |
8 | const (
9 | RequestErr ErrType = iota + 1
10 | ResponseErr
11 | )
12 |
13 | func (t ErrType) String() string {
14 | switch t {
15 | case RequestErr:
16 | return "req"
17 | case ResponseErr:
18 | return "resp"
19 | }
20 | return ""
21 | }
22 |
23 | // API error. implements the error interface.
24 | type APIError struct {
25 | Type ErrType
26 | Reason string
27 | JobID string
28 | RequestID string
29 | }
30 |
31 | func (e *APIError) Error() string {
32 | return fmt.Sprintf("t:%s; m:%s; j:%s; r:%s", e.Type, e.Reason, e.JobID, e.RequestID)
33 | }
34 |
--------------------------------------------------------------------------------
/uuid/uuid_test.go:
--------------------------------------------------------------------------------
1 | package uuid
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/oklog/ulid"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | func TestJobID(t *testing.T) {
12 | jobID := GenJobIDWithVersion(JobIDV1, 10)
13 |
14 | id, version := extractJobID(jobID)
15 | require.Equal(t, 1, version)
16 | require.Equal(t, ulid.EncodedSize, len(id))
17 |
18 | delay, err := ExtractDelaySecondFromUniqueID(jobID)
19 | require.NoError(t, err)
20 | require.Equal(t, uint32(10), delay)
21 |
22 | // Test elapsed time
23 | time.Sleep(10 * time.Millisecond)
24 | delayMilliseconds, err := ElapsedMilliSecondFromUniqueID(jobID)
25 | require.NoError(t, err)
26 | require.InDelta(t, 10, delayMilliseconds, 2)
27 | }
28 |
--------------------------------------------------------------------------------
/Changelog:
--------------------------------------------------------------------------------
1 | VERSION 1.0.10
2 | * FIX the wrong backtrace in logrus hook
3 |
4 | VERSION 1.0.8
5 | * ADD: basic authorization for admin api
6 | * ADD: parse the http scheme from the host in lmstfy client
7 | VERSION 1.0.6
8 | * MOD: supports query param freeze_tries in consume API
9 | * MOD: make freeTries as consume parameter instead of the function
10 |
11 | VERSION 1.0.5
12 | * FIX: allow to use consume multi queues with timeout = 0
13 | * MOD: allow using user defined http client to setup the lmstfy clien
14 |
15 | VERSION 1.0.3
16 | * add publish bulk api testcase
17 | * print some logs while initialize meta and setup error
18 | * remove build install flag
19 |
20 | VERSION 1.0.2
21 | * add token consume/produce rate limit
22 |
23 | VERSION 1.0.0
24 | * Initial public version
25 |
26 |
--------------------------------------------------------------------------------
/engine/migration/setup.go:
--------------------------------------------------------------------------------
1 | package migration
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/bitleak/lmstfy/config"
7 | "github.com/bitleak/lmstfy/engine"
8 | "github.com/sirupsen/logrus"
9 | )
10 |
11 | var logger *logrus.Logger
12 |
13 | func SetLogger(l *logrus.Logger) {
14 | logger = l
15 | }
16 |
17 | func Setup(conf *config.Config) error {
18 | for redisPool, poolConf := range conf.Pool {
19 | if poolConf.MigrateTo != "" {
20 | oldEngine := engine.GetEngineByKind(engine.KindRedis, redisPool)
21 | newEngine := engine.GetEngineByKind(engine.KindRedis, poolConf.MigrateTo)
22 | if newEngine == nil {
23 | return fmt.Errorf("invalid pool [%s] to migrate to", poolConf.MigrateTo)
24 | }
25 | engine.Register(engine.KindMigration, redisPool, NewEngine(oldEngine, newEngine))
26 | }
27 | }
28 | return nil
29 | }
30 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PROGRAM=lmstfy-server
2 |
3 | PKG_FILES=`go list ./... | sed -e 's=github.com/bitleak/lmstfy/=./='`
4 |
5 | CCCOLOR="\033[37;1m"
6 | MAKECOLOR="\033[32;1m"
7 | ENDCOLOR="\033[0m"
8 |
9 | all: $(PROGRAM)
10 |
11 | .PHONY: all
12 |
13 | $(PROGRAM):
14 | @bash build.sh
15 | @echo ""
16 | @printf $(MAKECOLOR)"Hint: It's a good idea to run 'make test' ;)"$(ENDCOLOR)
17 | @echo ""
18 |
19 | setup:
20 | @bash scripts/setup.sh
21 |
22 | teardown:
23 | @bash scripts/teardown.sh
24 |
25 | test:
26 | @sh scripts/run-test.sh
27 |
28 | lint:
29 | @rm -rf lint.log
30 | @printf $(CCCOLOR)"Checking format...\n"$(ENDCOLOR)
31 | @go list ./... | sed -e 's=github.com/bitleak/lmstfy/=./=' | xargs -n 1 gofmt -d -s 2>&1 | tee -a lint.log
32 | @[ ! -s lint.log ]
33 | @printf $(CCCOLOR)"Checking vet...\n"$(ENDCOLOR)
34 | @go list ./... | sed -e 's=github.com/bitleak/lmstfy/=./=' | xargs -n 1 go vet
35 |
--------------------------------------------------------------------------------
/engine/engine.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "io"
5 | )
6 |
7 | type Engine interface {
8 | Publish(job Job) (jobID string, err error)
9 | Consume(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job Job, err error)
10 | BatchConsume(namespace string, queues []string, count, ttrSecond, timeoutSecond uint32) (jobs []Job, err error)
11 | Delete(namespace, queue, jobID string) error
12 | Peek(namespace, queue, optionalJobID string) (job Job, err error)
13 | Size(namespace, queue string) (size int64, err error)
14 | Destroy(namespace, queue string) (count int64, err error)
15 |
16 | // Dead letter
17 | PeekDeadLetter(namespace, queue string) (size int64, jobID string, err error)
18 | DeleteDeadLetter(namespace, queue string, limit int64) (count int64, err error)
19 | RespawnDeadLetter(namespace, queue string, limit, ttlSecond int64) (count int64, err error)
20 | SizeOfDeadLetter(namespace, queue string) (size int64, err error)
21 |
22 | Shutdown()
23 |
24 | DumpInfo(output io.Writer) error
25 | }
26 |
--------------------------------------------------------------------------------
/helper/redis_test.go:
--------------------------------------------------------------------------------
1 | package helper
2 |
3 | import (
4 | "context"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | "github.com/stretchr/testify/require"
9 | )
10 |
11 | func TestValidateRedisConfig(t *testing.T) {
12 | ctx := context.Background()
13 | defaultPool := CONF.Pool["default"]
14 | redisCli := NewRedisClient(&defaultPool, nil)
15 | _, err := redisCli.ConfigSet(ctx, "appendonly", "no").Result()
16 | require.Nil(t, err)
17 | assert.NotNil(t, ValidateRedisConfig(ctx, &defaultPool))
18 | _, err = redisCli.ConfigSet(ctx, "appendonly", "yes").Result()
19 | require.Nil(t, err)
20 | _, err = redisCli.ConfigSet(ctx, "maxmemory-policy", "allkeys-lru").Result()
21 | require.Nil(t, err)
22 | assert.NotNil(t, ValidateRedisConfig(ctx, &defaultPool))
23 | _, err = redisCli.ConfigSet(ctx, "maxmemory-policy", "noeviction").Result()
24 | require.NoError(t, ValidateRedisConfig(ctx, &defaultPool))
25 | for _, poolConf := range CONF.Pool {
26 | assert.NoError(t, ValidateRedisConfig(ctx, &poolConf))
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/doc/throttler.cn.md:
--------------------------------------------------------------------------------
1 | ## Throttler API
2 |
3 | Rate limiting is currently applied at the token level, limiting consume/produce QPS rather than the number of messages; if you want to limit the number of messages, simply don't use the batch consume API. When the consume/produce rate reaches the limit, the API returns a `429` status code.
4 |
5 | > NOTE: for performance reasons, the limit thresholds are synced asynchronously every 10 seconds; adding or deleting a limiter takes effect without waiting for the async update.
6 |
7 | ### Create a limiter
8 |
9 | ```
10 | POST /token/:namespace/:token/limit
11 | ```
12 |
13 | #### Request Body
14 |
15 | ```
16 | {
17 | "read": 100,
18 | "write": 200,
19 | "interval": 10
20 | }
21 | ```
22 |
23 | The unit of `interval` is seconds and `read`/`write` are counts; the example above means this token can consume at most 100 times and produce at most 200 times within 10 seconds.
24 |
25 | ### Get a limiter
26 |
27 | ```
28 | GET /token/:namespace/:token/limit
29 | ```
30 |
31 | #### Request Query
32 |
33 | no parameter
34 |
35 | ### Set a limiter
36 |
37 | ```
38 | PUT /token/:namespace/:token/limit
39 | ```
40 | #### Request Body
41 |
42 | ```
43 | {
44 | "read": 200,
45 | "write": 400,
46 | "interval": 10
47 | }
48 | ```
49 |
50 | ### Delete a limiter
51 |
52 | ```
53 | DELETE /token/:namespace/:token/limit
54 | ```
55 |
56 | #### Request Query
57 |
58 | no parameter
59 |
60 | ### List limiters
61 | ```
62 | GET /limits
63 | ```
64 |
65 | #### Request Query
66 |
67 | no parameter
--------------------------------------------------------------------------------
/server/handlers/setup.go:
--------------------------------------------------------------------------------
1 | package handlers
2 |
3 | import (
4 | "strconv"
5 | "sync"
6 |
7 | "github.com/bitleak/lmstfy/config"
8 | "github.com/gin-gonic/gin"
9 | "github.com/sirupsen/logrus"
10 | )
11 |
12 | var setupOnce sync.Once
13 | var _logger *logrus.Logger
14 |
15 | var (
16 | DefaultTTL string
17 | DefaultDelay string
18 | DefaultTries string
19 | DefaultTTR string
20 | DefaultTimeout string
21 | )
22 |
23 | func Setup(l *logrus.Logger) {
24 | setupOnce.Do(func() {
25 | _logger = l
26 | setupMetrics()
27 | })
28 | }
29 |
30 | func GetHTTPLogger(c *gin.Context) *logrus.Entry {
31 | reqID := c.GetString("req_id")
32 | if reqID == "" {
33 | return logrus.NewEntry(_logger)
34 | }
35 | return _logger.WithField("req_id", reqID)
36 | }
37 |
38 | func SetupParamDefaults(conf *config.Config) {
39 | DefaultTTL = strconv.Itoa(conf.TTLSecond)
40 | DefaultDelay = strconv.Itoa(conf.DelaySecond)
41 | DefaultTries = strconv.Itoa(conf.TriesNum)
42 | DefaultTTR = strconv.Itoa(conf.TTRSecond)
43 | DefaultTimeout = strconv.Itoa(conf.TimeoutSecond)
44 | }
45 |
--------------------------------------------------------------------------------
/doc/Administration.md:
--------------------------------------------------------------------------------
1 | ## Admin API
2 |
3 |
4 | ### Create a `token`
5 |
6 | ```
7 | POST /token/:namespace
8 | ```
9 |
10 | #### Request Query
11 |
12 | - description: a description of what the token is for
13 | - pool: optional, defaults to "default"
14 |
15 |
16 | ### Delete a `token`
17 |
18 | ```
19 | DELETE /token/:namespace/:token
20 | ```
21 |
22 | #### Request Query
23 | - pool: optional, defaults to "default"
24 |
25 |
26 | ### List all `token`s
27 |
28 | ```
29 | GET /token/:namespace
30 | ```
31 |
32 | #### Request Query
33 | - pool: optional, defaults to "default"
34 |
35 |
36 | ## Migrating redis
37 |
38 | Assume the old pool (default) is configured as follows:
39 |
40 | ```
41 | [Pool]
42 | [Pool.default]
43 | Addr = "localhost:6379"
44 | ```
45 |
46 | To migrate to a new redis, just change the config to:
47 |
48 | ```
49 | [Pool]
50 | [Pool.default]
51 | Addr = "localhost:6379"
52 | MigrateTo = "migrate"
53 |
54 | [Pool.migrate]
55 | Addr = "localhost:6389"
56 | ```
57 |
58 | Users' tokens do not need to change; all writes go to the new pool, and all reads consume the old pool first.
59 |
60 | Once the number of redis keys and the queue sizes in the old pool stop changing, the old pool can be removed by updating the config as follows:
61 |
62 | ```
63 | [Pool]
64 | [Pool.default]
65 | Addr = "localhost:6389" # after the migration finishes, just use the new redis directly.
66 | ```
67 |
--------------------------------------------------------------------------------
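For illustration, here is a minimal Go sketch that creates a token through the admin API described above. The address and namespace are placeholders (the demo config serves the admin API on port 7778), and a basic-auth header would additionally be required if `[Accounts]` is configured.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// POST /token/:namespace with the query parameters documented above.
	query := url.Values{
		"description": {"token for the demo producer"},
		"pool":        {"default"},
	}
	endpoint := "http://127.0.0.1:7778/token/test-ns?" + query.Encode()

	resp, err := http.Post(endpoint, "application/json", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // print whatever the server returns
}
```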
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2019 Meitu Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/config/demo-conf.toml:
--------------------------------------------------------------------------------
1 | Host = "0.0.0.0"
2 | Port = 7777
3 | AdminHost = "127.0.0.1" # optional, default to localhost
4 | AdminPort = 7778
5 | #LogDir = "/var/log/lmstfy"
6 | LogLevel = "info"
7 | #LogFormat = "text" # Use LogFormat="json" if you want to print the log in json format
8 | EnableAccessLog = true
9 |
10 | # default params
11 | #TTLSecond = 24 * 60 * 60 // 1 day
12 | #DelaySecond = 0
13 | #TriesNum = 1
14 | #TTRSecond = 2 * 60 // 2 minutes
15 | #TimeoutSecond = 0 // means non-blocking
16 |
17 | # basic auth accounts for the admin api
18 | [Accounts]
19 | test_user = "change.me"
20 |
21 | [AdminRedis] # redis used to store admin data, eg. tokens
22 | Addr = "localhost:6379"
23 | Password = "foobared"
24 |
25 | [Pool]
26 | [Pool.default]
27 | Addr = "localhost:6379"
28 | Password = "foobared"
29 | # DB = 0
30 | #MigrateTo = "migrate" # When migration is enabled, all PUBLISH will go to `migrate` pool. and `default` will be drained
31 | #[Pool.migrate]
32 | #Addr = "localhost:6389"
33 |
34 | [Pool.mysentinel]
35 | Addr = "localhost:26379,localhost:26380,localhost:26381"
36 | MasterName = "mymaster"
37 | Password = "foobared"
38 | SentinelPassword = "foobared1"
39 |
--------------------------------------------------------------------------------
/config/docker-image-conf.toml:
--------------------------------------------------------------------------------
1 | Host = "0.0.0.0"
2 | Port = 7777
3 | AdminHost = "0.0.0.0" # optional, default to localhost
4 | AdminPort = 7778
5 | #LogDir = "/var/log/lmstfy"
6 | LogLevel = "info"
7 | #LogFormat = "text" # Use LogFormat="json" if you want to print the log in json format
8 | EnableAccessLog = true
9 |
10 | # default params
11 | #TTLSecond = 24 * 60 * 60 // 1 day
12 | #DelaySecond = 0
13 | #TriesNum = 1
14 | #TTRSecond = 2 * 60 // 2 minutes
15 | #TimeoutSecond = 0 // means non-blocking
16 |
17 | # basic auth accounts for the admin api
18 | [Accounts]
19 | #test_user = "change.me"
20 |
21 | [AdminRedis] # redis used to store admin data, eg. tokens
22 | Addr = "redis:6379"
23 | # Password = foobared
24 |
25 | [Pool]
26 | [Pool.default]
27 | Addr = "redis:6379"
28 | # Password = foobared
29 | # DB = 0
30 | #MigrateTo = "migrate" # When migration is enabled, all PUBLISH will go to `migrate` pool. and `default` will be drained
31 | #[Pool.migrate]
32 | #Addr = "localhost:6389"
33 |
34 | #[Pool.mysentinel]
35 | # Addr = "localhost:16379,localhost:6380,localhost:6381"
36 | # MasterName = "mymaster"
37 | # Password = foobared
38 | # SentinelPassword = "foobared1"
39 |
--------------------------------------------------------------------------------
/log/backtrack_test.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "strings"
7 | "testing"
8 |
9 | "github.com/sirupsen/logrus"
10 | )
11 |
12 | func TestFire(t *testing.T) {
13 | var buf bytes.Buffer
14 | logger := logrus.New()
15 | logger.SetFormatter(&logrus.JSONFormatter{})
16 | logger.SetOutput(&buf)
17 | logger.Hooks.Add(NewBackTrackHook(logrus.DebugLevel))
18 | logger.SetLevel(logrus.DebugLevel)
19 | logger.Error("test backtrace")
20 | entry := make(map[string]string)
21 | json.Unmarshal(buf.Bytes(), &entry)
22 | if !strings.HasSuffix(entry["bt_func"], "TestFire") {
23 | t.Fatal("bt_func should have the suffix TestFire")
24 | }
25 | if !strings.Contains(entry["bt_line"], "backtrack_test.go") {
26 | t.Fatal("bt_line should contain backtrack_test.go")
27 | }
28 | }
29 |
30 | func TestBackTrackFormatter(t *testing.T) {
31 | logger := logrus.New()
32 | logger.Hooks.Add(NewBackTrackHook(logrus.DebugLevel))
33 | logger.SetLevel(logrus.DebugLevel)
34 |
35 | logger.Debug("debug backtrack")
36 | logger.Error("error backtrack")
37 |
38 | entry := logger.WithField("ctx", "test")
39 | entry.Warn("debug backtrack")
40 | }
41 |
--------------------------------------------------------------------------------
/engine/redis/setup_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 |
8 | "github.com/sirupsen/logrus"
9 |
10 | "github.com/bitleak/lmstfy/config"
11 | "github.com/bitleak/lmstfy/helper"
12 | )
13 |
14 | var (
15 | R *RedisInstance
16 | )
17 |
18 | func setup(CONF *config.Config) {
19 | logger = logrus.New()
20 | level, _ := logrus.ParseLevel(CONF.LogLevel)
21 | logger.SetLevel(level)
22 |
23 | poolConf := CONF.Pool["default"]
24 | conn := helper.NewRedisClient(&poolConf, nil)
25 | err := conn.Ping(dummyCtx).Err()
26 | if err != nil {
27 | panic(fmt.Sprintf("Failed to ping: %s", err))
28 | }
29 | err = conn.FlushDB(dummyCtx).Err()
30 | if err != nil {
31 | panic(fmt.Sprintf("Failed to flush db: %s", err))
32 | }
33 |
34 | R = &RedisInstance{
35 | Name: "unittest",
36 | Conn: conn,
37 | }
38 |
39 | if err = PreloadDeadLetterLuaScript(R); err != nil {
40 | panic(fmt.Sprintf("Failed to preload deadletter lua script: %s", err))
41 | }
42 | }
43 |
44 | func TestMain(m *testing.M) {
45 | presetConfig, err := config.CreatePresetForTest("")
46 | if err != nil {
47 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
48 | }
49 | defer presetConfig.Destroy()
50 | setup(presetConfig.Config)
51 | ret := m.Run()
52 | os.Exit(ret)
53 | }
54 |
--------------------------------------------------------------------------------
/.github/workflows/lmstfy.yaml:
--------------------------------------------------------------------------------
1 | name: Lmstfy Actions # don't edit: the badge depends on this name
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | - feature/*
8 | tags:
9 | - v*
10 |
11 | pull_request:
12 | branches:
13 | - master
14 | - feature/*
15 |
16 | jobs:
17 | lint-build-test:
18 | name: Lint/Build/Test
19 | strategy:
20 | matrix:
21 | go-version: [1.17.x,1.18.x]
22 | os: [ubuntu-latest]
23 | runs-on: ${{ matrix.os }}
24 | steps:
25 |
26 | - name: Install Go
27 | uses: actions/setup-go@v5
28 | with:
29 | go-version: ${{matrix.go-version}}
30 |
31 | - name: Checkout Code Base
32 | uses: actions/checkout@v5
33 |
34 | - name: Restore Go Module Cache
35 | uses: actions/cache@v4
36 | with:
37 | path: ~/go/pkg/mod
38 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
39 | restore-keys: |
40 | ${{ runner.os }}-go-
41 |
42 | - name: Make Lint
43 | run: make lint
44 |
45 | - name: Build
46 | run: make
47 |
48 | - name: Test
49 | run: make test
50 |
51 | - name: Upload Coverage Report
52 | uses: codecov/codecov-action@v1
53 | with:
54 | file: ./coverage.out
55 | flags: unittests
56 | name: codecov-umbrella
57 |
--------------------------------------------------------------------------------
/doc/throttler.en.md:
--------------------------------------------------------------------------------
1 | ## Throttler API
2 | The throttler limits the consume/publish QPS of a token rather than the number of messages; don't use batch consume if you want to limit the message rate.
3 | The Consume/Produce API returns status code `429` (too many requests) when the token has reached its rate limit.
4 |
5 | > CAUTION: for performance reasons, limiters are synced every 10 seconds instead of being fetched on every request.
6 |
7 | ### Create the limit
8 |
9 | ```
10 | POST /token/:namespace/:token/limit
11 | ```
12 | #### Request Body
13 |
14 | ```
15 | {
16 | "read": 100,
17 | "write": 200,
18 | "interval": 10
19 | }
20 | ```
21 |
22 | The unit of `interval` is seconds and `read`/`write` are counts, which means this token can consume at most 100 times
23 | and publish at most 200 times within 10 seconds.
24 |
25 | ### Get the limit
26 |
27 | ```
28 | GET /token/:namespace/:token/limit
29 | ```
30 |
31 | #### Request Query
32 |
33 | no parameter
34 |
35 | ### Set the limit
36 |
37 | ```
38 | PUT /token/:namespace/:token/limit
39 | ```
40 | #### Request Body
41 |
42 | ```
43 | {
44 | "read": 200,
45 | "write": 400,
46 | "interval": 10
47 | }
48 | ```
49 |
50 | ### Delete the limit
51 |
52 | ```
53 | DELETE /token/:namespace/:token/limit
54 | ```
55 |
56 | #### Request Query
57 |
58 | no parameter
59 |
60 | ### List the limit
61 |
62 | ```
63 | GET /limits
64 | ```
65 |
66 | #### Request Query
67 |
68 | no parameter
--------------------------------------------------------------------------------
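To make the create request above concrete, here is a minimal Go sketch that sets up a limiter. It assumes the limit endpoints are served by the admin listener (port 7778 in the demo config) and uses placeholder namespace and token values.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// POST /token/:namespace/:token/limit with the body from the doc above:
	// at most 100 consumes and 200 publishes per 10-second interval for this token.
	body := []byte(`{"read": 100, "write": 200, "interval": 10}`)
	endpoint := "http://127.0.0.1:7778/token/test-ns/demo-token/limit"

	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	respBody, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(respBody))
}
```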
/log/backtrack.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "fmt"
5 | "runtime"
6 | "strings"
7 |
8 | "github.com/sirupsen/logrus"
9 | )
10 |
11 | type BackTrackHook struct {
12 | level logrus.Level // the hook fires only for log levels at least as severe as this one
13 | }
14 |
15 | func (bt *BackTrackHook) Levels() []logrus.Level {
16 | levels := make([]logrus.Level, 0)
17 | for _, l := range logrus.AllLevels {
18 | if l <= bt.level {
19 | levels = append(levels, l)
20 | }
21 | }
22 | return levels
23 | }
24 |
25 | func (bt *BackTrackHook) Fire(entry *logrus.Entry) error {
26 | pcs := make([]uintptr, 5)
27 | n := runtime.Callers(8, pcs)
28 | if n == 0 {
29 | return nil
30 | }
31 | frames := runtime.CallersFrames(pcs[:n])
32 | file := "unknown"
33 | line := 0
34 | funcName := "unknown"
35 | for {
36 | frame, more := frames.Next()
37 | if strings.Index(frame.Function, "github.com/sirupsen/logrus") == -1 {
38 | // This is the frame we are looking for
39 | file = frame.File
40 | line = frame.Line
41 | funcName = frame.Function
42 | break
43 | }
44 | if !more {
45 | // no more frames
46 | break
47 | }
48 | }
49 | // add backtrack info
50 | entry.Data["bt_line"] = fmt.Sprintf("%s:%d", file, line)
51 | entry.Data["bt_func"] = funcName
52 | return nil
53 | }
54 |
55 | func NewBackTrackHook(filteredLevel logrus.Level) logrus.Hook {
56 | return &BackTrackHook{filteredLevel}
57 | }
58 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: release docker image
2 |
3 | on:
4 | push:
5 | tags:
6 | - v*
7 |
8 | jobs:
9 | release-docker-image:
10 | name: Release Docker Image
11 | strategy:
12 | fail-fast: false
13 | matrix:
14 | go-version: [1.17.x]
15 |
16 | runs-on: ubuntu-18.04
17 | steps:
18 |
19 | - name: Install Go
20 | uses: actions/setup-go@v2
21 | with:
22 | go-version: ${{matrix.go-version}}
23 |
24 | - name: Checkout Code Base
25 | uses: actions/checkout@v3
26 | with:
27 | fetch-depth: 64
28 |
29 | - name: Set ENV
30 | run: |
31 | echo "RELEASE_TAG=${GITHUB_REF#refs/*/v}" >> $GITHUB_ENV
32 |
33 | - name: Login Docker Hub
34 | uses: docker/login-action@v1
35 | with:
36 | username: ${{ secrets.DOCKER_USERNAME }}
37 | password: ${{ secrets.DOCKER_PASSWORD }}
38 |
39 | - name: Set up QEMU
40 | uses: docker/setup-qemu-action@v1
41 |
42 | - name: Set up Docker Buildx
43 | id: buildx
44 | uses: docker/setup-buildx-action@v1
45 |
46 | - name: Available platforms
47 | run: echo ${{ steps.buildx.outputs.platforms }}
48 |
49 | - name: Build And Push Docker Image
50 | run: |
51 | docker buildx build --push --platform linux/amd64,linux/arm64 --tag bitleak/lmstfy:$RELEASE_TAG --tag bitleak/lmstfy:latest .
52 |
--------------------------------------------------------------------------------
/engine/redis/hooks/init.go:
--------------------------------------------------------------------------------
1 | package hooks
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | )
6 |
7 | type performanceMetrics struct {
8 | Latencies *prometheus.HistogramVec
9 | QPS *prometheus.CounterVec
10 | }
11 |
12 | var _metrics *performanceMetrics
13 |
14 | const (
15 | _namespace = "infra"
16 | _subsystem = "lmstfy_redis"
17 | )
18 |
19 | func setupMetrics() {
20 | labels := []string{"node", "command", "status"}
21 | buckets := prometheus.ExponentialBuckets(1, 2, 16)
22 | newHistogram := func(name string, labels ...string) *prometheus.HistogramVec {
23 | histogram := prometheus.NewHistogramVec(
24 | prometheus.HistogramOpts{
25 | Namespace: _namespace,
26 | Subsystem: _subsystem,
27 | Name: name,
28 | Buckets: buckets,
29 | },
30 | labels,
31 | )
32 | prometheus.MustRegister(histogram)
33 | return histogram
34 | }
35 | newCounter := func(name string, labels ...string) *prometheus.CounterVec {
36 | counters := prometheus.NewCounterVec(
37 | prometheus.CounterOpts{
38 | Namespace: _namespace,
39 | Subsystem: _subsystem,
40 | Name: name,
41 | },
42 | labels,
43 | )
44 | prometheus.MustRegister(counters)
45 | return counters
46 | }
47 | _metrics = &performanceMetrics{
48 | Latencies: newHistogram("latency", labels...),
49 | QPS: newCounter("qps", labels...),
50 | }
51 | }
52 |
53 | func init() {
54 | setupMetrics()
55 | }
56 |
--------------------------------------------------------------------------------
/config/preset.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "github.com/orlangure/gnomock"
5 | "github.com/orlangure/gnomock/preset/redis"
6 | )
7 |
8 | type PresetConfigForTest struct {
9 | *Config
10 | containers []*gnomock.Container
11 | }
12 |
13 | func CreatePresetForTest(version string, pools ...string) (*PresetConfigForTest, error) {
14 | cfg := &Config{
15 | Host: "127.0.0.1",
16 | Port: 7777,
17 | AdminHost: "127.0.0.1",
18 | AdminPort: 7778,
19 | LogLevel: "INFO",
20 | Pool: make(map[string]RedisConf),
21 | }
22 |
23 | p := redis.Preset()
24 | defaultContainer, err := gnomock.Start(p)
25 | if err != nil {
26 | return nil, err
27 | }
28 | addr := defaultContainer.DefaultAddress()
29 | cfg.AdminRedis.Addr = addr
30 | cfg.Pool[DefaultPoolName] = RedisConf{Addr: addr, Version: version}
31 |
32 | containers := []*gnomock.Container{defaultContainer}
33 | for _, extraPool := range pools {
34 | if _, ok := cfg.Pool[extraPool]; ok {
35 | continue
36 | }
37 | extraContainer, _ := gnomock.Start(p)
38 | cfg.Pool[extraPool] = RedisConf{
39 | Addr: extraContainer.DefaultAddress(),
40 | Version: version,
41 | }
42 | containers = append(containers, extraContainer)
43 | }
44 | return &PresetConfigForTest{
45 | Config: cfg,
46 | containers: containers,
47 | }, nil
48 | }
49 |
50 | func (presetConfig *PresetConfigForTest) Destroy() {
51 | gnomock.Stop(presetConfig.containers...)
52 | presetConfig.Config = nil
53 | }
54 |
--------------------------------------------------------------------------------
/engine/redis/setup.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/go-redis/redis/v8"
9 | "github.com/sirupsen/logrus"
10 |
11 | "github.com/bitleak/lmstfy/config"
12 | "github.com/bitleak/lmstfy/engine"
13 | "github.com/bitleak/lmstfy/helper"
14 | )
15 |
16 | const MaxRedisConnections = 5000
17 |
18 | var (
19 | logger *logrus.Logger
20 | dummyCtx = context.TODO()
21 | )
22 |
23 | // SetLogger will set the logger for engine
24 | func SetLogger(l *logrus.Logger) {
25 | logger = l
26 | }
27 |
28 | // Setup set the essential config of redis engine
29 | func Setup(conf *config.Config) error {
30 | for name, poolConf := range conf.Pool {
31 | if len(poolConf.Version) != 0 {
32 | continue
33 | }
34 |
35 | if poolConf.PoolSize == 0 {
36 | poolConf.PoolSize = MaxRedisConnections
37 | }
38 | opt := &redis.Options{}
39 | // By default, the RW timeout is 3 seconds; we might get a few errors
40 | // when the redis server is doing an AOF rewrite. We prefer data integrity over speed.
41 | opt.ReadTimeout = 30 * time.Second
42 | opt.WriteTimeout = 30 * time.Second
43 | opt.MinIdleConns = 10
44 | cli := helper.NewRedisClient(&poolConf, opt)
45 | if cli.Ping(dummyCtx).Err() != nil {
46 | return fmt.Errorf("redis server %s was not alive", poolConf.Addr)
47 | }
48 |
49 | e, err := NewEngine(name, cli)
50 | if err != nil {
51 | return fmt.Errorf("setup engine error: %s", err)
52 | }
53 | engine.Register(engine.KindRedis, name, e)
54 | }
55 | return nil
56 | }
57 |
--------------------------------------------------------------------------------
/doc/administration.cn.md:
--------------------------------------------------------------------------------
1 | ## Admin API
2 |
3 | Access control for the admin api works by adding a list of basic-auth accounts to the config file; if none are configured, basic auth is not enabled.
4 |
5 | ### List pools
6 |
7 | ```
8 | GET /pools/
9 | ```
10 |
11 | #### Request Query
12 |
13 | no parameter
14 |
15 | ### Create a `token`
16 |
17 | ```
18 | POST /token/:namespace
19 | ```
20 |
21 | #### Request Query
22 |
23 | - description: a description of what the token is for
24 | - pool: optional, defaults to "default"
25 |
26 |
27 | ### Delete a `token`
28 |
29 | ```
30 | DELETE /token/:namespace/:token
31 | ```
32 |
33 | #### Request Query
34 | - pool: optional, defaults to "default"
35 |
36 |
37 | ### List all `token`s
38 |
39 | ```
40 | GET /token/:namespace
41 | ```
42 |
43 | #### Request Query
44 | - pool: optional, defaults to "default"
45 |
46 | ### List all `namespace`s and `queue`s
47 |
48 | ```
49 | GET /info
50 | ```
51 |
52 | #### Request Query
53 | - pool: optional, defaults to "default"
54 |
55 | ### Get prometheus metrics
56 |
57 | ```
58 | GET /metrics
59 | ```
60 |
61 | #### Request Query
62 |
63 | no parameter
64 |
65 | ## Migrating redis
66 |
67 | Assume the old pool (default) is configured as follows:
68 |
69 | ```
70 | [Pool]
71 | [Pool.default]
72 | Addr = "localhost:6379"
73 | ```
74 |
75 | To migrate to a new redis, just change the config to:
76 |
77 | ```
78 | [Pool]
79 | [Pool.default]
80 | Addr = "localhost:6379"
81 | MigrateTo = "migrate"
82 |
83 | [Pool.migrate]
84 | Addr = "localhost:6389"
85 | ```
86 |
87 | Users' tokens do not need to change; all writes go to the new pool, and all reads consume the old pool first.
88 |
89 | Once the number of redis keys and the queue sizes in the old pool stop changing, the old pool can be removed by updating the config as follows:
90 |
91 | ```
92 | [Pool]
93 | [Pool.default]
94 | Addr = "localhost:6389" # after the migration finishes, just use the new redis directly.
95 | ```
96 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement.yml:
--------------------------------------------------------------------------------
1 | name: Enhancement
2 | description: Add new feature, improve code, document, and more
3 | labels: [ "enhancement" ]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Thank you very much for your enhancement for LMSTFY!
9 | - type: checkboxes
10 | attributes:
11 | label: Search before asking
12 | description: >
13 | Please make sure to search in the [issues](https://github.com/bitleak/lmstfy/issues) first to see whether the same issue was reported already.
14 | options:
15 | - label: >
16 | I had searched in the [issues](https://github.com/bitleak/lmstfy/issues) and found no similar issues.
17 | required: true
18 | - type: textarea
19 | attributes:
20 | label: Motivation
21 | description: Describe the motivation you'd like to make this enhancement.
22 | validations:
23 | required: true
24 | - type: textarea
25 | attributes:
26 | label: Solution
27 | description: Describe the proposed solution and add related materials like links if any.
28 | - type: checkboxes
29 | attributes:
30 | label: Are you willing to submit a PR?
31 | description: >
32 | We very much look forward to developers or users to help solve LMSTFY problems together. If you are willing to submit a PR to fix this problem, please tick it.
33 | options:
34 | - label: I'm willing to submit a PR!
35 | - type: markdown
36 | attributes:
37 | value: "Thanks for completing our form!"
38 |
--------------------------------------------------------------------------------
/engine/pool.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import "github.com/bitleak/lmstfy/config"
4 |
5 | const (
6 | KindRedis = "redis"
7 | KindMigration = "migration"
8 | )
9 |
10 | var engines = make(map[string]map[string]Engine)
11 |
12 | func GetEngineByKind(kind, pool string) Engine {
13 | if pool == "" {
14 | pool = config.DefaultPoolName
15 | }
16 | k := engines[kind]
17 | if k == nil {
18 | return nil
19 | }
20 | return k[pool]
21 | }
22 |
23 | func GetPoolsByKind(kind string) []string {
24 | v, ok := engines[kind]
25 | if !ok {
26 | return []string{}
27 | }
28 | pools := make([]string, 0)
29 | for pool := range v {
30 | pools = append(pools, pool)
31 | }
32 | return pools
33 | }
34 |
35 | func GetPools() []string {
36 | return GetPoolsByKind(KindRedis)
37 | }
38 |
39 | func ExistsPool(pool string) bool {
40 | if pool == "" {
41 | pool = config.DefaultPoolName
42 | }
43 | return GetEngine(pool) != nil
44 | }
45 |
46 | func GetEngine(pool string) Engine {
47 | if pool == "" {
48 | pool = config.DefaultPoolName
49 | }
50 | kinds := []string{KindRedis, KindMigration}
51 | for _, kind := range kinds {
52 | if e := GetEngineByKind(kind, pool); e != nil {
53 | return e
54 | }
55 | }
56 | return nil
57 | }
58 |
59 | func Register(kind, pool string, e Engine) {
60 | if _, ok := engines[kind]; !ok {
61 | engines[kind] = make(map[string]Engine)
62 | }
63 | engines[kind][pool] = e
64 | }
65 |
66 | func Shutdown() {
67 | for kind, enginePool := range engines {
68 | for name, engine := range enginePool {
69 | engine.Shutdown()
70 | delete(enginePool, name)
71 | }
72 | delete(engines, kind)
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
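A minimal sketch of how this registry is meant to be used once `redis.Setup` (and optionally `migration.Setup`) has registered the engines; the namespace and queue names below are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/bitleak/lmstfy/engine"
)

func main() {
	// Assumes the setup code has already called engine.Register for each pool,
	// e.g. via redis.Setup(conf) during server start-up.
	e := engine.GetEngine("") // an empty pool name falls back to config.DefaultPoolName
	if e == nil {
		log.Fatal("no engine registered for the default pool")
	}
	defer engine.Shutdown() // shuts down every registered engine

	size, err := e.Size("test-ns", "q1") // any Engine method can be called from here on
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("queue size:", size)
}
```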
/server/handlers/throttler.go:
--------------------------------------------------------------------------------
1 | package handlers
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 |
7 | "github.com/gin-gonic/gin"
8 | "github.com/sirupsen/logrus"
9 |
10 | "github.com/bitleak/lmstfy/throttler"
11 | )
12 |
13 | const (
14 | ThrottleActionConsume = "consume"
15 | ThrottleActionProduce = "produce"
16 | )
17 |
18 | func Throttle(action string) gin.HandlerFunc {
19 | return func(c *gin.Context) {
20 | pool := c.GetString("pool")
21 | namespace := c.Param("namespace")
22 | token := c.GetString("token")
23 | if action != ThrottleActionConsume && action != ThrottleActionProduce {
24 | c.Next()
25 | return
26 | }
27 | isRead := action == ThrottleActionConsume
28 | isReachRateLimited, err := throttler.GetThrottler().IsReachRateLimit(pool, namespace, token, isRead)
29 | if err != nil {
30 | logger := GetHTTPLogger(c)
31 | logger.WithFields(logrus.Fields{
32 | "token": token,
33 | "action": action,
34 | "err": err,
35 | }).Errorf("The throttler was broken")
36 | c.JSON(http.StatusInternalServerError, gin.H{"err": err.Error()})
37 | c.Abort()
38 | return
39 | }
40 | if isReachRateLimited {
41 | metrics.RateLimits.WithLabelValues(pool, namespace, token, action).Inc()
42 | msg := fmt.Sprintf("token(%s) %s reach the limit rate, please retry later", token, action)
43 | c.JSON(http.StatusTooManyRequests, gin.H{"msg": msg})
44 | c.Abort()
45 | return
46 | }
47 | c.Next()
48 | statusCode := c.Writer.Status()
49 | if (isRead && statusCode != http.StatusOK) || (!isRead && statusCode != http.StatusCreated) {
50 | throttler.GetThrottler().RemedyLimiter(pool, namespace, token, isRead)
51 | }
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
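The middleware above is meant to be chained in front of the consume/produce handlers. The real wiring lives in `server/route.go` (not included in this dump); the following is only a sketch, and it assumes the auth middleware has already put `pool` and `token` into the gin context and that the throttler has been set up.

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"

	"github.com/bitleak/lmstfy/server/handlers"
)

func main() {
	r := gin.New()
	// Sketch only: put the throttler in front of a stub consume handler so a token
	// that exceeds its read limit receives 429 before the handler ever runs.
	r.GET("/api/:namespace/:queue",
		handlers.Throttle(handlers.ThrottleActionConsume),
		func(c *gin.Context) { c.Status(http.StatusOK) },
	)
	r.Run(":7777")
}
```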
/engine/migration/setup_test.go:
--------------------------------------------------------------------------------
1 | package migration
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "testing"
8 |
9 | "github.com/bitleak/lmstfy/config"
10 | "github.com/bitleak/lmstfy/engine"
11 | "github.com/bitleak/lmstfy/engine/redis"
12 | "github.com/bitleak/lmstfy/helper"
13 | "github.com/sirupsen/logrus"
14 | )
15 |
16 | var (
17 | OldRedisEngine engine.Engine
18 | NewRedisEngine engine.Engine
19 | dummyCtx = context.TODO()
20 | )
21 |
22 | func setup(Conf *config.Config) {
23 | logger = logrus.New()
24 | level, _ := logrus.ParseLevel(Conf.LogLevel)
25 | logger.SetLevel(level)
26 | redis.SetLogger(logger)
27 | if err := redis.Setup(Conf); err != nil {
28 | panic(fmt.Sprintf("Failed to setup redis engine: %s", err))
29 | }
30 | for _, poolConf := range Conf.Pool {
31 | conn := helper.NewRedisClient(&poolConf, nil)
32 | err := conn.Ping(dummyCtx).Err()
33 | if err != nil {
34 | panic(fmt.Sprintf("Failed to ping: %s", err))
35 | }
36 | err = conn.FlushDB(dummyCtx).Err()
37 | if err != nil {
38 | panic(fmt.Sprintf("Failed to flush db: %s", err))
39 | }
40 | }
41 | OldRedisEngine = engine.GetEngineByKind(engine.KindRedis, "default")
42 | NewRedisEngine = engine.GetEngineByKind(engine.KindRedis, "migrate")
43 | }
44 |
45 | func teardown() {
46 | OldRedisEngine.Shutdown()
47 | NewRedisEngine.Shutdown()
48 | }
49 |
50 | func TestMain(m *testing.M) {
51 | presetConfig, err := config.CreatePresetForTest("", "migrate")
52 | if err != nil {
53 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
54 | }
55 | defer presetConfig.Destroy()
56 | setup(presetConfig.Config)
57 | ret := m.Run()
58 | teardown()
59 | os.Exit(ret)
60 | }
61 |
--------------------------------------------------------------------------------
/doc/administration.en.md:
--------------------------------------------------------------------------------
1 | ## Admin API
2 |
3 | Basic authorization for the admin api is enabled when the `Accounts` field in the config file is not empty.
4 |
5 | ### List Pool
6 |
7 | ```
8 | GET /pools/
9 | ```
10 |
11 | #### Request Query
12 |
13 | no parameter
14 |
15 | ### Create Token
16 |
17 | ```
18 | POST /token/:namespace
19 | ```
20 |
21 | #### Request Query
22 |
23 | - description: description of the token
24 | - pool: optional, Default: "default"
25 |
26 |
27 | ### DELETE Token
28 |
29 | ```
30 | DELETE /token/:namespace/:token
31 | ```
32 |
33 | #### Request Query
34 | - pool: optional, Default: "default"
35 |
36 |
37 | ### List Token
38 |
39 | ```
40 | GET /token/:namespace
41 | ```
42 |
43 | #### Request Query
44 | - pool: optional, Default: "default"
45 |
46 | ### List Namespace And Queue
47 |
48 | ```
49 | GET /info
50 | ```
51 |
52 | #### Request Query
53 | - pool: optional, Default: "default"
54 |
55 | ### Get Prometheus Metrics
56 |
57 | ```
58 | GET /metrics
59 | ```
60 |
61 | #### Request Query
62 |
63 | no parameter
64 |
65 | ## Migrate Redis
66 |
67 | Assume the old pool (default) config was below:
68 |
69 | ```
70 | [Pool]
71 | [Pool.default]
72 | Addr = "localhost:6379"
73 | ```
74 |
75 | To migrate to a new redis, just add the new configuration:
76 |
77 | ```
78 | [Pool]
79 | [Pool.default]
80 | Addr = "localhost:6379"
81 | MigrateTo = "migrate"
82 |
83 | [Pool.migrate]
84 | Addr = "localhost:6389"
85 | ```
86 |
87 | New write requests are redirected to the new pool and read requests try to consume the old pool first. We
88 | can remove the old pool configuration once the redis key count and queue sizes of the old pool stop changing, and reconfigure the pool as below:
89 |
90 | ```
91 | [Pool]
92 | [Pool.default]
93 | Addr = "localhost:6389" # use new redis after migrated
94 | ```
95 |
--------------------------------------------------------------------------------
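As an example of the basic-auth behaviour mentioned at the top of this file, the pool list could be fetched as below; the address is a placeholder and the credentials match the `test_user` account from `config/demo-conf.toml`.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// GET /pools/ on the admin listener; basic auth is only needed when [Accounts] is configured.
	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:7778/pools/", nil)
	if err != nil {
		panic(err)
	}
	req.SetBasicAuth("test_user", "change.me")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```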
/scripts/redis/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | redis-sentinel:
5 | image: bitnami/redis-sentinel:5.0
6 | environment:
7 | - REDIS_MASTER_HOST=redis
8 | - REDIS_MASTER_PORT_NUMBER=6379
9 | - REDIS_SENTINEL_PORT=26379
10 | - REDIS_MASTER_PASSWORD=foobared
11 | - REDIS_SENTINEL_PASSWORD=foobared1
12 | ports:
13 | - "26379:26379"
14 | depends_on:
15 | - redis
16 |
17 | redis-sentinel-1:
18 | image: bitnami/redis-sentinel:5.0
19 | environment:
20 | - REDIS_MASTER_HOST=redis
21 | - REDIS_MASTER_PORT_NUMBER=6379
22 | - REDIS_SENTINEL_PORT=26379
23 | - REDIS_MASTER_PASSWORD=foobared
24 | - REDIS_SENTINEL_PASSWORD=foobared1
25 | ports:
26 | - "26380:26379"
27 | depends_on:
28 | - redis
29 |
30 | redis-sentinel-2:
31 | image: bitnami/redis-sentinel:5.0
32 | environment:
33 | - REDIS_MASTER_HOST=redis
34 | - REDIS_MASTER_PORT_NUMBER=6379
35 | - REDIS_SENTINEL_PORT=26379
36 | - REDIS_MASTER_PASSWORD=foobared
37 | - REDIS_SENTINEL_PASSWORD=foobared1
38 | ports:
39 | - "26381:26379"
40 | depends_on:
41 | - redis
42 |
43 | redis:
44 | image: redis:4
45 | command: redis-server --requirepass foobared --appendonly yes
46 | ports:
47 | - "6379:6379"
48 |
49 | redis_1:
50 | image: redis:4
51 | command: redis-server --slaveof redis 6379 --requirepass foobared --masterauth foobared --appendonly yes
52 | ports:
53 | - "6380:6379"
54 | depends_on:
55 | - redis
56 |
57 | redis_2:
58 | image: redis:4
59 | command: redis-server --slaveof redis 6379 --requirepass foobared --masterauth foobared --appendonly yes
60 | ports:
61 | - "6381:6379"
62 | depends_on:
63 | - redis
64 |
--------------------------------------------------------------------------------
/client/README.md:
--------------------------------------------------------------------------------
1 | # Lmstfy client
2 |
3 | ## Usage
4 |
5 | ### Initialize
6 |
7 | ```
8 | import "github.com/bitleak/lmstfy/client"
9 |
10 | c := client.NewLmstfyClient(host, port, namespace, token)
11 |
12 | c.ConfigRetry(3, 50) // optional, configure the client to retry when some errors happen: retry 3 times with a 50ms interval
13 | ```
14 |
15 | ### Producer example
16 |
17 | ```
18 | // Publish a job with ttl==forever, tries==3, delay==5s
19 | jobID, err := c.Publish("q1", []byte("hello"), 0, 3, 5)
20 | ```
21 |
22 | ### Consumer example
23 |
24 | ```
25 | // Consume a job from q1; if there's no job available, wait until 12s have passed (polling).
26 | // And if this consumer fails to ACK the job within 10s, the job can be retried by other consumers.
27 | job, err := c.Consume("q1", 10, 12)
28 | if err != nil {
29 | panic(err)
30 | }
31 |
32 | // Do something with the `job`
33 | fmt.Println(string(job.Data))
34 |
35 | err := c.Ack("q1", job.ID)
36 | if err != nil {
37 | panic(err)
38 | }
39 | ```
40 |
41 | ```go
42 | // Consume 5 jobs from q1; if there's no job available, wait until 12s have passed (polling).
43 | // If there are some jobs but fewer than 5, return as many as possible.
44 | // And if this consumer fails to ACK a job within 10s, that job can be retried by other consumers.
45 | jobs, err := c.BatchConsume("q1", 5, 10, 12)
46 | if err != nil {
47 | panic(err)
48 | }
49 |
50 | // Do something with the `job`
51 | for _, job := range jobs {
52 | fmt.Println(string(job.Data))
53 | err := c.Ack("q1", job.ID)
54 | if err != nil {
55 | panic(err)
56 | }
57 | }
58 |
59 | ```
60 |
61 | > CAUTION: consume returns a nil job and a nil error when the queue is empty or not found; you can make the client
62 | return an error for a nil job instead by invoking the `EnableErrorOnNilJob()` option.
63 |
--------------------------------------------------------------------------------
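A short sketch of the option mentioned in the caution above, assuming `EnableErrorOnNilJob()` is called on the client before consuming (the client implementation itself is not reproduced in this dump):

```go
c := client.NewLmstfyClient(host, port, namespace, token)
c.EnableErrorOnNilJob() // after this, an empty or missing queue surfaces as an error instead of (nil, nil)

job, err := c.Consume("q1", 10, 12)
if err != nil {
	// with the option enabled, "no job available" also lands here as an error
	fmt.Println("consume failed:", err)
} else {
	fmt.Println(string(job.Data))
}
```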
/engine/redis/hooks/metrics.go:
--------------------------------------------------------------------------------
1 | package hooks
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "github.com/go-redis/redis/v8"
8 | "github.com/prometheus/client_golang/prometheus"
9 | )
10 |
11 | const (
12 | _contextStartTimeKey = iota + 1
13 | _contextSegmentKey
14 | )
15 |
16 | type MetricsHook struct {
17 | client *redis.Client
18 | }
19 |
20 | func NewMetricsHook(client *redis.Client) *MetricsHook {
21 | return &MetricsHook{client: client}
22 | }
23 |
24 | func (hook MetricsHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
25 | return context.WithValue(ctx, _contextStartTimeKey, time.Now()), nil
26 | }
27 |
28 | func (hook MetricsHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
29 | hook.record(ctx, cmd.Name(), cmd.Err())
30 | return nil
31 | }
32 |
33 | func (hook MetricsHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
34 | return context.WithValue(ctx, _contextStartTimeKey, time.Now()), nil
35 | }
36 |
37 | func (hook MetricsHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
38 | var firstErr error
39 | for _, cmd := range cmds {
40 | if cmd.Err() != nil {
41 | firstErr = cmd.Err()
42 | break
43 | }
44 | }
45 | hook.record(ctx, "pipeline", firstErr)
46 | return nil
47 | }
48 |
49 | func (hook MetricsHook) record(ctx context.Context, cmd string, err error) {
50 | startTime, ok := ctx.Value(_contextStartTimeKey).(time.Time)
51 | if !ok {
52 | return
53 | }
54 | durationMS := time.Since(startTime).Milliseconds()
55 | status := "ok"
56 | if err != nil && err != redis.Nil {
57 | status = "error"
58 | }
59 | labels := prometheus.Labels{"node": hook.client.Options().Addr, "command": cmd, "status": status}
60 | _metrics.QPS.With(labels).Inc()
61 | _metrics.Latencies.With(labels).Observe(float64(durationMS))
62 | }
63 |
--------------------------------------------------------------------------------
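To show where this hook plugs in, here is a minimal sketch that attaches it to a go-redis v8 client; the address is a placeholder (inside lmstfy the hook is expected to be attached where the pool clients are created, which is not shown in this dump).

```go
package main

import (
	"context"

	"github.com/go-redis/redis/v8"

	"github.com/bitleak/lmstfy/engine/redis/hooks"
)

func main() {
	// Every command issued through this client is now counted and timed under
	// the infra_lmstfy_redis metrics registered in hooks/init.go.
	cli := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	cli.AddHook(hooks.NewMetricsHook(cli))

	cli.Ping(context.Background())
}
```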
/server/middleware/middleware.go:
--------------------------------------------------------------------------------
1 | package middleware
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/bitleak/lmstfy/uuid"
7 | "github.com/gin-gonic/gin"
8 | "github.com/sirupsen/logrus"
9 | )
10 |
11 | var isAccessLogEnabled = false
12 |
13 | // IsAccessLogEnabled returns whether the accesslog is enabled or not
14 | func IsAccessLogEnabled() bool {
15 | return isAccessLogEnabled
16 | }
17 |
18 | // EnableAccessLog enables the accesslog
19 | func EnableAccessLog() {
20 | isAccessLogEnabled = true
21 | }
22 |
23 | // DisableAccessLog disable the accesslog
24 | func DisableAccessLog() {
25 | isAccessLogEnabled = false
26 | }
27 |
28 | // RequestIDMiddleware set request uuid into context
29 | func RequestIDMiddleware(c *gin.Context) {
30 | reqID := uuid.GenUniqueID()
31 | c.Set("req_id", reqID)
32 | c.Header("X-Request-ID", reqID)
33 | }
34 |
35 | // AccessLogMiddleware generate accesslog and output
36 | func AccessLogMiddleware(logger *logrus.Logger) gin.HandlerFunc {
37 | return func(c *gin.Context) {
38 | // Start timer
39 | start := time.Now()
40 | path := c.Request.URL.Path
41 | query := c.Request.URL.RawQuery
42 |
43 | // Process request
44 | c.Next()
45 |
46 | // Stop timer
47 | end := time.Now()
48 | latency := end.Sub(start)
49 |
50 | clientIP := c.ClientIP()
51 | method := c.Request.Method
52 | statusCode := c.Writer.Status()
53 |
54 | fields := logrus.Fields{
55 | "pool": c.GetString("pool"),
56 | "path": path,
57 | "query": query,
58 | "latency": latency,
59 | "ip": clientIP,
60 | "method": method,
61 | "code": statusCode,
62 | "req_id": c.GetString("req_id"),
63 | }
64 |
65 | if !isAccessLogEnabled {
66 | return
67 | }
68 |
69 | if statusCode >= 500 {
70 | logger.WithFields(fields).Error()
71 | } else if statusCode >= 400 && statusCode != 404 {
72 | logger.WithFields(fields).Warn()
73 | } else {
74 | logger.WithFields(fields).Info()
75 | }
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.yml:
--------------------------------------------------------------------------------
1 | name: Bug report
2 | description: Problems with the software
3 | labels: [ "bug" ]
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Thank you very much for submitting feedback to LMSTFY.
9 |
10 | If it is an idea or help wanted, please go to:
11 | 1. [Github Discussion](https://github.com/bitleak/lmstfy/discussions).
12 | - type: checkboxes
13 | attributes:
14 | label: Search before asking
15 | description: >
16 | Please make sure to search in the [issues](https://github.com/bitleak/lmstfy/issues) first to see whether the same issue was reported already.
17 | options:
18 | - label: >
19 | I had searched in the [issues](https://github.com/bitleak/lmstfy/issues) and found no similar issues.
20 | required: true
21 | - type: textarea
22 | attributes:
23 | label: Version
24 | description: >
25 | Please provide the LMSTFY version you are using. If it is an unstable version, please provide the commit id.
26 | validations:
27 | required: true
28 | - type: textarea
29 | attributes:
30 | label: Minimal reproduce step
31 | description: Please provide the steps to reproduce the issue, to help locate the problem quickly.
32 | validations:
33 | required: true
34 | - type: textarea
35 | attributes:
36 | label: What did you expect to see?
37 | validations:
38 | required: true
39 | - type: textarea
40 | attributes:
41 | label: What did you see instead?
42 | validations:
43 | required: true
44 | - type: textarea
45 | attributes:
46 | label: Anything Else?
47 | - type: checkboxes
48 | attributes:
49 | label: Are you willing to submit a PR?
50 | description: >
51 | We look forward to developers and users helping to solve LMSTFY problems together. If you are willing to submit a PR to fix this problem, please tick it.
52 | options:
53 | - label: I'm willing to submit a PR!
54 | - type: markdown
55 | attributes:
56 | value: "Thanks for completing our form!"
57 |
58 |
--------------------------------------------------------------------------------
/client/setup_test.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | "io"
8 | "net/http"
9 | "os"
10 | "testing"
11 |
12 | "github.com/bitleak/lmstfy/config"
13 | "github.com/bitleak/lmstfy/helper"
14 | )
15 |
16 | var (
17 | Host string
18 | Port int
19 | Namespace = "client-ns"
20 | Token string
21 | )
22 |
23 | // NOTE: the lmstfy server should be started by the CI script from outside, but it should use the same
24 | // config file specified in $LMSTFY_TEST_CONFIG
25 | func setup(CONF *config.Config) {
26 | ctx := context.Background()
27 | Host = CONF.Host
28 | Port = CONF.Port
29 | adminPort := CONF.AdminPort
30 |
31 | // Flush redis DB
32 | for _, poolConf := range CONF.Pool {
33 | conn := helper.NewRedisClient(&poolConf, nil)
34 | err := conn.Ping(ctx).Err()
35 | if err != nil {
36 | panic(fmt.Sprintf("Failed to ping: %s", err))
37 | }
38 | err = conn.FlushDB(ctx).Err()
39 | if err != nil {
40 | panic(fmt.Sprintf("Failed to flush db: %s", err))
41 | }
42 | }
43 |
44 | // Create the token first
45 | req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:%d/token/%s?description=client", adminPort, Namespace), nil)
46 | if err != nil {
47 | panic("Failed to create testing token")
48 | }
49 | resp, err := http.DefaultClient.Do(req)
50 | if err != nil {
51 | panic("Failed to create testing token")
52 | }
53 | if resp.StatusCode != http.StatusCreated {
54 | panic("Failed to create testing token")
55 | }
56 | respBytes, err := io.ReadAll(resp.Body)
57 | if err != nil {
58 | panic("Failed to create testing token")
59 | }
60 | var respData struct {
61 | Token string `json:"token"`
62 | }
63 | err = json.Unmarshal(respBytes, &respData)
64 | if err != nil {
65 | panic("Failed to create testing token")
66 | }
67 | Token = respData.Token
68 | }
69 |
70 | func teardown() {}
71 |
72 | func TestMain(m *testing.M) {
73 | presetConfig, err := config.CreatePresetForTest("")
74 | if err != nil {
75 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
76 | }
77 | defer presetConfig.Destroy()
78 | setup(presetConfig.Config)
79 | ret := m.Run()
80 | teardown()
81 | os.Exit(ret)
82 | }
83 |
--------------------------------------------------------------------------------
/server/handlers/metrics.go:
--------------------------------------------------------------------------------
1 | package handlers
2 |
3 | import (
4 | "strconv"
5 | "time"
6 |
7 | "github.com/gin-gonic/gin"
8 | "github.com/prometheus/client_golang/prometheus"
9 | )
10 |
11 | type PerformanceMetrics struct {
12 | Latencies *prometheus.HistogramVec
13 | HTTPCodes *prometheus.CounterVec
14 | RateLimits *prometheus.CounterVec
15 | }
16 |
17 | var metrics *PerformanceMetrics
18 |
19 | func setupMetrics() {
20 | metrics = &PerformanceMetrics{}
21 | latencies := prometheus.NewHistogramVec(
22 | prometheus.HistogramOpts{
23 | Namespace: "infra",
24 | Subsystem: "lmstfy_http",
25 | Name: "latency_milliseconds",
26 | Help: "rest api latencies",
27 | Buckets: prometheus.ExponentialBuckets(15, 2.5, 9),
28 | },
29 | []string{"pool", "namespace", "api", "method"},
30 | )
31 |
32 | httpCodes := prometheus.NewCounterVec(
33 | prometheus.CounterOpts{
34 | Namespace: "infra",
35 | Subsystem: "lmstfy_http",
36 | Name: "http_codes",
37 | Help: "rest api response code",
38 | },
39 | []string{"pool", "namespace", "api", "method", "code"},
40 | )
41 |
42 | rateLimits := prometheus.NewCounterVec(
43 | prometheus.CounterOpts{
44 | Namespace: "infra",
45 | Subsystem: "lmstfy_http",
46 | Name: "rate_limit",
47 | Help: "consume/produce rate limit",
48 | },
49 | []string{"pool", "namespace", "token", "action"},
50 | )
51 | prometheus.MustRegister(latencies)
52 | prometheus.MustRegister(httpCodes)
53 | prometheus.MustRegister(rateLimits)
54 | metrics.Latencies = latencies
55 | metrics.HTTPCodes = httpCodes
56 | metrics.RateLimits = rateLimits
57 | }
58 |
59 | func CollectMetrics(c *gin.Context) {
60 | before := time.Now()
61 | c.Next()
62 | after := time.Now()
63 | duration := after.Sub(before)
64 | relativePath := c.FullPath()
65 | code := c.Writer.Status()
66 | if code < 300 {
67 | metrics.Latencies.WithLabelValues(
68 | c.GetString("pool"),
69 | c.Param("namespace"),
70 | relativePath,
71 | c.Request.Method,
72 | ).Observe(duration.Seconds() * 1000)
73 | }
74 | metrics.HTTPCodes.WithLabelValues(
75 | c.GetString("pool"),
76 | c.Param("namespace"),
77 | relativePath,
78 | c.Request.Method,
79 | strconv.Itoa(code),
80 | ).Inc()
81 |
82 | }
83 |
--------------------------------------------------------------------------------
/log/utils.go:
--------------------------------------------------------------------------------
1 | package log
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "path"
7 |
8 | "github.com/sirupsen/logrus"
9 | )
10 |
11 | var (
12 | globalLogger *logrus.Logger
13 | accessLogger *logrus.Logger
14 | )
15 |
16 | func ReopenLogs(logDir string) error {
17 | if logDir == "" {
18 | return nil
19 | }
20 | accessLog, err := os.OpenFile(path.Join(logDir, "access.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
21 | if err != nil {
22 | return err
23 | }
24 | errorLog, err := os.OpenFile(path.Join(logDir, "error.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
25 | if err != nil {
26 | return err
27 | }
28 |
29 | oldFd := accessLogger.Out.(*os.File)
30 | accessLogger.Out = accessLog
31 | oldFd.Close()
32 |
33 | oldFd = globalLogger.Out.(*os.File)
34 | globalLogger.Out = errorLog
35 | oldFd.Close()
36 |
37 | return nil
38 | }
39 |
40 | func Setup(logFormat, logDir, logLevel, backtrackLevel string) error {
41 | level, err := logrus.ParseLevel(logLevel)
42 | if err != nil {
43 | return fmt.Errorf("failed to parse log level: %s", err)
44 | }
45 | btLevel, err := logrus.ParseLevel(backtrackLevel)
46 | if err != nil {
47 | return fmt.Errorf("failed to parse backtrack level: %s", err)
48 | }
49 | accessLogger = logrus.New()
50 | globalLogger = logrus.New()
51 |
52 | if logFormat == "json" {
53 | accessLogger.SetFormatter(&logrus.JSONFormatter{})
54 | globalLogger.SetFormatter(&logrus.JSONFormatter{})
55 | }
56 |
57 | globalLogger.Level = level
58 | globalLogger.Hooks.Add(NewBackTrackHook(btLevel))
59 | if logDir == "" {
60 | accessLogger.Out = os.Stdout
61 | globalLogger.Out = os.Stdout
62 | return nil
63 | }
64 | accessLog, err := os.OpenFile(path.Join(logDir, "access.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
65 | if err != nil {
66 | return fmt.Errorf("failed to create access.log: %s", err)
67 | }
68 | errorLog, err := os.OpenFile(path.Join(logDir, "error.log"), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
69 | if err != nil {
70 | return fmt.Errorf("failed to create error.log: %s", err)
71 | }
72 |
73 | accessLogger.Out = accessLog
74 | globalLogger.Out = errorLog
75 | return nil
76 | }
77 |
78 | func Get() *logrus.Logger {
79 | return globalLogger
80 | }
81 |
82 | func GetAccessLogger() *logrus.Logger {
83 | return accessLogger
84 | }
85 |
--------------------------------------------------------------------------------
/engine/redis/pool_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 | "time"
7 |
8 | go_redis "github.com/go-redis/redis/v8"
9 | "github.com/stretchr/testify/require"
10 |
11 | "github.com/bitleak/lmstfy/engine"
12 | "github.com/bitleak/lmstfy/uuid"
13 | )
14 |
15 | func TestPool_Add(t *testing.T) {
16 | p := NewPool(R)
17 | job := engine.NewJob("ns-pool", "q1", []byte("hello msg 1"), nil, 10, 0, 1, "")
18 | if err := p.Add(job); err != nil {
19 | t.Errorf("Failed to add job to pool: %s", err)
20 | }
21 | }
22 |
23 | // Test TTL
24 | func TestPool_Add2(t *testing.T) {
25 | p := NewPool(R)
26 | job := engine.NewJob("ns-pool", "q2", []byte("hello msg 2"), nil, 1, 0, 1, "")
27 | p.Add(job)
28 | time.Sleep(2 * time.Second)
29 | _, err := R.Conn.Get(dummyCtx, PoolJobKey(job)).Result()
30 | if err != go_redis.Nil {
31 | t.Fatalf("Expected to get nil result, but got: %s", err)
32 | }
33 |
34 | }
35 |
36 | func TestPool_Delete(t *testing.T) {
37 | p := NewPool(R)
38 | job := engine.NewJob("ns-pool", "q3", []byte("hello msg 3"), nil, 10, 0, 1, "")
39 | p.Add(job)
40 | if err := p.Delete(job.Namespace(), job.Queue(), job.ID()); err != nil {
41 | t.Fatalf("Failed to delete job from pool: %s", err)
42 | }
43 | }
44 |
45 | func TestPool_Get(t *testing.T) {
46 | p := NewPool(R)
47 | job := engine.NewJob("ns-pool", "q4", []byte("hello msg 4"), nil, 50, 0, 1, "")
48 | p.Add(job)
49 | payload, ttl, err := p.Get(job.Namespace(), job.Queue(), job.ID())
50 | if err != nil {
51 | t.Fatalf("Failed to get job: %s", err)
52 | }
53 | if !bytes.Equal(payload.Body, []byte("hello msg 4")) {
54 | t.Fatal("Mismatched job data")
55 | }
56 | if ttl > 50 || 50-ttl > 2 {
57 | t.Fatalf("Expected TTL is around 50 seconds")
58 | }
59 | }
60 |
61 | func TestPool_GetCompatibility(t *testing.T) {
62 | p := NewPool(R)
63 |
64 | t.Run("test job with different versions should get correct body", func(t *testing.T) {
65 | for i := 0; i <= uuid.JobIDV1; i++ {
66 | jobID := uuid.GenJobIDWithVersion(i, 123)
67 | job := engine.NewJob("ns-pool", "q5", []byte("hello msg 5"), nil, 50, 0, 1, jobID)
68 | p.Add(job)
69 | payload, ttl, err := p.Get(job.Namespace(), job.Queue(), job.ID())
70 | require.NoError(t, err)
71 | require.Equal(t, []byte("hello msg 5"), payload.Body)
72 | require.InDelta(t, 50, ttl, 5)
73 | require.Equal(t, i, uuid.ExtractJobIDVersion(job.ID()))
74 | }
75 | })
76 | }
77 |
--------------------------------------------------------------------------------
/helper/redis.go:
--------------------------------------------------------------------------------
1 | package helper
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "strings"
7 |
8 | "github.com/go-redis/redis/v8"
9 |
10 | "github.com/bitleak/lmstfy/config"
11 | "github.com/bitleak/lmstfy/engine/redis/hooks"
12 | )
13 |
14 | // NewRedisClient wraps the standalone and sentinel clients
15 | func NewRedisClient(conf *config.RedisConf, opt *redis.Options) (client *redis.Client) {
16 | if opt == nil {
17 | opt = &redis.Options{}
18 | }
19 | opt.Addr = conf.Addr
20 | opt.Password = conf.Password
21 | opt.PoolSize = conf.PoolSize
22 | opt.DB = conf.DB
23 | if conf.IsSentinel() {
24 | client = redis.NewFailoverClient(&redis.FailoverOptions{
25 | MasterName: conf.MasterName,
26 | SentinelAddrs: strings.Split(opt.Addr, ","),
27 | SentinelPassword: conf.SentinelPassword,
28 | Password: opt.Password,
29 | PoolSize: opt.PoolSize,
30 | ReadTimeout: opt.ReadTimeout,
31 | WriteTimeout: opt.WriteTimeout,
32 | MinIdleConns: opt.MinIdleConns,
33 | DB: opt.DB,
34 | })
35 | client.AddHook(hooks.NewMetricsHook(client))
36 | return client
37 | }
38 | client = redis.NewClient(opt)
39 | client.AddHook(hooks.NewMetricsHook(client))
40 | return client
41 | }
42 |
43 | // validateRedisPersistConfig checks whether the persistence config of Redis is good or not
44 | func validateRedisPersistConfig(ctx context.Context, cli *redis.Client, conf *config.RedisConf) error {
45 | infoStr, err := cli.Info(ctx).Result()
46 | if err != nil {
47 | return err
48 | }
49 | isNoEvictionPolicy, isAppendOnlyEnabled := false, false
50 | lines := strings.Split(infoStr, "\r\n")
51 | for _, line := range lines {
52 | fields := strings.Split(line, ":")
53 | if len(fields) != 2 {
54 | continue
55 | }
56 | switch fields[0] {
57 | case "maxmemory_policy":
58 | isNoEvictionPolicy = fields[1] == "noeviction"
59 | case "aof_enabled":
60 | isAppendOnlyEnabled = fields[1] == "1"
61 | }
62 | }
63 | if !isNoEvictionPolicy {
64 | return errors.New("redis memory_policy MUST be 'noeviction' to prevent data loss")
65 | }
66 | if !isAppendOnlyEnabled {
67 | return errors.New("redis appendonly MUST be 'yes' to prevent data loss")
68 | }
69 | return nil
70 | }
71 |
72 | // ValidateRedisConfig checks whether the redis configuration is good or not
73 | func ValidateRedisConfig(ctx context.Context, conf *config.RedisConf) error {
74 | // For sentinel mode, it will only check the master persist config
75 | redisCli := NewRedisClient(conf, &redis.Options{PoolSize: 1})
76 | defer redisCli.Close()
77 |
78 | return validateRedisPersistConfig(ctx, redisCli, conf)
79 | }
80 |
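81 | // For reference, the checks in validateRedisPersistConfig correspond to these redis.conf
82 | // settings (listed here only as an illustration):
83 | //
84 | //   maxmemory-policy noeviction
85 | //   appendonly yes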
--------------------------------------------------------------------------------
/uuid/uuid.go:
--------------------------------------------------------------------------------
1 | package uuid
2 |
3 | import (
4 | "encoding/binary"
5 | "errors"
6 | "fmt"
7 | "math/rand"
8 | "sync"
9 | "time"
10 |
11 | "github.com/oklog/ulid"
12 | )
13 |
14 | const JobIDV1 = 1
15 |
16 | // Use a pool to avoid concurrent access to the rand.Source
17 | var entropyPool = sync.Pool{
18 | New: func() interface{} {
19 | return rand.New(rand.NewSource(time.Now().UnixNano()))
20 | },
21 | }
22 |
23 | // GenUniqueID generates a unique ID.
24 | // Currently using ULID; this may conflict with IDs generated by other processes, with very low probability.
25 | func GenUniqueID() string {
26 | entropy := entropyPool.Get().(*rand.Rand)
27 | defer entropyPool.Put(entropy)
28 | id := ulid.MustNew(ulid.Now(), entropy)
29 | return id.String()
30 | }
31 |
32 | // GenJobIDWithVersion generates a job ID with a version prefix and the delaySecond.
33 | // For the legacy version 0 job ID, the version prefix is not included;
34 | // we use the version prefix to distinguish different job payload formats.
35 | //
36 | // The last four bytes of the 16-byte ULID are used to store the delaySecond.
37 | // Those four bytes are random values in a ULID, so overwriting them won't
38 | // affect anything except randomness.
39 | func GenJobIDWithVersion(version int, delaySecond uint32) string {
40 | entropy := entropyPool.Get().(*rand.Rand)
41 | defer entropyPool.Put(entropy)
42 | id := ulid.MustNew(ulid.Now(), entropy)
43 | // Encode the delaySecond in little-endian and store it in the last four bytes
44 | binary.LittleEndian.PutUint32(id[len(id)-4:], delaySecond)
45 | // legacy version is 0, it doesn't include version prefix in the id
46 | if version == 0 {
47 | return id.String()
48 | }
49 | if version < 0 || version > 9 {
50 | version = JobIDV1
51 | }
52 | return fmt.Sprintf("%d%s", version, id.String())
53 | }
54 |
55 | func ElapsedMilliSecondFromUniqueID(s string) (int64, error) {
56 | s, _ = extractJobID(s)
57 | id, err := ulid.Parse(s)
58 | if err != nil {
59 | return 0, err
60 | }
61 | t := id.Time()
62 | now := ulid.Now()
63 | if t <= now {
64 | return int64(now - t), nil
65 | } else {
66 | return 0, errors.New("id has a future timestamp")
67 | }
68 | }
69 |
70 | func ExtractDelaySecondFromUniqueID(s string) (uint32, error) {
71 | s, _ = extractJobID(s)
72 | id, err := ulid.Parse(s)
73 | if err != nil {
74 | return 0, err
75 | }
76 | return binary.LittleEndian.Uint32(id[len(id)-4:]), nil
77 | }
78 |
79 | func extractJobID(s string) (string, int) {
80 | if len(s) <= ulid.EncodedSize {
81 | return s, 0
82 | }
83 | return s[1:], int(s[0] - '0')
84 | }
85 |
86 | func ExtractJobIDVersion(s string) int {
87 | if len(s) == ulid.EncodedSize {
88 | return 0
89 | }
90 | return int(s[0] - '0')
91 | }
92 |
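93 | // For illustration: GenJobIDWithVersion(1, 30) yields a 27-character ID, the digit "1" followed by a
94 | // 26-character ULID whose last four binary bytes encode the 30-second delay, while version 0 omits the
95 | // prefix and yields a bare 26-character ULID.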
--------------------------------------------------------------------------------
/scripts/token-cli:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python
2 |
3 | import argparse
4 | import requests
5 |
6 | # arguments
7 | examples = """examples:
8 | ./token-cli 127.0.0.1:7778 --pools # list pools
9 | ./token-cli 127.0.0.1:7778 -c -p pool -n namespace -D description # create a new namespace token
10 | ./token-cli 127.0.0.1:7778 -d -p pool -n namespace -t token # delete the token from the namespace
11 | ./token-cli 127.0.0.1:7778 -l -p pool -n namespace # list tokens in namespace
12 | """
13 | parser = argparse.ArgumentParser(
14 | description="Manage the namespace and tokens",
15 | formatter_class=argparse.RawDescriptionHelpFormatter,
16 | epilog=examples)
17 |
18 | parser.add_argument("server", type=str, help="the server address(host:port)")
19 | parser.add_argument("-D", "--description", type=str, help="the description")
20 | parser.add_argument("-p", "--pool", type=str, help="the name of pool")
21 | parser.add_argument("-n", "--namespace", type=str, help="the name of namespace")
22 | parser.add_argument("-t", "--token", type=str, help="the token to be created or deleted")
23 | parser.add_argument("-c", "--create", action="store_true", help="create the namespace")
24 | parser.add_argument("-d", "--delete", action="store_true", help="delete the namespace")
25 | parser.add_argument("-l", "--list", action="store_true", help="list tokens")
26 | parser.add_argument("-P", "--pools", action="store_true", help="list pools")
27 | args = parser.parse_args()
28 |
29 | addr = "http://"+args.server
30 | if args.pools:
31 | r = requests.get(addr+"/pools")
32 | print(r.text)
33 | exit(0)
34 |
35 | if args.namespace is None:
36 | print("param 'namespace' is missing, please use -n to assign the namespace")
37 | exit(1)
38 | if args.create and args.description is None:
39 | print("param 'description' is missing, please use -D to assign the description")
40 | exit(1)
41 | if args.delete and args.token is None:
42 | print("param 'token' is missing, please use -t to assign the token")
43 | exit(1)
44 |
45 | if args.pool is None or args.pool == "default":
46 | args.pool = ""
47 |
48 | if args.list:
49 | r = requests.get(addr+"/token/"+args.namespace+"?pool="+args.pool)
50 | print(r.text)
51 | elif args.create:
52 | r = requests.post(addr+"/token/"+args.namespace+"?pool="+args.pool, data = {'description':args.description, 'token':args.token})
53 | print(r.text)
54 | elif args.delete:
55 | r = requests.delete(addr+"/token/"+args.namespace+"/"+args.token+"?pool="+args.pool)
56 | if r.status_code == 204:
57 | print("ok")
58 | else:
59 | print(r.text)
60 | else:
61 | print("aha, I'm do nothing.")
62 |
63 |
--------------------------------------------------------------------------------
/server/handlers/setup_test.go:
--------------------------------------------------------------------------------
1 | package handlers_test
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net/http"
7 | "net/http/httptest"
8 | "os"
9 | "testing"
10 |
11 | "github.com/bitleak/lmstfy/auth"
12 | "github.com/bitleak/lmstfy/config"
13 | "github.com/bitleak/lmstfy/engine"
14 | redis_engine "github.com/bitleak/lmstfy/engine/redis"
15 | "github.com/bitleak/lmstfy/helper"
16 | "github.com/bitleak/lmstfy/server/handlers"
17 | "github.com/bitleak/lmstfy/throttler"
18 |
19 | "github.com/gin-gonic/gin"
20 | "github.com/sirupsen/logrus"
21 | )
22 |
23 | func ginTest(req *http.Request) (*gin.Context, *gin.Engine, *httptest.ResponseRecorder) {
24 | w := httptest.NewRecorder()
25 | gin.SetMode(gin.ReleaseMode)
26 | ctx, engine := gin.CreateTestContext(w)
27 | ctx.Request = req
28 | return ctx, engine, w
29 | }
30 |
31 | func setup(Conf *config.Config) {
32 | dummyCtx := context.TODO()
33 | logger := logrus.New()
34 | level, _ := logrus.ParseLevel(Conf.LogLevel)
35 | logger.SetLevel(level)
36 |
37 | conn := helper.NewRedisClient(&Conf.AdminRedis, nil)
38 | err := conn.Ping(dummyCtx).Err()
39 | if err != nil {
40 | panic(fmt.Sprintf("Failed to ping: %s", err))
41 | }
42 | err = conn.FlushDB(dummyCtx).Err()
43 | if err != nil {
44 | panic(fmt.Sprintf("Failed to flush db: %s", err))
45 | }
46 |
47 | for _, poolConf := range Conf.Pool {
48 | conn := helper.NewRedisClient(&poolConf, nil)
49 | err := conn.Ping(dummyCtx).Err()
50 | if err != nil {
51 | panic(fmt.Sprintf("Failed to ping: %s", err))
52 | }
53 | err = conn.FlushDB(dummyCtx).Err()
54 | if err != nil {
55 | panic(fmt.Sprintf("Failed to flush db: %s", err))
56 | }
57 | }
58 |
59 | if err := redis_engine.Setup(Conf); err != nil {
60 | panic(fmt.Sprintf("Failed to setup redis engine: %s", err))
61 | }
62 | if engine.GetEngine(config.DefaultPoolName) == nil {
63 | panic("missing default pool")
64 | }
65 |
66 | if err := auth.Setup(Conf); err != nil {
67 | panic(fmt.Sprintf("Failed to setup auth module: %s", err))
68 | }
69 | if err := throttler.Setup(&Conf.AdminRedis, logger); err != nil {
70 | panic(fmt.Sprintf("Failed to setup throttler module: %s", err))
71 | }
72 | handlers.Setup(logger)
73 | handlers.SetupParamDefaults(Conf)
74 | }
75 |
76 | func runAllTests(m *testing.M, version string) {
77 | presetConfig, err := config.CreatePresetForTest(version)
78 | if err != nil {
79 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
80 | }
81 |
82 | setup(presetConfig.Config)
83 | ret := m.Run()
84 | if ret != 0 {
85 | os.Exit(ret)
86 | }
87 | engine.Shutdown()
88 | presetConfig.Destroy()
89 | }
90 |
91 | func TestMain(m *testing.M) {
92 | logger := logrus.New()
93 | redis_engine.SetLogger(logger)
94 |
95 | runAllTests(m, "")
96 | }
97 |
--------------------------------------------------------------------------------
/doc/Usage-Patterns.md:
--------------------------------------------------------------------------------
1 | # Common Usage Patterns for lmstfy
2 |
3 | ## Ordinary Asynchronous Jobs (Fire and Forget)
4 |
5 | As shown in the figure below, after a job is sent to the corresponding queue, all the workers act as a
6 | computing cluster and compete to fetch it. The worker that grabs the job deletes it after processing it.
7 | If that worker dies unexpectedly and the job was configured with retries, the remaining workers will fetch the job again after a period of time (the TTR).
8 |
9 |
10 |
11 | The API demo for this pattern:
12 |
13 | ### publisher
14 |
15 | ```
16 | # publish a job (if it fails, retry at most twice)
17 | curl -X PUT -H "X-Token: xxx" "localhost:9999/api/test/q?tries=3" -d "hey new job"
18 | ```
19 |
20 | ### worker
21 |
22 | The worker's workflow loop (a Go sketch of this loop is included at the end of this document):
23 |
24 | ```
25 | # fetch a job
26 | curl -H "X-Token: xxx" "localhost:9999/api/test/q?timeout=10" > job.json
27 |
28 | # process the job
29 | cat job.json
30 |
31 | # delete the finished job (job_id is the ID of the job fetched above)
32 | curl -X DELETE -H "X-Token: xxx" "localhost:9999/api/test/q/job/{job_id}"
33 |
34 | # go back to the beginning and continue fetching jobs
35 | ```
36 |
37 | ## Delayed Jobs (Delay and Fire)
38 |
39 | Similar to the asynchronous jobs above, except that the publisher can specify that the job only becomes visible to workers after a certain period of time.
40 |
41 |
42 |
43 | ### publisher
44 |
45 | ```
46 | # publish a job (if it fails, retry at most twice; delay it by 30 seconds)
47 | curl -X PUT -H "X-Token: xxx" "localhost:9999/api/test/q?tries=3&delay=30" -d "hey new delay job"
48 | ```
49 |
50 |
51 | ## "同步非阻塞" (Fire and Wait, 这其实是一种不是很"高效"的 RPC)
52 |
53 | publisher 发布任务后需要等待任务的返回结果. 这个使用的场景和使用 RPC 的场景类似, 不同的是, publisher 不需要知道
54 | 任何 worker 相关的信息, 也不需要检查 worker 的健康状态, worker 是可以水平扩展的.
55 |
56 |
57 |
58 | ### publisher
59 |
60 | ```
61 | # publish a job (if it fails, retry at most twice)
62 | curl -X PUT -H "X-Token: xxx" "localhost:9999/api/test/q?tries=3" -d "hey new job"
63 |
64 | # wait for the notification that the job succeeded (this actually waits on a temporary queue whose name is the published job's ID)
65 | curl -H "X-Token: xxx" "localhost:9999/api/test/{job_id}"
66 | ```
67 |
68 | ### worker
69 |
70 | The worker's workflow loop:
71 |
72 | ```
73 | # fetch a job
74 | curl -H "X-Token: xxx" "localhost:9999/api/test/q?timeout=10" > job.json
75 |
76 | # process the job
77 | cat job.json
78 |
79 | # notify the publisher that the job has been processed successfully
80 | curl -X PUT -H "X-Token: xxx" "localhost:9999/api/test/{job_id}" -d "hey I'm done"
81 |
82 | # delete the finished job (job_id is the ID of the job fetched above)
83 | curl -X DELETE -H "X-Token: xxx" "localhost:9999/api/test/q/job/{job_id}"
84 |
85 | # go back to the beginning and continue fetching jobs
86 | ```
87 |
88 | ## Priority Queues
89 |
90 | A worker can listen on multiple queues at once; the order in which they are listed determines the priority, so jobs in queues listed earlier are fetched first.
91 |
92 | ### worker
93 |
94 | ```
95 | # fetch a job
96 | curl -H "X-Token: xxx" "localhost:9999/api/test/q1,q2,q3?timeout=10" > job.json
97 |
98 | # process the job
99 | cat job.json
100 |
101 | # delete the finished job (job_id is the ID of the job fetched above)
102 | curl -X DELETE -H "X-Token: xxx" "localhost:9999/api/test/q/job/{job_id}"
103 |
104 | # go back to the beginning and continue fetching jobs
105 | ```
106 |
107 | With the workflow above, if jobs arrive in q1 and q2 at the same time, the worker will get the job from q1 first.
108 |
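109 | ## Appendix: a worker-loop sketch in Go
110 |
111 | The following is a minimal sketch of the Fire-and-Forget worker loop above, written against the plain HTTP API
112 | with Go's standard library. The response field names (`job_id`, `data`) are assumptions made for illustration;
113 | check the consume API's actual reply schema before relying on them.
114 |
115 | ```
116 | package main
117 |
118 | import (
119 | 	"encoding/json"
120 | 	"fmt"
121 | 	"io"
122 | 	"net/http"
123 | 	"time"
124 | )
125 |
126 | func main() {
127 | 	base := "http://localhost:9999/api/test/q"
128 | 	client := &http.Client{}
129 | 	for {
130 | 		// fetch a job (long-poll for up to 10 seconds)
131 | 		req, _ := http.NewRequest(http.MethodGet, base+"?timeout=10", nil)
132 | 		req.Header.Set("X-Token", "xxx")
133 | 		resp, err := client.Do(req)
134 | 		if err != nil {
135 | 			time.Sleep(time.Second) // avoid a hot loop when the server is unreachable
136 | 			continue
137 | 		}
138 | 		body, _ := io.ReadAll(resp.Body)
139 | 		resp.Body.Close()
140 | 		if resp.StatusCode != http.StatusOK {
141 | 			continue // no job within the timeout, poll again
142 | 		}
143 | 		var job struct {
144 | 			JobID string `json:"job_id"` // assumed field name
145 | 			Data  string `json:"data"`   // assumed field name
146 | 		}
147 | 		if err := json.Unmarshal(body, &job); err != nil {
148 | 			continue
149 | 		}
150 | 		fmt.Println("processing job", job.JobID, job.Data) // process the job here
151 | 		// delete the finished job once it has been handled (the ACK step)
152 | 		del, _ := http.NewRequest(http.MethodDelete, base+"/job/"+job.JobID, nil)
153 | 		del.Header.Set("X-Token", "xxx")
154 | 		if ackResp, err := client.Do(del); err == nil {
155 | 			ackResp.Body.Close()
156 | 		}
157 | 	}
158 | }
159 | ```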
--------------------------------------------------------------------------------
/engine/redis/info.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "strconv"
5 | "strings"
6 | "time"
7 | )
8 |
9 | type RedisInfo struct {
10 | MemUsed int64 // used_memory
11 | MemMax int64 // maxmemory
12 | NKeys int64 // total keys
13 | NExpires int64 // keys with TTL
14 | NClients int64 // connected_clients
15 | NBlocking int64 // blocked_clients
16 | }
17 |
18 | func GetRedisInfo(redis *RedisInstance) *RedisInfo {
19 | info := &RedisInfo{}
20 |
21 | memoryInfo, err := redis.Conn.Info(dummyCtx, "memory").Result()
22 | if err == nil {
23 | lines := strings.Split(memoryInfo, "\r\n")
24 | for _, l := range lines {
25 | k, v, _ := parseColonSeparatedKV(l)
26 | switch k {
27 | case "used_memory":
28 | info.MemUsed = v
29 | case "maxmemory":
30 | info.MemMax = v
31 | }
32 | }
33 | }
34 | keyInfo, err := redis.Conn.Info(dummyCtx, "keyspace").Result()
35 | if err == nil {
36 | lines := strings.Split(keyInfo, "\r\n")
37 | for _, l := range lines {
38 | splits := strings.SplitN(l, ":", 2)
39 | if len(splits) != 2 || splits[0] != "db0" {
40 | continue
41 | }
42 | splits2 := strings.SplitN(splits[1], ",", 3)
43 | for _, s := range splits2 {
44 | k, v, _ := parseEqualSeparatedKV(s)
45 | switch k {
46 | case "keys":
47 | info.NKeys = v
48 | case "expires":
49 | info.NExpires = v
50 | }
51 | }
52 | }
53 | }
54 | clientInfo, err := redis.Conn.Info(dummyCtx, "clients").Result()
55 | if err == nil {
56 | lines := strings.Split(clientInfo, "\r\n")
57 | for _, l := range lines {
58 | k, v, _ := parseColonSeparatedKV(l)
59 | switch k {
60 | case "connected_clients":
61 | info.NClients = v
62 | case "blocked_clients":
63 | info.NBlocking = v
64 | }
65 | }
66 | }
67 |
68 | return info
69 | }
70 |
71 | func parseColonSeparatedKV(str string) (key string, value int64, err error) {
72 | splits := strings.SplitN(str, ":", 2)
73 | if len(splits) == 2 {
74 | key = splits[0]
75 | value, err = strconv.ParseInt(splits[1], 10, 64)
76 | }
77 | return
78 | }
79 |
80 | func parseEqualSeparatedKV(str string) (key string, value int64, err error) {
81 | splits := strings.SplitN(str, "=", 2)
82 | if len(splits) == 2 {
83 | key = splits[0]
84 | value, err = strconv.ParseInt(splits[1], 10, 64)
85 | }
86 | return
87 | }
88 |
89 | func RedisInstanceMonitor(redis *RedisInstance) {
90 | for {
91 | time.Sleep(5 * time.Second)
92 | info := GetRedisInfo(redis)
93 |
94 | metrics.redisMaxMem.WithLabelValues(redis.Name).Set(float64(info.MemMax))
95 | metrics.redisMemUsed.WithLabelValues(redis.Name).Set(float64(info.MemUsed))
96 | metrics.redisConns.WithLabelValues(redis.Name).Set(float64(info.NClients))
97 | metrics.redisBlockings.WithLabelValues(redis.Name).Set(float64(info.NBlocking))
98 | metrics.redisKeys.WithLabelValues(redis.Name).Set(float64(info.NKeys))
99 | metrics.redisExpires.WithLabelValues(redis.Name).Set(float64(info.NExpires))
100 | }
101 | }
102 |
--------------------------------------------------------------------------------
/config/config.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "os"
7 |
8 | "github.com/BurntSushi/toml"
9 | "github.com/sirupsen/logrus"
10 | )
11 |
12 | const DefaultPoolName = "default"
13 |
14 | type Config struct {
15 | Host string
16 | Port int
17 | AdminHost string
18 | AdminPort int
19 | LogLevel string
20 | LogDir string
21 | LogFormat string
22 | Accounts map[string]string
23 | EnableAccessLog bool
24 | AdminRedis RedisConf
25 | Pool RedisPool
26 |
27 | // Default publish params
28 | TTLSecond int
29 | DelaySecond int
30 | TriesNum int
31 | // Default consume params
32 | TTRSecond int
33 | TimeoutSecond int
34 | }
35 |
36 | type RedisPool map[string]RedisConf
37 |
38 | type RedisConf struct {
39 | Addr string
40 | Password string
41 | DB int
42 | PoolSize int
43 | MigrateTo string // If this is not empty, all the PUBLISH will go to that pool
44 | MasterName string
45 | Version string
46 | SentinelPassword string
47 | }
48 |
49 | func (rc *RedisConf) validate() error {
50 | if rc.Addr == "" {
51 | return errors.New("the pool addr must not be empty")
52 | }
53 | if rc.DB < 0 {
54 | return errors.New("the pool db must be greater than 0 or equal to 0")
55 | }
56 | return nil
57 | }
58 |
59 | // IsSentinel returns whether the pool is running in sentinel mode
60 | func (rc *RedisConf) IsSentinel() bool {
61 | return rc.MasterName != ""
62 | }
63 |
64 | // MustLoad loads the config file from the specified path; an error is returned if any condition is not met
65 | func MustLoad(path string) (*Config, error) {
66 | _, err := os.Stat(path)
67 | if err != nil {
68 | return nil, fmt.Errorf("failed to stat config file: %s", err)
69 | }
70 | conf := new(Config)
71 | conf.LogLevel = "info"
72 | conf.AdminHost = "127.0.0.1"
73 |
74 | conf.TTLSecond = 24 * 60 * 60 // 1 day
75 | conf.DelaySecond = 0
76 | conf.TriesNum = 1
77 | conf.TTRSecond = 2 * 60 // 2 minutes
78 | conf.TimeoutSecond = 0 // means non-blocking
79 |
80 | if _, err := toml.DecodeFile(path, conf); err != nil {
81 | panic(err)
82 | }
83 |
84 | if conf.Host == "" {
85 | return nil, errors.New("invalid host")
86 | }
87 | if conf.Port == 0 {
88 | return nil, errors.New("invalid port")
89 | }
90 | if _, ok := conf.Pool[DefaultPoolName]; !ok {
91 | return nil, errors.New("default redis pool not found")
92 | }
93 | for name, poolConf := range conf.Pool {
94 | if err := poolConf.validate(); err != nil {
95 | return nil, fmt.Errorf("invalid config in pool(%s): %s", name, err)
96 | }
97 | }
98 | if err := conf.AdminRedis.validate(); err != nil {
99 | return nil, fmt.Errorf("invalid config in admin redis: %s", err)
100 | }
101 | if conf.AdminPort == 0 {
102 | return nil, errors.New("invalid admin port")
103 | }
104 |
105 | _, err = logrus.ParseLevel(conf.LogLevel)
106 | if err != nil {
107 | return nil, errors.New("invalid log level")
108 | }
109 | return conf, nil
110 | }
111 |
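112 | // For illustration, a minimal TOML config that passes the checks in MustLoad could look like the
113 | // following (values are placeholders; see demo-conf.toml in the repository for a complete example):
114 | //
115 | //   Host = "0.0.0.0"
116 | //   Port = 7777
117 | //   AdminPort = 7778
118 | //
119 | //   [AdminRedis]
120 | //   Addr = "localhost:6379"
121 | //
122 | //   [Pool.default]
123 | //   Addr = "localhost:6379"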
--------------------------------------------------------------------------------
/server/handlers/middleware.go:
--------------------------------------------------------------------------------
1 | package handlers
2 |
3 | import (
4 | "math"
5 | "net/http"
6 | "regexp"
7 | "strings"
8 |
9 | "github.com/bitleak/lmstfy/auth"
10 | "github.com/bitleak/lmstfy/config"
11 | "github.com/bitleak/lmstfy/engine"
12 | "github.com/gin-gonic/gin"
13 | )
14 |
15 | func getToken(c *gin.Context) (token string) {
16 | token = c.GetHeader("X-Token")
17 | if token == "" {
18 | token = c.Query("token")
19 | }
20 | return
21 | }
22 |
23 | // The user token's format is: [{pool}:]{token}
24 | // There is an optional pool prefix; if provided, use that pool, otherwise use the default pool.
25 | func parseToken(rawToken string) (pool, token string) {
26 | splits := strings.SplitN(rawToken, ":", 2)
27 | if len(splits) == 2 {
28 | return splits[0], splits[1]
29 | }
30 | return config.DefaultPoolName, rawToken
31 | }
32 |
33 | func SetupQueueEngine(c *gin.Context) {
34 | pool, token := parseToken(getToken(c))
35 | c.Set("pool", pool)
36 | c.Set("token", token)
37 | e := engine.GetEngine(pool)
38 | if e == nil {
39 | c.JSON(http.StatusNotFound, gin.H{"error": "pool not found"})
40 | c.Abort()
41 | return
42 | }
43 | c.Set("engine", e)
44 | }
45 |
46 | func ValidateToken(c *gin.Context) {
47 | tk := c.GetString("token")
48 | if tk == "" {
49 | c.JSON(http.StatusUnauthorized, gin.H{"error": "token not found"})
50 | c.Abort()
51 | return
52 | }
53 | tm := auth.GetTokenManager()
54 | ok, err := tm.Exist(c.GetString("pool"), c.Param("namespace"), tk)
55 | if err != nil {
56 | c.JSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
57 | c.Abort()
58 | return
59 | }
60 | if !ok {
61 | c.JSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
62 | c.Abort()
63 | return
64 | }
65 | }
66 |
67 | var paramRegex = regexp.MustCompile("^[-_[:alnum:]]+$")
68 | var multiQueuesRegex = regexp.MustCompile("^[-_,[:alnum:]]+$")
69 |
70 | // Validate namespace and queue names don't contain any illegal characters
71 | func ValidateParams(c *gin.Context) {
72 | ns := c.Param("namespace")
73 | q := c.Param("queue")
74 | if len(ns) > math.MaxUint8 || len(q) > math.MaxUint8 {
75 | c.JSON(http.StatusBadRequest, gin.H{"error": "namespace or queue name too long"})
76 | c.Abort()
77 | return
78 | }
79 | if !paramRegex.MatchString(ns) {
80 | c.JSON(http.StatusBadRequest, gin.H{"error": "namespace name contains forbidden characters"})
81 | c.Abort()
82 | return
83 | }
84 | if strings.HasPrefix(q, "_") || !paramRegex.MatchString(q) {
85 | c.JSON(http.StatusBadRequest, gin.H{"error": "queue name contains forbidden characters"})
86 | c.Abort()
87 | return
88 | }
89 | }
90 |
91 | func ValidateMultiConsume(c *gin.Context) {
92 | ns := c.Param("namespace")
93 | q := c.Param("queue")
94 | if !paramRegex.MatchString(ns) {
95 | c.JSON(http.StatusBadRequest, gin.H{"error": "namespace name contains forbidden characters"})
96 | c.Abort()
97 | return
98 | }
99 | if !multiQueuesRegex.MatchString(q) {
100 | c.JSON(http.StatusBadRequest, gin.H{"error": "queue name contains forbidden characters"})
101 | c.Abort()
102 | return
103 | }
104 | }
105 |
--------------------------------------------------------------------------------
/.github/workflows/codeql.yml:
--------------------------------------------------------------------------------
1 | # For most projects, this workflow file will not need changing; you simply need
2 | # to commit it to your repository.
3 | #
4 | # You may wish to alter this file to override the set of languages analyzed,
5 | # or to provide custom queries or build logic.
6 | #
7 | # ******** NOTE ********
8 | # We have attempted to detect the languages in your repository. Please check
9 | # the `language` matrix defined below to confirm you have the correct set of
10 | # supported CodeQL languages.
11 | #
12 | name: "CodeQL"
13 |
14 | on:
15 | push:
16 | branches: [ "master" ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ "master" ]
20 | schedule:
21 | - cron: '26 17 * * 6'
22 |
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 |
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'go' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
37 | # Use only 'java' to analyze code written in Java, Kotlin or both
38 | # Use only 'javascript' to analyze code written in JavaScript, TypeScript or both
39 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
40 |
41 | steps:
42 | - name: Checkout repository
43 | uses: actions/checkout@v3
44 |
45 | # Initializes the CodeQL tools for scanning.
46 | - name: Initialize CodeQL
47 | uses: github/codeql-action/init@v2
48 | with:
49 | languages: ${{ matrix.language }}
50 | # If you wish to specify custom queries, you can do so here or in a config file.
51 | # By default, queries listed here will override any specified in a config file.
52 | # Prefix the list here with "+" to use these queries and those in the config file.
53 |
54 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
55 | # queries: security-extended,security-and-quality
56 |
57 |
58 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, or Java).
59 | # If this step fails, then you should remove it and run the build manually (see below)
60 | - name: Autobuild
61 | uses: github/codeql-action/autobuild@v2
62 |
63 | # ℹ️ Command-line programs to run using the OS shell.
64 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
65 |
66 | # If the Autobuild fails above, remove it and uncomment the following three lines.
67 | # Modify them (or add more) to build your code if your project requires it; please refer to the EXAMPLE below for guidance.
68 |
69 | # - run: |
70 | # echo "Run, Build Application using script"
71 | # ./location_of_script_within_repo/buildscript.sh
72 |
73 | - name: Perform CodeQL Analysis
74 | uses: github/codeql-action/analyze@v2
75 | with:
76 | category: "/language:${{matrix.language}}"
77 |
--------------------------------------------------------------------------------
/server/route.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "github.com/sirupsen/logrus"
6 | "net/http"
7 |
8 | "github.com/bitleak/lmstfy/server/handlers"
9 | )
10 |
11 | func SetupRoutes(e *gin.Engine, logger *logrus.Logger, devMode bool) {
12 | handlers.Setup(logger)
13 | group := e.Group("/api")
14 | group.Use(handlers.ValidateParams, handlers.SetupQueueEngine)
15 | if !devMode {
16 | group.Use(handlers.ValidateToken)
17 | }
18 | group.PUT("/:namespace/:queue", handlers.Throttle(handlers.ThrottleActionProduce), handlers.Publish)
19 | group.PUT("/:namespace/:queue/bulk", handlers.Throttle(handlers.ThrottleActionProduce), handlers.PublishBulk)
20 | group.PUT("/:namespace/:queue/job/:job_id", handlers.Publish)
21 | group.GET("/:namespace/:queue/peek", handlers.PeekQueue)
22 | group.GET("/:namespace/:queue/job/:job_id", handlers.PeekJob)
23 | group.DELETE("/:namespace/:queue/job/:job_id", handlers.Delete)
24 | group.DELETE("/:namespace/:queue", handlers.DestroyQueue)
25 | // The consume API is special: it accepts URLs like `/api/namespace/q1,q2,q3`,
26 | // while all other APIs forbid multiple queues.
27 | group2 := e.Group("/api")
28 | group2.Use(handlers.ValidateMultiConsume, handlers.SetupQueueEngine)
29 | if !devMode {
30 | group2.Use(handlers.ValidateToken)
31 | }
32 | // NOTE: the route should be named /:namespace/:queues, but gin's http-router reports a conflict
33 | // when mixing /:queue and /:queues together, :(
34 | group2.GET("/:namespace/:queue", handlers.Throttle(handlers.ThrottleActionConsume), handlers.Consume)
35 |
36 | // Dead letter
37 | group.GET("/:namespace/:queue/deadletter", handlers.PeekDeadLetter)
38 | group.PUT("/:namespace/:queue/deadletter", handlers.RespawnDeadLetter)
39 | group.DELETE("/:namespace/:queue/deadletter", handlers.DeleteDeadLetter)
40 |
41 | // Public API group
42 | pubGroup := e.Group("/api")
43 | pubGroup.Use(handlers.ValidateParams, handlers.SetupQueueEngine)
44 | pubGroup.GET("/:namespace/:queue/size", handlers.Size)
45 | pubGroup.GET("/:namespace/:queue/deadletter/size", handlers.GetDeadLetterSize)
46 |
47 | e.NoRoute(func(c *gin.Context) {
48 | c.JSON(http.StatusNotFound, gin.H{"error": "api not found"})
49 | })
50 | }
51 |
52 | func SetupAdminRoutes(e *gin.Engine, accounts gin.Accounts) {
53 | basicAuthMiddleware := func(c *gin.Context) { c.Next() }
54 | if len(accounts) > 0 {
55 | basicAuthMiddleware = gin.BasicAuth(accounts)
56 | }
57 |
58 | e.GET("/info", handlers.EngineMetaInfo)
59 | e.GET("/version", handlers.Version)
60 | e.GET("/metrics", handlers.PrometheusMetrics)
61 | e.GET("/pools", handlers.ListPools)
62 |
63 | // token's limit URI
64 | e.GET("/limits", basicAuthMiddleware, handlers.ListLimiters)
65 |
66 | tokenGroup := e.Group("/token")
67 | {
68 | tokenGroup.Use(basicAuthMiddleware)
69 | tokenGroup.GET("/:namespace", handlers.ListTokens)
70 | tokenGroup.POST("/:namespace", handlers.NewToken)
71 | tokenGroup.DELETE("/:namespace/:token", handlers.DeleteToken)
72 | tokenGroup.GET("/:namespace/:token/limit", handlers.GetLimiter)
73 | tokenGroup.POST("/:namespace/:token/limit", handlers.AddLimiter)
74 | tokenGroup.PUT("/:namespace/:token/limit", handlers.SetLimiter)
75 | tokenGroup.DELETE("/:namespace/:token/limit", handlers.DeleteLimiter)
76 | }
77 |
78 | e.Any("/debug/pprof/*profile", handlers.PProf)
79 | e.GET("/accesslog", handlers.GetAccessLogStatus)
80 | e.POST("/accesslog", basicAuthMiddleware, handlers.UpdateAccessLogStatus)
81 | }
82 |
--------------------------------------------------------------------------------
/engine/job.go:
--------------------------------------------------------------------------------
1 | package engine
2 |
3 | import (
4 | "encoding"
5 | "encoding/json"
6 |
7 | "github.com/bitleak/lmstfy/uuid"
8 | )
9 |
10 | type Job interface {
11 | Namespace() string
12 | Queue() string
13 | ID() string
14 | Body() []byte
15 | TTL() uint32
16 | Delay() uint32
17 | Tries() uint16
18 | ElapsedMS() int64
19 | Attributes() map[string]string
20 |
21 | encoding.TextMarshaler
22 | }
23 |
24 | type jobImpl struct {
25 | namespace string
26 | queue string
27 | id string
28 | body []byte
29 | ttl uint32
30 | delay uint32
31 | tries uint16
32 | attributes map[string]string
33 |
34 | _elapsedMS int64
35 | }
36 |
37 | // NOTE: there is a trick in this factory: the delay is embedded in the jobID.
38 | // By doing this we can delete the job that's located in the hourly AOF, by placing
39 | // a tombstone record in that AOF.
40 | func NewJob(namespace, queue string, body []byte, attributes map[string]string, ttl, delay uint32, tries uint16, jobID string) Job {
41 | if jobID == "" {
42 | jobID = uuid.GenJobIDWithVersion(0, delay)
43 | }
44 | return &jobImpl{
45 | namespace: namespace,
46 | queue: queue,
47 | id: jobID,
48 | body: body,
49 | ttl: ttl,
50 | delay: delay,
51 | tries: tries,
52 | attributes: attributes,
53 | }
54 | }
55 |
56 | func NewJobWithID(namespace, queue string, body []byte, attributes map[string]string, ttl uint32, tries uint16, jobID string) Job {
57 | delay, _ := uuid.ExtractDelaySecondFromUniqueID(jobID)
58 | return &jobImpl{
59 | namespace: namespace,
60 | queue: queue,
61 | id: jobID,
62 | body: body,
63 | ttl: ttl,
64 | delay: delay,
65 | tries: tries,
66 | attributes: attributes,
67 | }
68 | }
69 |
70 | func (j *jobImpl) Namespace() string {
71 | return j.namespace
72 | }
73 |
74 | func (j *jobImpl) Queue() string {
75 | return j.queue
76 | }
77 |
78 | func (j *jobImpl) ID() string {
79 | return j.id
80 | }
81 |
82 | func (j *jobImpl) Body() []byte {
83 | return j.body
84 | }
85 |
86 | func (j *jobImpl) TTL() uint32 {
87 | return j.ttl
88 | }
89 |
90 | func (j *jobImpl) Delay() uint32 {
91 | return j.delay
92 | }
93 |
94 | func (j *jobImpl) Tries() uint16 {
95 | return j.tries
96 | }
97 |
98 | func (j *jobImpl) ElapsedMS() int64 {
99 | if j._elapsedMS != 0 {
100 | return j._elapsedMS
101 | }
102 | ms, _ := uuid.ElapsedMilliSecondFromUniqueID(j.id)
103 | j._elapsedMS = ms
104 | return ms
105 | }
106 |
107 | func (j *jobImpl) Attributes() map[string]string {
108 | return j.attributes
109 | }
110 |
111 | func (j *jobImpl) MarshalText() (text []byte, err error) {
112 | var job struct {
113 | Namespace string `json:"namespace"`
114 | Queue string `json:"queue"`
115 | ID string `json:"id"`
116 | TTL uint32 `json:"ttl"`
117 | ElapsedMS int64 `json:"elapsed_ms"`
118 | Body []byte `json:"body"`
119 | Attributes map[string]string `json:"attributes,omitempty"`
120 | }
121 | job.Namespace = j.namespace
122 | job.Queue = j.queue
123 | job.ID = j.id
124 | job.TTL = j.ttl
125 | job.ElapsedMS = j._elapsedMS
126 | job.Body = j.body
127 | job.Attributes = j.attributes
128 | return json.Marshal(job)
129 | }
130 |
131 | func (j *jobImpl) GetDelayHour() uint16 {
132 | return 0
133 | }
134 |
--------------------------------------------------------------------------------
/engine/redis/meta.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "sync"
5 |
6 | "github.com/sirupsen/logrus"
7 | )
8 |
9 | /**
10 | Record meta info passively. Meta info includes:
11 | - the list of namespaces
12 | - the list of queues in each namespace
13 | */
14 |
15 | type MetaManager struct {
16 | redis *RedisInstance
17 | nsCache map[string]bool // namespace => bool
18 | qCache map[string]bool // {namespace}+{queue} => bool
19 | rwmu sync.RWMutex
20 | }
21 |
22 | func NewMetaManager(redis *RedisInstance) *MetaManager {
23 | m := &MetaManager{
24 | redis: redis,
25 | nsCache: make(map[string]bool),
26 | qCache: make(map[string]bool),
27 | }
28 | go m.initialize()
29 | return m
30 | }
31 |
32 | func (m *MetaManager) RecordIfNotExist(namespace, queue string) {
33 | m.rwmu.RLock()
34 | if m.nsCache[namespace] && m.qCache[join(namespace, queue)] {
35 | m.rwmu.RUnlock()
36 | return
37 | }
38 | m.rwmu.RUnlock()
39 |
40 | m.rwmu.Lock()
41 | if m.nsCache[namespace] {
42 | m.qCache[join(namespace, queue)] = true
43 | m.rwmu.Unlock()
44 | m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns", namespace), queue, 1)
45 | } else {
46 | m.nsCache[namespace] = true
47 | m.qCache[join(namespace, queue)] = true
48 | m.rwmu.Unlock()
49 | m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns"), namespace, 1)
50 | m.redis.Conn.HSet(dummyCtx, join(MetaPrefix, "ns", namespace), queue, 1)
51 | }
52 | }
53 |
54 | func (m *MetaManager) Remove(namespace, queue string) {
55 | m.rwmu.Lock()
56 | delete(m.nsCache, namespace)
57 | delete(m.qCache, join(namespace, queue))
58 | m.rwmu.Unlock()
59 | m.redis.Conn.HDel(dummyCtx, join(MetaPrefix, "ns", namespace), queue)
60 | }
61 |
62 | func (m *MetaManager) ListNamespaces() (namespaces []string, err error) {
63 | val, err := m.redis.Conn.HGetAll(dummyCtx, join(MetaPrefix, "ns")).Result()
64 | if err != nil {
65 | return nil, err
66 | }
67 | for k := range val {
68 | namespaces = append(namespaces, k)
69 | }
70 | return namespaces, nil
71 | }
72 |
73 | func (m *MetaManager) ListQueues(namespace string) (queues []string, err error) {
74 | val, err := m.redis.Conn.HGetAll(dummyCtx, join(MetaPrefix, "ns", namespace)).Result()
75 | if err != nil {
76 | return nil, err
77 | }
78 | for k := range val {
79 | queues = append(queues, k)
80 | }
81 | return queues, nil
82 | }
83 |
84 | func (m *MetaManager) initialize() {
85 | namespaces, err := m.ListNamespaces()
86 | if err != nil {
87 | logger.WithField("error", err).Error("initialize meta manager list namespaces error")
88 | return
89 | }
90 | for _, n := range namespaces {
91 | queues, err := m.ListQueues(n)
92 | if err != nil {
93 | logger.WithFields(logrus.Fields{
94 | "namespace": n,
95 | "error": err,
96 | }).Error("initialize meta manager list queues error")
97 | return
98 | }
99 | for _, q := range queues {
100 | m.rwmu.Lock()
101 | m.nsCache[n] = true
102 | m.qCache[join(n, q)] = true
103 | m.rwmu.Unlock()
104 | }
105 | }
106 | }
107 |
108 | func (m *MetaManager) Dump() (map[string][]string, error) {
109 | data := make(map[string][]string)
110 | namespaces, err := m.ListNamespaces()
111 | if err != nil {
112 | return nil, err
113 | }
114 | for _, n := range namespaces {
115 | queues, err := m.ListQueues(n)
116 | if err != nil {
117 | return nil, err
118 | }
119 | data[n] = queues
120 | }
121 | return data, nil
122 | }
123 |
--------------------------------------------------------------------------------
/engine/redis/pool.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "time"
7 |
8 | go_redis "github.com/go-redis/redis/v8"
9 |
10 | "github.com/bitleak/lmstfy/engine"
11 | "github.com/bitleak/lmstfy/uuid"
12 | )
13 |
14 | type JobPayload struct {
15 | Body []byte `json:"body"`
16 | Attributes map[string]string `json:"attributes,omitempty"`
17 | }
18 |
19 | // Pool stores all the jobs' data. This is a global singleton per engine.
20 | // NOTE: this `Pool` is NOT the same concept as the EnginePool.
21 | type Pool struct {
22 | redis *RedisInstance
23 | }
24 |
25 | func NewPool(redis *RedisInstance) *Pool {
26 | return &Pool{
27 | redis: redis,
28 | }
29 | }
30 |
31 | func PoolJobKey(j engine.Job) string {
32 | return join(PoolPrefix, j.Namespace(), j.Queue(), j.ID())
33 | }
34 |
35 | func PoolJobKey2(namespace, queue, jobID string) string {
36 | return join(PoolPrefix, namespace, queue, jobID)
37 | }
38 |
39 | func PoolJobKeyPrefix(namespace, queue string) string {
40 | return join(PoolPrefix, namespace, queue)
41 | }
42 |
43 | func (p *Pool) Add(j engine.Job) (err error) {
44 | metrics.poolAddJobs.WithLabelValues(p.redis.Name).Inc()
45 |
46 | // For the version 0 (legacy) jobID, the payload is the body directly;
47 | // for the version 1 jobID, the payload is a JSON string that contains the body.
48 | payload := j.Body()
49 | if uuid.ExtractJobIDVersion(j.ID()) != 0 {
50 | payload, err = json.Marshal(JobPayload{Body: j.Body(), Attributes: j.Attributes()})
51 | if err != nil {
52 | return err
53 | }
54 | }
55 |
56 | // SetNX returns OK(true) if the key didn't exist before.
57 | ok, err := p.redis.Conn.SetNX(dummyCtx, PoolJobKey(j), payload, time.Duration(j.TTL())*time.Second).Result()
58 | if err != nil {
59 | // Just retry once.
60 | ok, err = p.redis.Conn.SetNX(dummyCtx, PoolJobKey(j), payload, time.Duration(j.TTL())*time.Second).Result()
61 | }
62 | if err != nil {
63 | return err
64 | }
65 | if !ok {
66 | return errors.New("key existed") // Key existed before, avoid overwriting it, so return error
67 | }
68 | return err
69 | }
70 |
71 | func (p *Pool) Get(namespace, queue, jobID string) (*JobPayload, uint32, error) {
72 | pipeline := p.redis.Conn.Pipeline()
73 | jobKey := join(PoolPrefix, namespace, queue, jobID)
74 | getCmd := pipeline.Get(dummyCtx, jobKey)
75 | ttlCmd := pipeline.TTL(dummyCtx, jobKey)
76 | _, err := pipeline.Exec(dummyCtx)
77 | if err != nil {
78 | if errors.Is(err, go_redis.Nil) {
79 | return nil, 0, engine.ErrNotFound
80 | }
81 | return nil, 0, err
82 | }
83 |
84 | val := []byte(getCmd.Val())
85 | ttl := int64(ttlCmd.Val().Seconds())
86 | if ttl < 0 {
87 | // Use `0` to identify an indefinite TTL. NOTE: in redis, ttl=0 is possible when
88 | // the key is not recycled fast enough, but here it is okay to use `0` for an
89 | // indefinite TTL, because we issue the GET cmd before the TTL cmd, so the ttl must be > 0,
90 | // otherwise the GET cmd would have failed.
91 | ttl = 0
92 | }
93 | metrics.poolGetJobs.WithLabelValues(p.redis.Name).Inc()
94 |
95 | if uuid.ExtractJobIDVersion(jobID) == 0 {
96 | // For the version 0(legacy) jobID, the val only contains the body,
97 | // so we need to return the val as body directly.
98 | return &JobPayload{Body: val}, uint32(ttl), nil
99 | }
100 |
101 | // For the version 1 jobID, the value is encoded as a JSON string,
102 | // need to unmarshal it before return.
103 | var payload JobPayload
104 | if err := json.Unmarshal(val, &payload); err != nil {
105 | return nil, 0, err
106 | }
107 | return &payload, uint32(ttl), nil
108 | }
109 |
110 | func (p *Pool) Delete(namespace, queue, jobID string) error {
111 | metrics.poolDeleteJobs.WithLabelValues(p.redis.Name).Inc()
112 | return p.redis.Conn.Del(dummyCtx, join(PoolPrefix, namespace, queue, jobID)).Err()
113 | }
114 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/bitleak/lmstfy
2 |
3 | go 1.17
4 |
5 | require (
6 | github.com/BurntSushi/toml v0.3.1
7 | github.com/gin-gonic/gin v1.6.3
8 | github.com/go-redis/redis/v8 v8.11.4
9 | github.com/magiconair/properties v1.8.5
10 | github.com/mitchellh/go-homedir v1.1.0
11 | github.com/oklog/ulid v1.3.1
12 | github.com/orlangure/gnomock v0.18.1
13 | github.com/prometheus/client_golang v1.12.1
14 | github.com/sirupsen/logrus v1.8.1
15 | github.com/spf13/cobra v1.0.0
16 | github.com/spf13/viper v1.9.0
17 | github.com/stretchr/testify v1.7.1
18 | go.uber.org/automaxprocs v1.5.1
19 | google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44 // indirect
20 | google.golang.org/grpc v1.42.0 // indirect
21 | )
22 |
23 | require (
24 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
25 | github.com/Microsoft/go-winio v0.5.0 // indirect
26 | github.com/beorn7/perks v1.0.1 // indirect
27 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
28 | github.com/containerd/containerd v1.5.7 // indirect
29 | github.com/davecgh/go-spew v1.1.1 // indirect
30 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
31 | github.com/docker/distribution v2.7.1+incompatible // indirect
32 | github.com/docker/docker v20.10.10+incompatible // indirect
33 | github.com/docker/go-connections v0.4.0 // indirect
34 | github.com/docker/go-units v0.4.0 // indirect
35 | github.com/fsnotify/fsnotify v1.5.1 // indirect
36 | github.com/gin-contrib/sse v0.1.0 // indirect
37 | github.com/go-playground/locales v0.14.0 // indirect
38 | github.com/go-playground/universal-translator v0.18.0 // indirect
39 | github.com/go-playground/validator/v10 v10.8.0 // indirect
40 | github.com/go-redis/redis/v7 v7.4.1 // indirect
41 | github.com/gogo/protobuf v1.3.2 // indirect
42 | github.com/golang/protobuf v1.5.2 // indirect
43 | github.com/google/go-cmp v0.5.7 // indirect
44 | github.com/google/uuid v1.3.0 // indirect
45 | github.com/hashicorp/hcl v1.0.0 // indirect
46 | github.com/inconshreveable/mousetrap v1.0.0 // indirect
47 | github.com/json-iterator/go v1.1.12 // indirect
48 | github.com/kr/text v0.2.0 // indirect
49 | github.com/leodido/go-urn v1.2.1 // indirect
50 | github.com/mattn/go-isatty v0.0.12 // indirect
51 | github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
52 | github.com/mitchellh/mapstructure v1.4.2 // indirect
53 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
54 | github.com/modern-go/reflect2 v1.0.2 // indirect
55 | github.com/opencontainers/go-digest v1.0.0 // indirect
56 | github.com/opencontainers/image-spec v1.0.1 // indirect
57 | github.com/pelletier/go-toml v1.9.4 // indirect
58 | github.com/pkg/errors v0.9.1 // indirect
59 | github.com/pmezard/go-difflib v1.0.0 // indirect
60 | github.com/prometheus/client_model v0.2.0 // indirect
61 | github.com/prometheus/common v0.32.1 // indirect
62 | github.com/prometheus/procfs v0.7.3 // indirect
63 | github.com/spf13/afero v1.6.0 // indirect
64 | github.com/spf13/cast v1.4.1 // indirect
65 | github.com/spf13/jwalterweatherman v1.1.0 // indirect
66 | github.com/spf13/pflag v1.0.5 // indirect
67 | github.com/subosito/gotenv v1.2.0 // indirect
68 | github.com/ugorji/go/codec v1.1.7 // indirect
69 | go.uber.org/atomic v1.9.0 // indirect
70 | go.uber.org/multierr v1.7.0 // indirect
71 | go.uber.org/zap v1.19.1 // indirect
72 | golang.org/x/crypto v0.1.0 // indirect
73 | golang.org/x/net v0.1.0 // indirect
74 | golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
75 | golang.org/x/sys v0.1.0 // indirect
76 | golang.org/x/text v0.4.0 // indirect
77 | google.golang.org/protobuf v1.27.1 // indirect
78 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
79 | gopkg.in/ini.v1 v1.63.2 // indirect
80 | gopkg.in/yaml.v2 v2.4.0 // indirect
81 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
82 | )
83 |
--------------------------------------------------------------------------------
/engine/redis/timer_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | "time"
7 |
8 | "github.com/sirupsen/logrus"
9 |
10 | "github.com/bitleak/lmstfy/engine"
11 | )
12 |
13 | func TestTimer_Add(t *testing.T) {
14 | timer, err := NewTimer("timer_set_1", R, time.Second)
15 | if err != nil {
16 | panic(fmt.Sprintf("Failed to new timer: %s", err))
17 | }
18 | job := engine.NewJob("ns-timer", "q1", []byte("hello msg 1"), nil, 10, 0, 1, "")
19 | if err = timer.Add(job.Namespace(), job.Queue(), job.ID(), 10, 1); err != nil {
20 | t.Errorf("Failed to add job to timer: %s", err)
21 | }
22 | }
23 |
24 | func TestTimer_Tick(t *testing.T) {
25 | timer, err := NewTimer("timer_set_2", R, time.Second)
26 | if err != nil {
27 | panic(fmt.Sprintf("Failed to new timer: %s", err))
28 | }
29 | defer timer.Shutdown()
30 | job := engine.NewJob("ns-timer", "q2", []byte("hello msg 2"), nil, 5, 0, 1, "")
31 | pool := NewPool(R)
32 | pool.Add(job)
33 | timer.Add(job.Namespace(), job.Queue(), job.ID(), 3, 1)
34 | errChan := make(chan error, 1)
35 | go func() {
36 | var err error = nil
37 | defer func() {
38 | // BRPop could panic
39 | if r := recover(); r != nil {
40 | err = fmt.Errorf("recover with panic %v", r)
41 | }
42 | errChan <- err
43 | }()
44 | val, err := R.Conn.BRPop(dummyCtx, 5*time.Second, join(QueuePrefix, "ns-timer", "q2")).Result()
45 | if err != nil || len(val) == 0 {
46 | err = fmt.Errorf("Failed to pop the job from target queue")
47 | return
48 | }
49 | tries, jobID, err := structUnpack(val[1])
50 | if err != nil {
51 | err = fmt.Errorf("Failed to decode the job pop from queue")
52 | return
53 | }
54 | if tries != 1 || jobID != job.ID() {
55 | err = fmt.Errorf("Job data mismatched")
56 | return
57 | }
58 | }()
59 | err = <-errChan
60 | if err != nil {
61 | t.Error(err)
62 | }
63 | }
64 |
65 | func BenchmarkTimer(b *testing.B) {
66 | // Disable logging temporarily
67 | logger.SetLevel(logrus.ErrorLevel)
68 | defer logger.SetLevel(logrus.DebugLevel)
69 |
70 | t, err := NewTimer("timer_set_3", R, time.Second)
71 | if err != nil {
72 | panic(fmt.Sprintf("Failed to new timer: %s", err))
73 | }
74 | defer t.Shutdown()
75 | b.Run("Add", benchmarkTimer_Add(t))
76 |
77 | b.Run("Pop", benchmarkTimer_Pop(t))
78 | }
79 |
80 | func benchmarkTimer_Add(timer *Timer) func(b *testing.B) {
81 | pool := NewPool(R)
82 | return func(b *testing.B) {
83 | for i := 0; i < b.N; i++ {
84 | job := engine.NewJob("ns-timer", "q3", []byte("hello msg 1"), nil, 100, 0, 1, "")
85 | pool.Add(job)
86 | timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1)
87 | }
88 | }
89 | }
90 |
91 | func benchmarkTimer_Pop(timer *Timer) func(b *testing.B) {
92 | return func(b *testing.B) {
93 | key := join(QueuePrefix, "ns-timer", "q3")
94 | b.StopTimer()
95 | pool := NewPool(R)
96 | for i := 0; i < b.N; i++ {
97 | job := engine.NewJob("ns-timer", "q3", []byte("hello msg 1"), nil, 100, 0, 1, "")
98 | pool.Add(job)
99 | timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1)
100 | }
101 | b.StartTimer()
102 | for i := 0; i < b.N; i++ {
103 | R.Conn.BRPop(dummyCtx, 5*time.Second, key)
104 | }
105 | }
106 | }
107 |
108 | // BenchmarkTimer_Pump measures how long it takes to fire 10000 due jobs
109 | func BenchmarkTimer_Pump(b *testing.B) {
110 | // Disable logging temporarily
111 | logger.SetLevel(logrus.ErrorLevel)
112 | defer logger.SetLevel(logrus.DebugLevel)
113 |
114 | b.StopTimer()
115 |
116 | pool := NewPool(R)
117 | timer, err := NewTimer("timer_set_4", R, time.Second)
118 | if err != nil {
119 | panic(fmt.Sprintf("Failed to new timer: %s", err))
120 | }
121 | timer.Shutdown()
122 | for i := 0; i < 10000; i++ {
123 | job := engine.NewJob("ns-timer", "q4", []byte("hello msg 1"), nil, 100, 0, 1, "")
124 | pool.Add(job)
125 | timer.Add(job.Namespace(), job.Queue(), job.ID(), 1, 1)
126 | }
127 |
128 | b.StartTimer()
129 | timer.pump(time.Now().Unix() + 1)
130 | }
131 |
--------------------------------------------------------------------------------
/engine/redis/deadletter_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 | "time"
7 |
8 | "github.com/bitleak/lmstfy/engine"
9 | )
10 |
11 | func TestDeadLetter_Add(t *testing.T) {
12 | dl, _ := NewDeadLetter("ns-dead", "q0", R)
13 | 	if err := dl.Add("x"); err != nil {
14 | 		t.Errorf("Failed to add job to deadletter: %s", err)
15 | 	}
16 | }
17 |
18 | func TestDeadLetter_Peek(t *testing.T) {
19 | dl, _ := NewDeadLetter("ns-dead", "q1", R)
20 | dl.Add("x")
21 | dl.Add("y")
22 | dl.Add("z")
23 |
24 | size, jobID, err := dl.Peek()
25 | if err != nil {
26 | t.Fatalf("Failed to peek deadletter: %s", err)
27 | }
28 | if size != 3 || jobID != "x" {
29 | t.Fatal("Mismatched job")
30 | }
31 | }
32 |
33 | func TestDeadLetter_Delete(t *testing.T) {
34 | dl, _ := NewDeadLetter("ns-dead", "q2", R)
35 | dl.Add("x")
36 | dl.Add("y")
37 | dl.Add("z")
38 |
39 | count, err := dl.Delete(2)
40 | if err != nil || count != 2 {
41 | t.Fatalf("Failed to delete two jobs from deadletter")
42 | }
43 | size, jobID, _ := dl.Peek()
44 | if size != 1 || jobID != "z" {
45 | 		t.Fatal("Expected one job left in deadletter")
46 | }
47 |
48 | count, err = dl.Delete(1)
49 | if err != nil || count != 1 {
50 | t.Fatalf("Failed to delete job from deadletter")
51 | }
52 | size, jobID, _ = dl.Peek()
53 | if size != 0 {
54 | t.Fatal("Expected no job in deadletter")
55 | }
56 | }
57 |
58 | func TestDeadLetter_Respawn(t *testing.T) {
59 | p := NewPool(R)
60 | job1 := engine.NewJob("ns-dead", "q3", []byte("1"), nil, 60, 0, 1, "")
61 | job2 := engine.NewJob("ns-dead", "q3", []byte("2"), nil, 60, 0, 1, "")
62 | job3 := engine.NewJob("ns-dead", "q3", []byte("3"), nil, 60, 0, 1, "")
63 | p.Add(job1)
64 | p.Add(job2)
65 | p.Add(job3)
66 | dl, _ := NewDeadLetter("ns-dead", "q3", R)
67 | dl.Add(job1.ID())
68 | dl.Add(job2.ID())
69 | dl.Add(job3.ID())
70 |
71 | // Ensure TTL is removed when put into deadletter
72 | job1Key := PoolJobKey(job1)
73 | job1TTL := R.Conn.TTL(dummyCtx, job1Key).Val()
74 | if job1TTL.Seconds() > 0 {
75 | t.Fatalf("Respawned job's TTL should be removed")
76 | }
77 |
78 | timer, err := NewTimer("ns-dead", R, time.Second)
79 | if err != nil {
80 | panic(fmt.Sprintf("Failed to new timer: %s", err))
81 | }
82 | defer timer.Shutdown()
83 | q := NewQueue("ns-dead", "q3", R, timer)
84 |
85 | count, err := dl.Respawn(2, 10)
86 | if err != nil || count != 2 {
87 | t.Fatalf("Failed to respawn two jobs: %s", err)
88 | }
89 | jobID, _, err := q.Poll(1, 1)
90 | if err != nil || jobID != job1.ID() {
91 | t.Fatal("Expected to poll the first job respawned from deadletter")
92 | }
93 | // Ensure TTL is set
94 | job1Key = PoolJobKey(job1)
95 | job1TTL = R.Conn.TTL(dummyCtx, job1Key).Val()
96 | if 10-job1TTL.Seconds() > 2 { // 2 seconds passed? no way.
97 | t.Fatal("Deadletter job's TTL is not correct")
98 | }
99 | q.Poll(1, 1) // rm job2
100 |
101 | count, err = dl.Respawn(1, 10)
102 | if err != nil || count != 1 {
103 | 		t.Fatalf("Failed to respawn one job: %s", err)
104 | }
105 | jobID, _, err = q.Poll(1, 1)
106 | if err != nil || jobID != job3.ID() {
107 | t.Fatal("Expected to poll the second job respawned from deadletter")
108 | }
109 |
110 | // Ensure TTL is set
111 | job2Key := PoolJobKey(job2)
112 | job2TTL := R.Conn.TTL(dummyCtx, job2Key).Val()
113 | if 10-job2TTL.Seconds() > 2 {
114 | t.Fatal("Deadletter job's TTL is not correct")
115 | }
116 | }
117 |
118 | func TestDeadLetter_Size(t *testing.T) {
119 | p := NewPool(R)
120 | dl, _ := NewDeadLetter("ns-dead", "q3", R)
121 | cnt := 3
122 | for i := 0; i < cnt; i++ {
123 | job := engine.NewJob("ns-dead", "q3", []byte("1"), nil, 60, 0, 1, "")
124 | p.Add(job)
125 | dl.Add(job.ID())
126 | }
127 | size, _ := dl.Size()
128 | if size != int64(cnt) {
129 | t.Fatalf("Expected the deadletter queue size is: %d, but got %d\n", cnt, size)
130 | }
131 | dl.Delete(3)
132 | size, _ = dl.Size()
133 | if size != 0 {
134 | t.Fatalf("Expected the deadletter queue size is: %d, but got %d\n", 0, size)
135 | }
136 | }
137 |
--------------------------------------------------------------------------------
/server/handlers/throttler_test.go:
--------------------------------------------------------------------------------
1 | package handlers_test
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | "net/url"
7 | "strings"
8 | "testing"
9 | "time"
10 |
11 | "github.com/bitleak/lmstfy/auth"
12 | "github.com/bitleak/lmstfy/server/handlers"
13 | "github.com/bitleak/lmstfy/uuid"
14 | )
15 |
16 | func publishWithThrottler(namespace, queue, token string) error {
17 | query := url.Values{}
18 | query.Add("delay", "0")
19 | query.Add("ttl", "10")
20 | query.Add("tries", "100")
21 | query.Add("token", token)
22 | targetURL := fmt.Sprintf("http://localhost/api/%s/%s?%s", namespace, queue, query.Encode())
23 | body := strings.NewReader("hello msg")
24 | req, err := http.NewRequest("PUT", targetURL, body)
25 | if err != nil {
26 | return fmt.Errorf("create request err: %s", err.Error())
27 | }
28 | c, e, resp := ginTest(req)
29 | e.Use(handlers.ValidateParams, handlers.SetupQueueEngine, handlers.Throttle(handlers.ThrottleActionProduce))
30 | e.PUT("/api/:namespace/:queue", handlers.Publish)
31 | e.HandleContext(c)
32 | if resp.Code != http.StatusCreated {
33 | return fmt.Errorf("publish code: %d, error: %s", resp.Code, resp.Body.String())
34 | }
35 | return nil
36 | }
37 |
38 | func consumeWithThrottler(namespace, queue, token string) error {
39 | query := url.Values{}
40 | query.Add("ttr", "10")
41 | query.Add("timeout", "2")
42 | query.Add("token", token)
43 | targetURL := fmt.Sprintf("http://localhost/api/%s/%s?%s", namespace, queue, query.Encode())
44 | req, err := http.NewRequest("GET", targetURL, nil)
45 | if err != nil {
46 | return fmt.Errorf("create request err: %s", err.Error())
47 | }
48 | c, e, resp := ginTest(req)
49 | e.Use(handlers.ValidateParams, handlers.SetupQueueEngine, handlers.Throttle(handlers.ThrottleActionConsume))
50 | e.GET("/api/:namespace/:queue", handlers.Consume)
51 | e.HandleContext(c)
52 | if resp.Code != http.StatusOK {
53 | return fmt.Errorf("consume code: %d, error: %s", resp.Code, resp.Body.String())
54 | }
55 | return nil
56 | }
57 |
58 | func TestThrottlePublish(t *testing.T) {
59 | tk := auth.GetTokenManager()
60 | namespace := "ns-throttler"
61 | queue := "q1"
62 | token, _ := tk.New("", namespace, uuid.GenUniqueID(), "test publish throttler")
63 | limitStr := "{\"read\": 0, \"write\": 3, \"interval\":1}"
64 | if err := addTokenLimit(namespace, token, limitStr); err != nil {
65 | t.Fatal(err)
66 | }
67 | for i := 0; i < 4; i++ {
68 | err := publishWithThrottler(namespace, queue, token)
69 | if i != 3 {
70 | if err != nil {
71 | 				t.Fatalf("Failed to publish the test job, err: %s", err.Error())
72 | 			}
73 | 		} else {
74 | 			if err == nil {
75 | 				t.Fatalf("Expected publish to be rejected after reaching the rate limit")
76 | }
77 | }
78 | }
79 | time.Sleep(2 * time.Second)
80 | // retry after interval
81 | err := publishWithThrottler(namespace, queue, token)
82 | if err != nil {
83 | 		t.Fatalf("Failed to publish the test job, err: %s", err.Error())
84 | }
85 | }
86 |
87 | func TestThrottleConsume(t *testing.T) {
88 | tk := auth.GetTokenManager()
89 | namespace := "ns-throttler"
90 | queue := "q2"
91 | token, _ := tk.New("", namespace, uuid.GenUniqueID(), "test publish throttler")
92 | limitStr := "{\"read\": 3, \"write\": 100, \"interval\":1}"
93 | if err := addTokenLimit(namespace, token, limitStr); err != nil {
94 | t.Fatal(err)
95 | }
96 | for i := 0; i < 10; i++ {
97 | err := publishWithThrottler(namespace, queue, token)
98 | if err != nil {
99 | 			t.Fatalf("Failed to publish the test job, err: %s", err.Error())
100 | }
101 | }
102 | for i := 0; i < 4; i++ {
103 | err := consumeWithThrottler(namespace, queue, token)
104 | if i != 3 {
105 | if err != nil {
106 | 				t.Fatalf("Failed to consume the test job, err: %s", err.Error())
107 | 			}
108 | 		} else {
109 | 			if err == nil {
110 | 				t.Fatalf("Expected consume to be rejected after reaching the rate limit")
111 | }
112 | }
113 | }
114 | // retry after interval
115 | time.Sleep(2 * time.Second)
116 | err := consumeWithThrottler(namespace, queue, token)
117 | if err != nil {
118 | 		t.Fatalf("Failed to consume the test job, err: %s", err.Error())
119 | }
120 | }
121 |
--------------------------------------------------------------------------------
/engine/migration/engine.go:
--------------------------------------------------------------------------------
1 | package migration
2 |
3 | import (
4 | "io"
5 |
6 | "github.com/bitleak/lmstfy/engine"
7 | )
8 |
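// Engine wraps an old engine and a new engine while the underlying storage is being
// migrated: writes (Publish) only go to the new engine, while reads (Consume/Peek)
// drain the old engine first and fall back to the new one once it is empty.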
9 | type Engine struct {
10 | oldEngine engine.Engine
11 | newEngine engine.Engine
12 | }
13 |
14 | func NewEngine(old, new engine.Engine) engine.Engine {
15 | return &Engine{
16 | oldEngine: old,
17 | newEngine: new,
18 | }
19 | }
20 |
21 | func (e *Engine) Publish(job engine.Job) (jobID string, err error) {
22 | return e.newEngine.Publish(job)
23 | }
24 |
25 | // BatchConsume consumes up to count jobs from the given queues
26 | func (e *Engine) BatchConsume(namespace string, queues []string, count, ttrSecond, timeoutSecond uint32) (jobs []engine.Job, err error) {
27 | jobs, err = e.oldEngine.BatchConsume(namespace, queues, count, ttrSecond, 0)
28 | if len(jobs) != 0 {
29 | return // During migration, we always prefer the old engine's data as we need to drain it
30 | }
31 | return e.newEngine.BatchConsume(namespace, queues, count, ttrSecond, timeoutSecond)
32 | }
33 |
34 | func (e *Engine) Consume(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job engine.Job, err error) {
35 | job, err = e.oldEngine.Consume(namespace, queues, ttrSecond, 0)
36 | if job != nil {
37 | return // During migration, we always prefer the old engine's data as we need to drain it
38 | }
39 | return e.newEngine.Consume(namespace, queues, ttrSecond, timeoutSecond)
40 | }
41 |
42 | func (e *Engine) Delete(namespace, queue, jobID string) error {
43 | err := e.oldEngine.Delete(namespace, queue, jobID)
44 | if err != nil {
45 | return err
46 | }
47 | return e.newEngine.Delete(namespace, queue, jobID)
48 | }
49 |
50 | func (e *Engine) Peek(namespace, queue, optionalJobID string) (job engine.Job, err error) {
51 | job, err = e.oldEngine.Peek(namespace, queue, optionalJobID)
52 | if job != nil {
53 | return
54 | }
55 | return e.newEngine.Peek(namespace, queue, optionalJobID)
56 | }
57 |
58 | func (e *Engine) Size(namespace, queue string) (size int64, err error) {
59 | size1, err := e.oldEngine.Size(namespace, queue)
60 | if err != nil {
61 | return 0, err
62 | }
63 | size2, err := e.newEngine.Size(namespace, queue)
64 | return size1 + size2, err
65 | }
66 |
67 | func (e *Engine) Destroy(namespace, queue string) (count int64, err error) {
68 | count1, err := e.oldEngine.Destroy(namespace, queue)
69 | if err != nil {
70 | return 0, err
71 | }
72 | count2, err := e.newEngine.Destroy(namespace, queue)
73 | return count1 + count2, err
74 | }
75 |
76 | func (e *Engine) PeekDeadLetter(namespace, queue string) (size int64, jobID string, err error) {
77 | size1, jobID1, err := e.oldEngine.PeekDeadLetter(namespace, queue)
78 | if err != nil && err != engine.ErrNotFound {
79 | return 0, "", err
80 | }
81 | size2, jobID2, err := e.newEngine.PeekDeadLetter(namespace, queue)
82 | if err != nil {
83 | return 0, "", err
84 | }
85 | if size1 == 0 {
86 | return size2, jobID2, nil
87 | } else {
88 | 		return size1 + size2, jobID1, nil // If both engines have dead letters, return the old engine's job ID
89 | }
90 | }
91 |
92 | func (e *Engine) DeleteDeadLetter(namespace, queue string, limit int64) (count int64, err error) {
93 | count1, err := e.oldEngine.DeleteDeadLetter(namespace, queue, limit)
94 | if err != nil {
95 | return 0, err
96 | }
97 | count2, err := e.newEngine.DeleteDeadLetter(namespace, queue, limit-count1)
98 | return count1 + count2, err
99 | }
100 |
101 | func (e *Engine) RespawnDeadLetter(namespace, queue string, limit, ttlSecond int64) (count int64, err error) {
102 | count1, err := e.oldEngine.RespawnDeadLetter(namespace, queue, limit, ttlSecond)
103 | if err != nil {
104 | return 0, err
105 | }
106 | count2, err := e.newEngine.RespawnDeadLetter(namespace, queue, limit-count1, ttlSecond)
107 | return count1 + count2, err
108 | }
109 |
110 | // SizeOfDeadLetter returns the size of the dead letter queue
111 | func (e *Engine) SizeOfDeadLetter(namespace, queue string) (size int64, err error) {
112 | size1, err := e.oldEngine.SizeOfDeadLetter(namespace, queue)
113 | if err != nil {
114 | return 0, err
115 | }
116 | size2, err := e.newEngine.SizeOfDeadLetter(namespace, queue)
117 | return size1 + size2, err
118 | }
119 |
120 | func (e *Engine) Shutdown() {
121 | e.oldEngine.Shutdown()
122 | e.newEngine.Shutdown()
123 | }
124 |
125 | func (e *Engine) DumpInfo(output io.Writer) error {
126 | return e.newEngine.DumpInfo(output)
127 | }
128 |
--------------------------------------------------------------------------------
/auth/token.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "strings"
7 | "sync"
8 |
9 | "github.com/go-redis/redis/v8"
10 |
11 | "github.com/bitleak/lmstfy/config"
12 | "github.com/bitleak/lmstfy/engine"
13 | "github.com/bitleak/lmstfy/helper"
14 | )
15 |
16 | var (
17 | dummyCtx = context.TODO()
18 | )
19 |
20 | const TokenPrefix = "tk"
21 |
22 | var ErrPoolNotExist error = errors.New("the pool does not exist")
23 |
24 | // ErrTokenExist means the user-defined token already exists
25 | var ErrTokenExist error = errors.New("the token already exists")
26 |
27 | type TokenManager struct {
28 | cli *redis.Client
29 | cache map[string]bool // Caching {pool+namespace+token} => bool
30 | rwmu sync.RWMutex
31 | }
32 |
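// tokenKey builds the redis hash key that stores all tokens of a namespace, in the
// form "tk/{pool}/{namespace}"; an empty pool name falls back to the default pool.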
33 | func tokenKey(pool, namespace string) string {
34 | if pool == "" {
35 | pool = config.DefaultPoolName
36 | }
37 |
38 | var b strings.Builder
39 | b.Grow(len(TokenPrefix) + len(pool) + len(namespace) + 2)
40 | b.WriteString(TokenPrefix)
41 | b.WriteString("/")
42 | b.WriteString(pool)
43 | b.WriteString("/")
44 | b.WriteString(namespace)
45 | return b.String()
46 | }
47 |
48 | func cacheKey(pool, namespace, token string) string {
49 | if pool == "" {
50 | pool = config.DefaultPoolName
51 | }
52 |
53 | var b strings.Builder
54 | b.Grow(len(pool) + len(namespace) + len(token))
55 | b.WriteString(pool)
56 | b.WriteString(namespace)
57 | b.WriteString(token)
58 | return b.String()
59 | }
60 |
61 | func NewTokenManager(cli *redis.Client) *TokenManager {
62 | return &TokenManager{
63 | cli: cli,
64 | cache: make(map[string]bool),
65 | }
66 | }
67 |
68 | func (tm *TokenManager) isDefaultPool(pool string) bool {
69 | return pool == "" || pool == config.DefaultPoolName
70 | }
71 |
72 | // New creates the token in the given pool
73 | func (tm *TokenManager) New(pool, namespace, token, description string) (string, error) {
74 | if exists := engine.ExistsPool(pool); !exists {
75 | return "", ErrPoolNotExist
76 | }
77 | ok, err := tm.cli.HSetNX(dummyCtx, tokenKey(pool, namespace), token, description).Result()
78 | if err != nil {
79 | return "", err
80 | }
81 | if !ok {
82 | return "", ErrTokenExist
83 | }
84 | tm.rwmu.Lock()
85 | tm.cache[cacheKey(pool, namespace, token)] = true
86 | tm.rwmu.Unlock()
87 | if tm.isDefaultPool(pool) {
88 | return token, nil
89 | }
90 | return pool + ":" + token, nil
91 | }
92 |
93 | func (tm *TokenManager) Exist(pool, namespace, token string) (exist bool, err error) {
94 | if exists := engine.ExistsPool(pool); !exists {
95 | return false, ErrPoolNotExist
96 | }
97 | tm.rwmu.RLock()
98 | if tm.cache[cacheKey(pool, namespace, token)] {
99 | tm.rwmu.RUnlock()
100 | return true, nil
101 | }
102 | tm.rwmu.RUnlock()
103 | exist, err = tm.cli.HExists(dummyCtx, tokenKey(pool, namespace), token).Result()
104 | if err == nil && exist {
105 | tm.rwmu.Lock()
106 | tm.cache[cacheKey(pool, namespace, token)] = true
107 | tm.rwmu.Unlock()
108 | }
109 | return exist, err
110 | }
111 |
112 | func (tm *TokenManager) Delete(pool, namespace, token string) error {
113 | if exists := engine.ExistsPool(pool); !exists {
114 | return ErrPoolNotExist
115 | }
116 | tm.rwmu.Lock()
117 | delete(tm.cache, cacheKey(pool, namespace, token))
118 | tm.rwmu.Unlock()
119 | return tm.cli.HDel(dummyCtx, tokenKey(pool, namespace), token).Err()
120 | }
121 |
122 | func (tm *TokenManager) List(pool, namespace string) (tokens map[string]string, err error) {
123 | if exists := engine.ExistsPool(pool); !exists {
124 | return nil, ErrPoolNotExist
125 | }
126 | val, err := tm.cli.HGetAll(dummyCtx, tokenKey(pool, namespace)).Result()
127 | if err != nil {
128 | return nil, err
129 | }
130 | if pool == "" { // Default pool
131 | return val, nil
132 | }
133 | tokens = make(map[string]string)
134 | for k, v := range val {
135 | tokens[pool+":"+k] = v
136 | }
137 | return tokens, nil
138 | }
139 |
140 | var _tokenManager *TokenManager
141 |
142 | // Setup configures the auth redis client and the token manager
143 | func Setup(conf *config.Config) error {
144 | redisConf := conf.AdminRedis
145 | cli := helper.NewRedisClient(&redisConf, nil)
146 | if cli.Ping(dummyCtx).Err() != nil {
147 | return errors.New("can not connect to admin redis")
148 | }
149 | _tokenManager = NewTokenManager(cli)
150 | return nil
151 | }
152 |
153 | func GetTokenManager() *TokenManager {
154 | return _tokenManager
155 | }
156 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # LMSTFY(Let Me Schedule Task For You)
4 | [](https://github.com/bitleak/lmstfy/actions) [](https://goreportcard.com/report/github.com/bitleak/lmstfy) [](https://codecov.io/gh/bitleak/lmstfy) [](https://github.com/bitleak/lmstfy/releases) [](https://github.com/bitleak/lmstfy/releases) [](https://github.com/bitleak/lmstfy/blob/master/LICENSE) [](https://godoc.org/github.com/bitleak/lmstfy)
5 |
6 | lmstfy (pronounced /'lam.si.fai/) is a simple task queue (or job queue) service based on Redis storage, providing the following features:
7 |
8 | - basic job queue primitives: PUBLISH, CONSUME and DELETE via HTTP API
9 | - support extra lifecycle management of jobs:
10 | * job TTL (time-to-live)
11 | * job delay trigger (at second granularity)
12 | * job auto-retry
13 | * dead letter
14 | - namespace/queue level metrics
15 | - token consume/produce rate limit
16 |
17 | lmstfy itself doesn't handle data storage; it currently delegates storage to `Redis` or `Redis Sentinel` (a file-based
18 | storage backend is under development). Data integrity and durability are therefore in the hands of Redis;
19 | we use AOF and replication in our production environment to ensure that.
20 |
21 | ## Playing with Lmstfy
22 |
23 | If you just want to give it a try, `docker-compose` is highly recommended, but DON'T use it in production.
24 | We use docker-compose below to set up and play with Lmstfy.
25 |
26 | * Run the Redis and Lmstfy servers
27 | ```shell
28 | # run lmstfy and redis; the server will listen on localhost:7777
29 | # and the admin port on localhost:7778.
30 |
31 | % cd docker && docker-compose -p test-lmstfy up -d
32 | ```
33 | * Create a new namespace and token
34 | ```shell
35 | % curl -XPOST -d "description=test namespace" "http://127.0.0.1:7778/token/test-ns"
36 | ```
37 |
38 | * Publish a new message
39 | ```shell
40 | # The queue will be created dynamically, so feel free to publish messages to any queue.
41 | # The HTTP request below creates a job with delay = 1s, ttl = 3600s and tries = 16.
42 |
43 | % curl -XPUT -H "X-token:{ENTER YOUR TOKEN}" "http://127.0.0.1:7777/api/test-ns/test-queue?delay=1&ttl=3600&tries=16"
44 | ```
45 |
46 | * Consume a job from the queue
47 |
48 | ```shell
49 | % curl -H "X-token:{ENTER YOUR TOKEN}" "http://127.0.0.1:7777/api/test-ns/test-queue?ttr=30&timeout=2"
50 | ```
51 |
52 | * ACK the job
53 | ```shell
54 | % curl -i -XDELETE -H "X-token:{ENTER YOUR TOKEN}" "http://127.0.0.1:7777/api/test-ns/test-queue/job/{YOUR JOB ID}"
55 | ```
56 |
57 | ## Building Lmstfy
58 |
59 | It is as simple as:
60 |
61 | ```shell
62 | % make
63 | ```
64 |
65 | The application binary will be generated in the `_build` directory; see the `Running Lmstfy` section for how to run it.
66 |
67 | ## Running Lmstfy
68 |
69 | You must set up Redis first and configure it in the lmstfy config file before running:
70 |
71 | ```
72 | _build/lmstfy-server -c config/demo-conf.toml
73 | ```
74 |
75 | ## Internal
76 |
77 | The detailed internal implementation looks like this:
78 |
79 | 
80 |
81 | ## Client drivers
82 |
83 | * [Go](https://github.com/bitleak/lmstfy/tree/master/client) (The most stable and widely used; see the usage sketch below)
84 | * [PHP](https://github.com/bitleak/php-lmstfy-client)
85 | * [Java](https://github.com/bitleak/java-lmstfy-client)
86 | * [Rust](https://github.com/bitleak/rust-lmstfy-client)
87 |
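For a quick feel of the Go client, here is a minimal sketch of the publish/consume/ACK cycle; the host, port, namespace, token and queue values below are placeholders for your own deployment:

```go
package main

import (
	"fmt"

	"github.com/bitleak/lmstfy/client"
)

func main() {
	// NewLmstfyClient(host, port, namespace, token)
	c := client.NewLmstfyClient("127.0.0.1", 7777, "test-ns", "{YOUR TOKEN}")

	// Publish a job with ttl=3600s, tries=16 and delay=1s
	jobID, err := c.Publish("test-queue", []byte("hello msg"), 3600, 16, 1)
	if err != nil {
		panic(err)
	}
	fmt.Println("published job:", jobID)

	// Consume with ttr=30s and a 2s blocking timeout; job is nil when nothing is ready
	job, err := c.Consume("test-queue", 30, 2)
	if err != nil {
		panic(err)
	}
	if job == nil {
		fmt.Println("no job available")
		return
	}
	fmt.Println("consumed job:", job.ID, string(job.Data))

	// ACK (delete) the job so it won't be retried after the TTR expires
	if err := c.Ack("test-queue", job.ID); err != nil {
		panic(err)
	}
}
```

This mirrors the curl calls in the `Playing with Lmstfy` section above, just through the Go driver.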
88 | ## Documentation
89 |
90 | * [HTTP API](https://github.com/bitleak/lmstfy/blob/master/doc/API.md)
91 | * [Administration API](https://github.com/bitleak/lmstfy/blob/master/doc/administration.en.md)
92 | * [Throttler API](https://github.com/bitleak/lmstfy/blob/master/doc/throttler.en.md)
93 | * [Administration API Chinese](https://github.com/bitleak/lmstfy/blob/master/doc/administration.cn.md)
94 | * [Throttler API Chinese](https://github.com/bitleak/lmstfy/blob/master/doc/throttler.cn.md)
95 | * [Grafana](https://grafana.com/grafana/dashboards/12748)
96 |
97 | ## License
98 | LMSTFY is under the MIT license. See the [LICENSE](https://github.com/bitleak/lmstfy/blob/master/LICENSE) file for details.
99 |
--------------------------------------------------------------------------------
/engine/redis/timer.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "encoding/binary"
5 | "time"
6 |
7 | "github.com/go-redis/redis/v8"
8 | )
9 |
10 | const (
11 | luaPumpQueueScript = `
12 | local zset_key = KEYS[1]
13 | local output_queue_prefix = KEYS[2]
14 | local pool_prefix = KEYS[3]
15 | local output_deadletter_prefix = KEYS[4]
16 | local now = ARGV[1]
17 | local limit = ARGV[2]
18 |
19 | local expiredMembers = redis.call("ZRANGEBYSCORE", zset_key, 0, now, "LIMIT", 0, limit)
20 |
21 | if #expiredMembers == 0 then
22 | return 0
23 | end
24 |
25 | for _,v in ipairs(expiredMembers) do
26 | local ns, q, tries, job_id = struct.unpack("Hc0Hc0HHc0", v)
27 | if redis.call("EXISTS", table.concat({pool_prefix, ns, q, job_id}, "/")) > 0 then
28 | -- only pump job to ready queue/dead letter if the job did not expire
29 | if tries == 0 then
30 | -- no more tries, move to dead letter
31 | local val = struct.pack("HHc0", 1, #job_id, job_id)
32 | redis.call("PERSIST", table.concat({pool_prefix, ns, q, job_id}, "/")) -- remove ttl
33 | redis.call("LPUSH", table.concat({output_deadletter_prefix, ns, q}, "/"), val)
34 | else
35 | -- move to ready queue
36 | local val = struct.pack("HHc0", tonumber(tries), #job_id, job_id)
37 | redis.call("LPUSH", table.concat({output_queue_prefix, ns, q}, "/"), val)
38 | end
39 | end
40 | end
41 | redis.call("ZREM", zset_key, unpack(expiredMembers))
42 | return #expiredMembers
43 | `
44 | )
45 |
46 | // Timer is another way of saying "delay queue". The timer kicks jobs into the ready queue
47 | // when they are due.
48 | type Timer struct {
49 | name string
50 | redis *RedisInstance
51 | interval time.Duration
52 | shutdown chan struct{}
53 |
54 | pumpSHA string
55 | }
56 |
57 | // NewTimer returns an instance of the delay queue
58 | func NewTimer(name string, redis *RedisInstance, interval time.Duration) (*Timer, error) {
59 | timer := &Timer{
60 | name: name,
61 | redis: redis,
62 | interval: interval,
63 | shutdown: make(chan struct{}),
64 | }
65 |
66 | // Preload the lua scripts
67 | sha, err := redis.Conn.ScriptLoad(dummyCtx, luaPumpQueueScript).Result()
68 | if err != nil {
69 | logger.WithField("err", err).Error("Failed to preload lua script in timer")
70 | return nil, err
71 | }
72 | timer.pumpSHA = sha
73 |
74 | go timer.tick()
75 | return timer, nil
76 | }
77 |
78 | func (t *Timer) Name() string {
79 | return t.name
80 | }
81 |
82 | func (t *Timer) Add(namespace, queue, jobID string, delaySecond uint32, tries uint16) error {
83 | metrics.timerAddJobs.WithLabelValues(t.redis.Name).Inc()
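	// The sorted-set score is the absolute unix timestamp at which the job becomes due.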
84 | timestamp := time.Now().Unix() + int64(delaySecond)
85 |
86 | // struct-pack the data in the format `Hc0Hc0HHc0`:
87 | // {namespace len}{namespace}{queue len}{queue}{tries}{jobID len}{jobID}
88 | 	// lengths are 2-byte uint16 in little-endian
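	// For example (hypothetical values): namespace="ns", queue="q", tries=3, jobID="ab"
	// packs to: 02 00 'n' 's' | 01 00 'q' | 03 00 | 02 00 'a' 'b'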
89 | namespaceLen := len(namespace)
90 | queueLen := len(queue)
91 | jobIDLen := len(jobID)
92 | buf := make([]byte, 2+namespaceLen+2+queueLen+2+2+jobIDLen)
93 | binary.LittleEndian.PutUint16(buf[0:], uint16(namespaceLen))
94 | copy(buf[2:], namespace)
95 | binary.LittleEndian.PutUint16(buf[2+namespaceLen:], uint16(queueLen))
96 | copy(buf[2+namespaceLen+2:], queue)
97 | binary.LittleEndian.PutUint16(buf[2+namespaceLen+2+queueLen:], uint16(tries))
98 | binary.LittleEndian.PutUint16(buf[2+namespaceLen+2+queueLen+2:], uint16(jobIDLen))
99 | copy(buf[2+namespaceLen+2+queueLen+2+2:], jobID)
100 |
101 | return t.redis.Conn.ZAdd(dummyCtx, t.Name(), &redis.Z{Score: float64(timestamp), Member: buf}).Err()
102 | }
103 |
104 | // tick pumps all due jobs to the target queue on every interval
105 | func (t *Timer) tick() {
106 | tick := time.NewTicker(t.interval)
107 | for {
108 | select {
109 | case now := <-tick.C:
110 | currentSecond := now.Unix()
111 | t.pump(currentSecond)
112 | case <-t.shutdown:
113 | return
114 | }
115 | }
116 | }
117 |
118 | func (t *Timer) pump(currentSecond int64) {
119 | for {
120 | val, err := t.redis.Conn.EvalSha(dummyCtx, t.pumpSHA, []string{t.Name(), QueuePrefix, PoolPrefix, DeadLetterPrefix}, currentSecond, BatchSize).Result()
121 | if err != nil {
122 | 			if isLuaScriptGone(err) { // when redis restarts, the script needs to be reloaded
123 | sha, err := t.redis.Conn.ScriptLoad(dummyCtx, luaPumpQueueScript).Result()
124 | if err != nil {
125 | logger.WithField("err", err).Error("Failed to reload script")
126 | time.Sleep(time.Second)
127 | return
128 | }
129 | t.pumpSHA = sha
130 | }
131 | logger.WithField("err", err).Error("Failed to pump")
132 | time.Sleep(time.Second)
133 | return
134 | }
135 | n, _ := val.(int64)
136 | logger.WithField("count", n).Debug("Due jobs")
137 | metrics.timerDueJobs.WithLabelValues(t.redis.Name).Add(float64(n))
138 | if n == BatchSize {
139 | 			// There might be more expired jobs to pump
140 | metrics.timerFullBatches.WithLabelValues(t.redis.Name).Inc()
141 | time.Sleep(10 * time.Millisecond) // Hurry up! accelerate pumping the due jobs
142 | continue
143 | }
144 | return
145 | }
146 | }
147 |
148 | func (t *Timer) Shutdown() {
149 | close(t.shutdown)
150 | }
151 |
152 | func (t *Timer) Size() (size int64, err error) {
153 | return t.redis.Conn.ZCard(dummyCtx, t.name).Result()
154 | }
155 |
--------------------------------------------------------------------------------
/engine/redis/queue_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "testing"
7 | "time"
8 |
9 | "github.com/go-redis/redis/v8"
10 |
11 | "github.com/bitleak/lmstfy/engine"
12 | )
13 |
14 | func TestQueue_Push(t *testing.T) {
15 | timer, err := NewTimer("timer_set_q", R, time.Second)
16 | if err != nil {
17 | panic(fmt.Sprintf("Failed to new timer: %s", err))
18 | }
19 | defer timer.Shutdown()
20 | q := NewQueue("ns-queue", "q1", R, timer)
21 | job := engine.NewJob("ns-queue", "q1", []byte("hello msg 1"), nil, 10, 0, 1, "")
22 | if err := q.Push(job, 5); err != nil {
23 | t.Fatalf("Failed to push job into queue: %s", err)
24 | }
25 |
26 | job2 := engine.NewJob("ns-queue", "q2", []byte("hello msg 1"), nil, 10, 0, 1, "")
27 | if err := q.Push(job2, 5); err != engine.ErrWrongQueue {
28 | t.Fatalf("Expected to get wrong queue error, but got: %s", err)
29 | }
30 | }
31 |
32 | func TestQueue_Poll(t *testing.T) {
33 | timer, err := NewTimer("timer_set_q", R, time.Second)
34 | if err != nil {
35 | panic(fmt.Sprintf("Failed to new timer: %s", err))
36 | }
37 | defer timer.Shutdown()
38 | q := NewQueue("ns-queue", "q2", R, timer)
39 | job := engine.NewJob("ns-queue", "q2", []byte("hello msg 2"), nil, 10, 0, 1, "")
40 | go func() {
41 | time.Sleep(time.Second)
42 | q.Push(job, 2)
43 | }()
44 | jobID, _, err := q.Poll(2, 1)
45 | if err != nil || jobID == "" {
46 | t.Fatalf("Failed to poll job from queue: %s", err)
47 | }
48 | if job.ID() != jobID {
49 | t.Fatal("Mismatched job")
50 | }
51 | }
52 |
53 | func TestQueue_Peek(t *testing.T) {
54 | timer, err := NewTimer("timer_set_q", R, time.Second)
55 | if err != nil {
56 | panic(fmt.Sprintf("Failed to new timer: %s", err))
57 | }
58 | defer timer.Shutdown()
59 | q := NewQueue("ns-queue", "q3", R, timer)
60 | job := engine.NewJob("ns-queue", "q3", []byte("hello msg 3"), nil, 10, 0, 1, "")
61 | q.Push(job, 2)
62 | jobID, tries, err := q.Peek()
63 | if err != nil || jobID == "" || tries != 2 {
64 | t.Fatalf("Failed to peek job from queue: %s", err)
65 | }
66 | if job.ID() != jobID {
67 | t.Fatal("Mismatched job")
68 | }
69 | }
70 |
71 | func TestQueue_Destroy(t *testing.T) {
72 | timer, err := NewTimer("timer_set_q", R, time.Second)
73 | if err != nil {
74 | panic(fmt.Sprintf("Failed to new timer: %s", err))
75 | }
76 | defer timer.Shutdown()
77 | q := NewQueue("ns-queue", "q4", R, timer)
78 | job := engine.NewJob("ns-queue", "q4", []byte("hello msg 4"), nil, 10, 0, 1, "")
79 | q.Push(job, 2)
80 | count, err := q.Destroy()
81 | if err != nil {
82 | t.Fatalf("Failed to destroy queue: %s", err)
83 | }
84 | if count != 1 {
85 | t.Fatalf("Mismatched deleted jobs count")
86 | }
87 | size, _ := q.Size()
88 | if size != 0 {
89 | t.Fatalf("Destroyed queue should be of size 0")
90 | }
91 | }
92 |
93 | func TestQueue_Tries(t *testing.T) {
94 | timer, err := NewTimer("timer_set_q", R, time.Second)
95 | if err != nil {
96 | panic(fmt.Sprintf("Failed to new timer: %s", err))
97 | }
98 | defer timer.Shutdown()
99 | namespace := "ns-queue"
100 | queue := "q5"
101 | q := NewQueue(namespace, queue, R, timer)
102 | var maxTries uint16 = 2
103 | job := engine.NewJob(namespace, queue, []byte("hello msg 5"), nil, 30, 0, maxTries, "")
104 | q.Push(job, maxTries)
105 | pool := NewPool(R)
106 | pool.Add(job)
107 | jobID, tries, err := q.Poll(2, 1)
108 | if err != nil || jobID == "" {
109 | t.Fatalf("Failed to poll job from queue: %s", err)
110 | }
111 | if tries != (maxTries - 1) {
112 | 		t.Fatalf("Expected to get tries 1, but got %s", strconv.Itoa(int(tries)))
113 | }
114 | if job.ID() != jobID {
115 | t.Fatal("Mismatched job")
116 | }
117 | jobID, tries, err = q.Poll(5, 1)
118 | if err != nil || jobID == "" {
119 | t.Fatalf("Failed to poll job from queue: %s", err)
120 | }
121 | if tries != (maxTries - 2) {
122 | 		t.Fatalf("Expected to get tries 0, but got %s", strconv.Itoa(int(tries)))
123 | }
124 | if job.ID() != jobID {
125 | t.Fatal("Mismatched job")
126 | }
127 | }
128 |
129 | func TestStructPacking(t *testing.T) {
130 | var tries uint16 = 23
131 | jobID := " a test ID#"
132 | data := structPack(tries, jobID)
133 | tries2, jobID2, err := structUnpack(data)
134 | if err != nil {
135 | t.Fatal("Failed to unpack")
136 | }
137 | if tries != tries2 || jobID != jobID2 {
138 | t.Fatal("Mismatched unpack data")
139 | }
140 | }
141 |
142 | func TestPopMultiQueues(t *testing.T) {
143 | namespace := "ns-queueName"
144 | queues := make([]QueueName, 3)
145 | queueNames := make([]string, 3)
146 | for i, queueName := range []string{"q6", "q7", "q8"} {
147 | queues[i] = QueueName{Namespace: namespace, Queue: queueName}
148 | queueNames[i] = queues[i].String()
149 | }
150 | gotQueueName, gotVal, err := popMultiQueues(R, queueNames)
151 | if err != redis.Nil {
152 | t.Fatalf("redis nil err was expected, but got %s", err.Error())
153 | }
154 | if gotQueueName != "" || gotVal != "" || err != redis.Nil {
155 | 		t.Fatal("queue name and value should be empty")
156 | }
157 |
158 | queueName := "q7"
159 | q := NewQueue(namespace, queueName, R, nil)
160 | msg := "hello msg 7"
161 | job := engine.NewJob(namespace, queueName, []byte(msg), nil, 30, 0, 2, "")
162 | q.Push(job, 2)
163 | gotQueueName, gotVal, err = popMultiQueues(R, queueNames)
164 | if err != nil {
165 | 		t.Fatalf("no error was expected, but got %s", err.Error())
166 | }
167 | if gotQueueName != q.Name() {
168 | 		t.Fatalf("invalid queue name, %s was expected but got %s", q.Name(), gotQueueName)
169 | }
170 |
171 | // single queue condition
172 | queueName = "q8"
173 | job = engine.NewJob(namespace, queueName, []byte(msg), nil, 30, 0, 2, "")
174 | q = NewQueue(namespace, queueName, R, nil)
175 | q.Push(job, 2)
176 | gotQueueName, gotVal, err = popMultiQueues(R, []string{queueNames[2]})
177 | if err != nil {
178 | 		t.Fatalf("no error was expected, but got %s", err.Error())
179 | }
180 | if gotQueueName != q.Name() {
181 | 		t.Fatalf("invalid queue name, %s was expected but got %s", q.Name(), gotQueueName)
182 | }
183 | }
184 |
--------------------------------------------------------------------------------
/server/handlers/admin_test.go:
--------------------------------------------------------------------------------
1 | package handlers_test
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | "net/http"
8 | "net/url"
9 | "testing"
10 |
11 | "github.com/bitleak/lmstfy/auth"
12 | "github.com/bitleak/lmstfy/server/handlers"
13 | "github.com/bitleak/lmstfy/throttler"
14 | "github.com/bitleak/lmstfy/uuid"
15 | )
16 |
17 | func TestNewToken(t *testing.T) {
18 | query := url.Values{}
19 | query.Add("description", "test")
20 | targetUrl := fmt.Sprintf("http://localhost/token/ns-token?%s", query.Encode())
21 | req, err := http.NewRequest("POST", targetUrl, nil)
22 | if err != nil {
23 | t.Fatalf("Failed to create request")
24 | }
25 | c, e, resp := ginTest(req)
26 | e.POST("/token/:namespace", handlers.NewToken)
27 | e.HandleContext(c)
28 | if resp.Code != http.StatusCreated {
29 | t.Logf(resp.Body.String())
30 | t.Fatal("Failed to create new token")
31 | }
32 | }
33 |
34 | func TestListTokens(t *testing.T) {
35 | targetUrl := "http://localhost/token/ns-token"
36 | req, err := http.NewRequest("GET", targetUrl, nil)
37 | if err != nil {
38 | t.Fatalf("Failed to create request")
39 | }
40 | c, e, resp := ginTest(req)
41 | e.GET("/token/:namespace", handlers.ListTokens)
42 | e.HandleContext(c)
43 | if resp.Code != http.StatusOK {
44 | t.Logf(resp.Body.String())
45 | t.Fatal("Failed to list tokens")
46 | }
47 | }
48 |
49 | func TestDeleteToken(t *testing.T) {
50 | tk := auth.GetTokenManager()
51 | token, _ := tk.New("", "ns-token", uuid.GenUniqueID(), "to be deleted")
52 |
53 | targetUrl := fmt.Sprintf("http://localhost/token/ns-token/%s", token)
54 | req, err := http.NewRequest("DELETE", targetUrl, nil)
55 | if err != nil {
56 | t.Fatalf("Failed to create request")
57 | }
58 | c, e, resp := ginTest(req)
59 | e.DELETE("/token/:namespace/:token", handlers.DeleteToken)
60 | e.HandleContext(c)
61 | if resp.Code != http.StatusNoContent {
62 | t.Logf(resp.Body.String())
63 | t.Fatal("Failed to delete token")
64 | }
65 |
66 | ok, _ := tk.Exist("", "ns-token", token)
67 | if ok {
68 | t.Fatal("Expected token to be deleted")
69 | }
70 | }
71 |
72 | func getLimiter(namespace, token string) (*throttler.Limiter, error) {
73 | // force update limiters
74 | throttler.GetThrottler().GetAll(true)
75 | targetUrl := fmt.Sprintf("http://localhost/token/%s/%s/limit", namespace, token)
76 | req, err := http.NewRequest("GET", targetUrl, nil)
77 | if err != nil {
78 | return nil, fmt.Errorf("failed to create the request, err: %s", err.Error())
79 | }
80 | c, e, resp := ginTest(req)
81 | e.GET("/token/:namespace/:token/limit", handlers.GetLimiter)
82 | e.HandleContext(c)
83 | if resp.Code != http.StatusOK && resp.Code != http.StatusNotFound {
84 | return nil, fmt.Errorf("http code expected %d, but got %d", http.StatusOK, resp.Code)
85 | }
86 | if resp.Code == http.StatusOK {
87 | var limiter throttler.Limiter
88 | json.Unmarshal(resp.Body.Bytes(), &limiter)
89 | return &limiter, nil
90 | }
91 | return nil, nil
92 | }
93 |
94 | func addTokenLimit(namespace, token, limitStr string) error {
95 | targetUrl := fmt.Sprintf("http://localhost/token/%s/%s/limit", namespace, token)
96 | req, err := http.NewRequest("POST", targetUrl, bytes.NewReader([]byte(limitStr)))
97 | if err != nil {
98 | return fmt.Errorf("failed to create the request, err: %s", err.Error())
99 | }
100 | c, e, resp := ginTest(req)
101 | e.POST("/token/:namespace/:token/limit", handlers.AddLimiter)
102 | e.HandleContext(c)
103 | if resp.Code != http.StatusCreated {
104 | 		return fmt.Errorf("http code expected %d, but got %d", http.StatusCreated, resp.Code)
105 | }
106 | return nil
107 | }
108 |
109 | func TestAddTokenLimiter(t *testing.T) {
110 | limitStr := "{\"read\": 100, \"write\": 100, \"interval\":100}"
111 | namespace := "ns-token"
112 | tk := auth.GetTokenManager()
113 | token, _ := tk.New("", "ns-token", uuid.GenUniqueID(), "token limiter")
114 | if err := addTokenLimit(namespace, token, limitStr); err != nil {
115 | t.Fatal(err)
116 | }
117 | limiter, err := getLimiter(namespace, token)
118 | if err != nil {
119 | t.Fatal(err.Error())
120 | }
121 | 	if limiter.Read != 100 || limiter.Write != 100 || limiter.Interval != 100 {
122 | 		t.Fatalf("Invalid limiter value, %v", limiter)
123 | }
124 | if err := addTokenLimit(namespace, token, limitStr); err == nil {
125 | t.Fatal("duplicate token error was expected")
126 | }
127 | }
128 |
129 | func TestSetTokenLimiter(t *testing.T) {
130 | tk := auth.GetTokenManager()
131 | token, _ := tk.New("", "ns-token", uuid.GenUniqueID(), "token limiter")
132 | targetUrl := fmt.Sprintf("http://localhost/token/ns-token/%s/limit", token)
133 | limitStr := "{\"read\": 100, \"write\": 100, \"interval\":100}"
134 | req, err := http.NewRequest("PUT", targetUrl, bytes.NewReader([]byte(limitStr)))
135 | if err != nil {
136 | t.Fatalf("Failed to create the request, err: %s", err.Error())
137 | }
138 | c, e, resp := ginTest(req)
139 | e.PUT("/token/:namespace/:token/limit", handlers.SetLimiter)
140 | e.HandleContext(c)
141 | if resp.Code != http.StatusOK {
142 | t.Logf(resp.Body.String())
143 | 		t.Fatalf("Failed to set the limit of the token, err: %v", err)
144 | }
145 | }
146 |
147 | func TestDeleteTokenLimiter(t *testing.T) {
148 | limitStr := "{\"read\": 100, \"write\": 100, \"interval\":100}"
149 | namespace := "ns-token"
150 | tk := auth.GetTokenManager()
151 | token, _ := tk.New("", "ns-token", uuid.GenUniqueID(), "token limiter")
152 | if err := addTokenLimit(namespace, token, limitStr); err != nil {
153 | t.Fatal(err)
154 | }
155 |
156 | targetUrl := fmt.Sprintf("http://localhost/token/%s/%s/limit", namespace, token)
157 | req, err := http.NewRequest("DELETE", targetUrl, nil)
158 | if err != nil {
159 | t.Fatalf("Failed to create the request, err: %s", err.Error())
160 | }
161 | c, e, resp := ginTest(req)
162 | e.DELETE("/token/:namespace/:token/limit", handlers.DeleteLimiter)
163 | e.HandleContext(c)
164 | if resp.Code != http.StatusOK {
165 | t.Logf(resp.Body.String())
166 | 		t.Fatalf("Failed to delete the limit of the token, err: %v", err)
167 | }
168 |
169 | limiter, err := getLimiter("ns-token", token)
170 | if err != nil {
171 | t.Fatal(err.Error())
172 | }
173 | if limiter != nil {
174 | t.Fatal("the token's limiter was expected to be deleted")
175 | }
176 | }
177 |
--------------------------------------------------------------------------------
/client/cmd/lmstfy/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "time"
7 |
8 | "github.com/bitleak/lmstfy/client"
9 | "github.com/mitchellh/go-homedir"
10 | "github.com/spf13/cobra"
11 | "github.com/spf13/viper"
12 | )
13 |
14 | var (
15 | cfgFile string
16 | lmstfyClient *client.LmstfyClient
17 | )
18 |
19 | func initLmstfyClient() {
20 | if cfgFile == "" {
21 | viper.SetConfigName(".lmstfy")
22 | home, err := homedir.Dir()
23 | if err != nil {
24 | fmt.Println("Failed to get home directory")
25 | os.Exit(1)
26 | }
27 | viper.AddConfigPath(home)
28 | } else {
29 | viper.SetConfigFile(cfgFile)
30 | }
31 | err := viper.ReadInConfig()
32 | if err != nil {
33 | fmt.Printf("Failed to load config: %s", err)
34 | os.Exit(1)
35 | }
36 | host := viper.GetString("host")
37 | port := viper.GetInt("port")
38 | namespace := viper.GetString("namespace")
39 | token := viper.GetString("token")
40 | lmstfyClient = client.NewLmstfyClient(host, port, namespace, token)
41 | }
42 |
43 | func main() {
44 | cobra.OnInitialize(initLmstfyClient)
45 |
46 | publishCmd := &cobra.Command{
47 | Use: "publish [queue] [job data]",
48 | Short: "publish a job to queue",
49 | Example: `publish test "hello world"`,
50 | Aliases: []string{"put", "pub"},
51 | Args: cobra.ExactArgs(2),
52 | Run: func(cmd *cobra.Command, args []string) {
53 | ttl, _ := cmd.Flags().GetUint32("ttl")
54 | tries, _ := cmd.Flags().GetUint16("tries")
55 | delay, _ := cmd.Flags().GetUint32("delay")
56 | jobID, err := lmstfyClient.Publish(args[0], []byte(args[1]), ttl, tries, delay)
57 | if err != nil {
58 | fmt.Printf("Failed: %s\n", err)
59 | } else {
60 | fmt.Printf("Job ID: %s\n", jobID)
61 | }
62 | },
63 | }
64 | publishCmd.Flags().Uint32P("ttl", "t", 0, "time-to-live in second, no TTL by default")
65 | publishCmd.Flags().Uint16P("tries", "r", 1, "number of tries")
66 | publishCmd.Flags().Uint32P("delay", "d", 0, "delay in second, no delay by default")
67 |
68 | consumeCmd := &cobra.Command{
69 | Use: "consume [queue]",
70 | Short: "consume a job from queue",
71 | Example: "consume test",
72 | Aliases: []string{"get", "con"},
73 | Args: cobra.ExactArgs(1),
74 | Run: func(cmd *cobra.Command, args []string) {
75 | ttr, _ := cmd.Flags().GetUint32("ttr")
76 | timeout, _ := cmd.Flags().GetUint32("timeout")
77 | job, err := lmstfyClient.Consume(args[0], ttr, timeout)
78 | if err != nil {
79 | fmt.Printf("Failed: %s\n", err)
80 | } else if job == nil {
81 | fmt.Println("No job available")
82 | } else {
83 | fmt.Printf("Job ID: %s\n", job.ID)
84 | fmt.Printf("Job data: %s\n", string(job.Data))
85 | fmt.Printf("* TTL: %s\n", time.Duration(job.TTL)*time.Second)
86 | fmt.Printf("* Elapsed: %s\n", time.Duration(job.ElapsedMS)*time.Millisecond)
87 | }
88 | },
89 | }
90 | consumeCmd.Flags().Uint32P("ttr", "t", 120, "time-to-run in second")
91 | consumeCmd.Flags().Uint32P("timeout", "w", 10, "blocking wait timeout in second")
92 |
93 | ackCmd := &cobra.Command{
94 | Use: "ack [queue] [job ID]",
95 | Short: "acknowledge the job, mark it as finished",
96 | Example: "ack test 01CG14G3JKF840QHZB6NR1NHVJ",
97 | Aliases: []string{"del"},
98 | Args: cobra.ExactArgs(2),
99 | Run: func(cmd *cobra.Command, args []string) {
100 | err := lmstfyClient.Ack(args[0], args[1])
101 | if err != nil {
102 | fmt.Printf("Failed: %s\n", err)
103 | } else {
104 | fmt.Println("ACK")
105 | }
106 | },
107 | }
108 |
109 | sizeCmd := &cobra.Command{
110 | Use: "size [queue]",
111 | Short: "get the queue size, and related deadletter size",
112 | Example: "size test",
113 | Aliases: []string{"len"},
114 | Args: cobra.ExactArgs(1),
115 | Run: func(cmd *cobra.Command, args []string) {
116 | qSize, err := lmstfyClient.QueueSize(args[0])
117 | if err != nil {
118 | fmt.Printf("Failed: %s\n", err)
119 | return
120 | }
121 | dSize, dHead, err := lmstfyClient.PeekDeadLetter(args[0])
122 | if err != nil {
123 | fmt.Printf("Failed: %s\n", err)
124 | return
125 | }
126 | fmt.Printf("Queue size: %d\n", qSize)
127 | fmt.Printf("DeadLetter size: %d\n", dSize)
128 | if dSize > 0 {
129 | fmt.Printf("DeadLetter head: %s\n", dHead)
130 | }
131 | },
132 | }
133 |
134 | peekCmd := &cobra.Command{
135 | Use: "peek [queue] [job ID]",
136 | Short: "peek a job without consuming it",
137 | Example: "peek test\npeek test 01CG14G3JKF840QHZB6NR1NHVJ",
138 | Args: cobra.RangeArgs(1, 2),
139 | Run: func(cmd *cobra.Command, args []string) {
140 | if len(args) == 1 {
141 | job, err := lmstfyClient.PeekQueue(args[0])
142 | if err != nil {
143 | fmt.Printf("Failed: %s\n", err)
144 | return
145 | }
146 | if job == nil {
147 | fmt.Printf("Not found\n")
148 | return
149 | }
150 | fmt.Printf("Job ID: %s\n", job.ID)
151 | fmt.Printf("Job data: %s\n", string(job.Data))
152 | fmt.Printf("* TTL: %s\n", time.Duration(job.TTL)*time.Second)
153 | fmt.Printf("* Elapsed: %s\n", time.Duration(job.ElapsedMS)*time.Millisecond)
154 | } else {
155 | job, err := lmstfyClient.PeekJob(args[0], args[1])
156 | if err != nil {
157 | fmt.Printf("Failed: %s\n", err)
158 | return
159 | }
160 | if job == nil {
161 | fmt.Printf("Not found\n")
162 | return
163 | }
164 | fmt.Printf("Job data: %s\n", string(job.Data))
165 | fmt.Printf("* TTL: %s\n", time.Duration(job.TTL)*time.Second)
166 | fmt.Printf("* Elapsed: %s\n", time.Duration(job.ElapsedMS)*time.Millisecond)
167 | }
168 | },
169 | }
170 |
171 | respawnCmd := &cobra.Command{
172 | Use: "respawn [queue]",
173 | Short: "respawn a job or all the jobs in the deadletter",
174 | Example: "respawn test",
175 | Aliases: []string{"kick"},
176 | Args: cobra.ExactArgs(1),
177 | Run: func(cmd *cobra.Command, args []string) {
178 | limit, _ := cmd.Flags().GetInt64("limit")
179 | ttl, _ := cmd.Flags().GetInt64("ttl")
180 | count, err := lmstfyClient.RespawnDeadLetter(args[0], limit, ttl)
181 | if err != nil {
182 | fmt.Printf("Failed: %s\n", err)
183 | } else {
184 | 				fmt.Printf("Respawned [%d] jobs\n", count)
185 | }
186 | },
187 | }
188 | respawnCmd.Flags().Int64P("limit", "l", 1, "upper limit of the number of dead jobs to be respawned")
189 | respawnCmd.Flags().Int64P("ttl", "t", 86400, "time-to-live in second for the respawned job, default is 86400 (1 day)")
190 |
191 | rootCmd := &cobra.Command{Use: "lmstfy"}
192 | rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file path")
193 |
194 | rootCmd.AddCommand(publishCmd, consumeCmd, ackCmd, sizeCmd, peekCmd, respawnCmd)
195 | _ = rootCmd.Execute()
196 | }
197 |
--------------------------------------------------------------------------------
/server/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "flag"
7 | "fmt"
8 | "io"
9 | "net/http"
10 | "os"
11 | "os/signal"
12 | "syscall"
13 |
14 | "github.com/gin-gonic/gin"
15 | "github.com/sirupsen/logrus"
16 | "go.uber.org/automaxprocs/maxprocs"
17 |
18 | "github.com/bitleak/lmstfy/auth"
19 | "github.com/bitleak/lmstfy/config"
20 | "github.com/bitleak/lmstfy/engine"
21 | "github.com/bitleak/lmstfy/engine/migration"
22 | redis_engine "github.com/bitleak/lmstfy/engine/redis"
23 | "github.com/bitleak/lmstfy/helper"
24 | "github.com/bitleak/lmstfy/log"
25 | "github.com/bitleak/lmstfy/server/handlers"
26 | "github.com/bitleak/lmstfy/server/middleware"
27 | "github.com/bitleak/lmstfy/throttler"
28 | "github.com/bitleak/lmstfy/version"
29 | )
30 |
31 | type optionFlags struct {
32 | ConfFile string
33 | PidFile string
34 | ShowVersion bool
35 | BackTrackLevel string
36 | SkipVerification bool
37 | }
38 |
39 | var (
40 | Flags optionFlags
41 | )
42 |
43 | func registerSignal(shutdown chan struct{}, logsReopenCallback func()) {
44 | c := make(chan os.Signal, 1)
45 | signal.Notify(c, []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGUSR1}...)
46 | go func() {
47 | for sig := range c {
48 | if handleSignals(sig, logsReopenCallback) {
49 | close(shutdown)
50 | return
51 | }
52 | }
53 | }()
54 | }
55 |
56 | func handleSignals(sig os.Signal, logsReopenCallback func()) (exitNow bool) {
57 | switch sig {
58 | case syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM:
59 | return true
60 | case syscall.SIGUSR1:
61 | logsReopenCallback()
62 | return false
63 | }
64 | return false
65 | }
66 |
67 | func parseFlags() {
68 | flagSet := flag.NewFlagSet("lmstfy", flag.ExitOnError)
69 | flagSet.StringVar(&Flags.ConfFile, "c", "conf/config.toml", "config file path")
70 | flagSet.BoolVar(&Flags.ShowVersion, "v", false, "show current version")
71 | flagSet.StringVar(&Flags.BackTrackLevel, "bt", "warn", "show backtrack in the log >= {level}")
72 | flagSet.BoolVar(&Flags.SkipVerification, "sv", false, "dev mode, used to bypass token verification")
73 | flagSet.StringVar(&Flags.PidFile, "p", "running.pid", "pid file path")
74 | flagSet.Parse(os.Args[1:])
75 | }
76 |
77 | func printVersion() {
78 | fmt.Printf("version: %s\nbuilt at: %s\ncommit: %s\n", version.Version, version.BuildDate, version.BuildCommit)
79 | }
80 |
81 | func apiServer(conf *config.Config, accessLogger, errorLogger *logrus.Logger, devMode bool) *http.Server {
82 | gin.SetMode(gin.ReleaseMode)
83 | engine := gin.New()
84 | engine.Use(
85 | middleware.RequestIDMiddleware,
86 | middleware.AccessLogMiddleware(accessLogger),
87 | handlers.CollectMetrics,
88 | gin.RecoveryWithWriter(errorLogger.Out),
89 | )
90 | handlers.SetupParamDefaults(conf)
91 | err := throttler.Setup(&conf.AdminRedis, errorLogger)
92 | if err != nil {
93 | errorLogger.Errorf("Failed to create throttler, err: %s", err.Error())
94 | }
95 | SetupRoutes(engine, errorLogger, devMode)
96 | addr := fmt.Sprintf("%s:%d", conf.Host, conf.Port)
97 | errorLogger.Infof("Server listening at %s", addr)
98 | srv := http.Server{
99 | Addr: addr,
100 | Handler: engine,
101 | }
102 | go func() {
103 | if err := srv.ListenAndServe(); err != nil {
104 | if err == http.ErrServerClosed {
105 | return
106 | }
107 | panic(fmt.Sprintf("API server failed: %s", err))
108 | }
109 | }()
110 | return &srv
111 | }
112 |
113 | func adminServer(conf *config.Config, accessLogger *logrus.Logger, errorLogger *logrus.Logger) *http.Server {
114 | gin.SetMode(gin.ReleaseMode)
115 | engine := gin.New()
116 | engine.Use(
117 | middleware.RequestIDMiddleware,
118 | middleware.AccessLogMiddleware(accessLogger),
119 | gin.RecoveryWithWriter(errorLogger.Out))
120 | SetupAdminRoutes(engine, conf.Accounts)
121 | errorLogger.Infof("Admin port %d", conf.AdminPort)
122 | srv := http.Server{
123 | Addr: fmt.Sprintf("%s:%d", conf.AdminHost, conf.AdminPort),
124 | Handler: engine,
125 | }
126 | go func() {
127 | if err := srv.ListenAndServe(); err != nil {
128 | if err == http.ErrServerClosed {
129 | return
130 | }
131 | panic(fmt.Sprintf("Admin server failed: %s", err))
132 | }
133 | }()
134 | return &srv
135 | }
136 |
137 | func createPidFile(logger *logrus.Logger) {
138 | f, err := os.OpenFile(Flags.PidFile, os.O_CREATE|os.O_WRONLY, 0644)
139 | if err != nil {
140 | panic("failed to create pid file")
141 | }
142 | io.WriteString(f, fmt.Sprintf("%d", os.Getpid()))
143 | f.Close()
144 | logger.Infof("Server pid: %d", os.Getpid())
145 | }
146 |
147 | func removePidFile() {
148 | os.Remove(Flags.PidFile)
149 | }
150 |
151 | func postValidateConfig(ctx context.Context, conf *config.Config) error {
152 | for name, poolConf := range conf.Pool {
153 | if err := helper.ValidateRedisConfig(ctx, &poolConf); err != nil {
154 | return fmt.Errorf("validate pool[%s] err: %w", name, err)
155 | }
156 | }
157 | if err := helper.ValidateRedisConfig(ctx, &conf.AdminRedis); err != nil {
158 | return fmt.Errorf("validate admin redis err: %w", err)
159 | }
160 | return nil
161 | }
162 |
163 | func setupEngines(conf *config.Config, l *logrus.Logger) error {
164 | redis_engine.SetLogger(l)
165 | if err := redis_engine.Setup(conf); err != nil {
166 | return fmt.Errorf("%w in redis engine", err)
167 | }
168 | migration.SetLogger(l)
169 | if err := migration.Setup(conf); err != nil {
170 | return fmt.Errorf("%w in migration engine", err)
171 | }
172 | if engine.GetEngine(config.DefaultPoolName) == nil {
173 | return errors.New("missing default pool")
174 | }
175 | return nil
176 | }
177 |
178 | func main() {
179 | parseFlags()
180 | if Flags.ShowVersion {
181 | printVersion()
182 | return
183 | }
184 | conf, err := config.MustLoad(Flags.ConfFile)
185 | if err != nil {
186 | panic(fmt.Sprintf("Failed to load config file: %s", err))
187 | }
188 | if err := postValidateConfig(context.Background(), conf); err != nil {
189 | panic(fmt.Sprintf("Failed to post validate the config file: %s", err))
190 | }
191 | shutdown := make(chan struct{})
192 | err = log.Setup(conf.LogFormat, conf.LogDir, conf.LogLevel, Flags.BackTrackLevel)
193 | if err != nil {
194 | panic(fmt.Sprintf("Failed to setup logger: %s", err))
195 | }
196 |
197 | logger := log.Get()
198 | 	maxprocs.Set(maxprocs.Logger(func(format string, args ...interface{}) {
199 | 		logger.Infof(format, args...)
200 | 	}))
201 | registerSignal(shutdown, func() {
202 | log.ReopenLogs(conf.LogDir)
203 | })
204 | if err := setupEngines(conf, logger); err != nil {
205 | panic(fmt.Sprintf("Failed to setup engines, err: %s", err.Error()))
206 | }
207 | if err := auth.Setup(conf); err != nil {
208 | panic(fmt.Sprintf("Failed to setup auth module: %s", err))
209 | }
210 | if conf.EnableAccessLog {
211 | middleware.EnableAccessLog()
212 | }
213 | apiSrv := apiServer(conf, log.GetAccessLogger(), logger, Flags.SkipVerification)
214 | adminSrv := adminServer(conf, log.GetAccessLogger(), logger)
215 |
216 | createPidFile(logger)
217 |
218 | <-shutdown
219 | logger.Infof("[%d] Shutting down...", os.Getpid())
220 | removePidFile()
221 | adminSrv.Close() // Admin server does not need to be stopped gracefully
222 | apiSrv.Shutdown(context.Background())
223 |
224 | throttler.GetThrottler().Shutdown()
225 | logger.Infof("[%d] Bye bye", os.Getpid())
226 | }
227 |
--------------------------------------------------------------------------------
/engine/redis/metrics.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 | "sync"
7 | "time"
8 |
9 | "github.com/prometheus/client_golang/prometheus"
10 | )
11 |
12 | type Metrics struct {
13 | // engine related metrics
14 | publishJobs *prometheus.CounterVec
15 | consumeJobs *prometheus.CounterVec
16 | consumeMultiJobs *prometheus.CounterVec
17 | poolAddJobs *prometheus.CounterVec
18 | poolGetJobs *prometheus.CounterVec
19 | poolDeleteJobs *prometheus.CounterVec
20 | timerAddJobs *prometheus.CounterVec
21 | timerDueJobs *prometheus.CounterVec
22 | timerFullBatches *prometheus.CounterVec
23 | queueDirectPushJobs *prometheus.CounterVec
24 | queuePopJobs *prometheus.CounterVec
25 | deadletterRespawnJobs *prometheus.CounterVec
26 | publishQueueJobs *prometheus.CounterVec
27 | consumeQueueJobs *prometheus.CounterVec
28 | jobElapsedMS *prometheus.HistogramVec
29 | jobAckElapsedMS *prometheus.HistogramVec
30 |
31 | timerSizes *prometheus.GaugeVec
32 | queueSizes *prometheus.GaugeVec
33 | deadletterSizes *prometheus.GaugeVec
34 |
35 | // redis instance related metrics
36 | redisMaxMem *prometheus.GaugeVec
37 | redisMemUsed *prometheus.GaugeVec
38 | redisConns *prometheus.GaugeVec
39 | redisBlockings *prometheus.GaugeVec
40 | redisKeys *prometheus.GaugeVec
41 | redisExpires *prometheus.GaugeVec
42 | }
43 |
44 | var (
45 | metrics *Metrics
46 | )
47 |
48 | const (
49 | Namespace = "infra"
50 | Subsystem = "lmstfy_redis"
51 | )
52 |
53 | func setupMetrics() {
54 | cv := newCounterVecHelper
55 | gv := newGaugeVecHelper
56 | hv := newHistogramHelper
57 | metrics = &Metrics{
58 | publishJobs: cv("publish_jobs"),
59 | consumeJobs: cv("consume_jobs"),
60 | consumeMultiJobs: cv("consume_multi_jobs"),
61 | poolAddJobs: cv("pool_add_jobs"),
62 | poolGetJobs: cv("pool_get_jobs"),
63 | poolDeleteJobs: cv("pool_delete_jobs"),
64 | timerAddJobs: cv("timer_add_jobs"),
65 | timerDueJobs: cv("timer_due_jobs"),
66 | timerFullBatches: cv("timer_full_batches"),
67 | queueDirectPushJobs: cv("queue_direct_push_jobs"),
68 | queuePopJobs: cv("queue_pop_jobs"),
69 | deadletterRespawnJobs: cv("deadletter_respawn_jobs"),
70 | publishQueueJobs: cv("publish_queue_jobs", "namespace", "queue"),
71 | consumeQueueJobs: cv("consume_queue_jobs", "namespace", "queue"),
72 | jobElapsedMS: hv("job_elapsed_ms", "namespace", "queue"),
73 | jobAckElapsedMS: hv("job_ack_elapsed_ms", "namespace", "queue"),
74 |
75 | timerSizes: gv("timer_sizes"),
76 | queueSizes: gv("queue_sizes", "namespace", "queue"),
77 | deadletterSizes: gv("deadletter_sizes", "namespace", "queue"),
78 |
79 | redisMaxMem: gv("max_mem_bytes"),
80 | redisMemUsed: gv("used_mem_bytes"),
81 | redisConns: gv("connections"),
82 | redisBlockings: gv("blocking_connections"),
83 | redisKeys: gv("total_keys"),
84 | redisExpires: gv("total_ttl_keys"),
85 | }
86 | }
87 |
88 | func newCounterVecHelper(name string, labels ...string) *prometheus.CounterVec {
89 | 	labels = append([]string{"pool"}, labels...) // all metrics have the common label `pool`
90 | opts := prometheus.CounterOpts{}
91 | opts.Namespace = Namespace
92 | opts.Subsystem = Subsystem
93 | opts.Name = name
94 | opts.Help = name
95 | counters := prometheus.NewCounterVec(opts, labels)
96 | prometheus.MustRegister(counters)
97 | return counters
98 | }
99 |
100 | func newGaugeVecHelper(name string, labels ...string) *prometheus.GaugeVec {
101 | labels = append([]string{"pool"}, labels...)
102 | opts := prometheus.GaugeOpts{}
103 | opts.Namespace = Namespace
104 | opts.Subsystem = Subsystem
105 | opts.Name = name
106 | opts.Help = name
107 | gauges := prometheus.NewGaugeVec(opts, labels)
108 | prometheus.MustRegister(gauges)
109 | return gauges
110 | }
111 |
112 | func newSummaryHelper(name string, labels ...string) *prometheus.SummaryVec {
113 | labels = append([]string{"pool"}, labels...)
114 | opts := prometheus.SummaryOpts{}
115 | opts.Namespace = Namespace
116 | opts.Subsystem = Subsystem
117 | opts.Name = name
118 | opts.Help = name
119 | opts.Objectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.95: 0.001}
120 | summary := prometheus.NewSummaryVec(opts, labels)
121 | prometheus.MustRegister(summary)
122 | return summary
123 | }
124 |
125 | func newHistogramHelper(name string, labels ...string) *prometheus.HistogramVec {
126 | labels = append([]string{"pool"}, labels...)
127 | opts := prometheus.HistogramOpts{}
128 | opts.Namespace = Namespace
129 | opts.Subsystem = Subsystem
130 | opts.Name = name
131 | opts.Help = name
132 | opts.Buckets = prometheus.ExponentialBuckets(15, 3.5, 7)
133 | histogram := prometheus.NewHistogramVec(opts, labels)
134 | prometheus.MustRegister(histogram)
135 | return histogram
136 | }
137 |
138 | type SizeProvider interface {
139 | Size() (size int64, err error)
140 | }
141 |
142 | type SizeMonitor struct {
143 | redis *RedisInstance
144 | timer *Timer
145 | providers map[string]SizeProvider
146 |
147 | rwmu sync.RWMutex
148 | }
149 |
150 | func NewSizeMonitor(redis *RedisInstance, timer *Timer, preloadData map[string][]string) *SizeMonitor {
151 | m := &SizeMonitor{
152 | redis: redis,
153 | timer: timer,
154 | providers: make(map[string]SizeProvider),
155 | }
156 | for ns, queues := range preloadData {
157 | for _, q := range queues {
158 | m.MonitorIfNotExist(ns, q)
159 | }
160 | }
161 | return m
162 | }
163 |
164 | func (m *SizeMonitor) Loop() {
165 | for {
166 | time.Sleep(5 * time.Second)
167 | m.collect()
168 | }
169 | }
170 |
171 | func (m *SizeMonitor) MonitorIfNotExist(namespace, queue string) {
172 | qname := fmt.Sprintf("q/%s/%s", namespace, queue)
173 | m.rwmu.RLock()
174 | if m.providers[qname] != nil { // queue and deadletter are monitored together, so only test queue
175 | m.rwmu.RUnlock()
176 | return
177 | }
178 | m.rwmu.RUnlock()
179 | dname := fmt.Sprintf("d/%s/%s", namespace, queue)
180 | m.rwmu.Lock()
181 | m.providers[qname] = NewQueue(namespace, queue, m.redis, nil)
182 | m.providers[dname], _ = NewDeadLetter(namespace, queue, m.redis)
183 | m.rwmu.Unlock()
184 | }
185 |
186 | func (m *SizeMonitor) Remove(namespace, queue string) {
187 | qname := fmt.Sprintf("q/%s/%s", namespace, queue)
188 | dname := fmt.Sprintf("d/%s/%s", namespace, queue)
189 | m.rwmu.Lock()
190 | delete(m.providers, qname)
191 | delete(m.providers, dname)
192 | metrics.queueSizes.DeleteLabelValues(m.redis.Name, namespace, queue)
193 | metrics.deadletterSizes.DeleteLabelValues(m.redis.Name, namespace, queue)
194 | m.rwmu.Unlock()
195 | }
196 |
197 | func (m *SizeMonitor) collect() {
198 | s, err := m.timer.Size()
199 | if err == nil {
200 | metrics.timerSizes.WithLabelValues(m.redis.Name).Set(float64(s))
201 | }
202 | m.rwmu.RLock()
203 | for k, p := range m.providers {
204 | s, err := p.Size()
205 | if err != nil {
206 | continue
207 | }
208 | splits := strings.SplitN(k, "/", 3)
209 | switch splits[0] {
210 | case "q":
211 | metrics.queueSizes.WithLabelValues(m.redis.Name, splits[1], splits[2]).Set(float64(s))
212 | case "d":
213 | metrics.deadletterSizes.WithLabelValues(m.redis.Name, splits[1], splits[2]).Set(float64(s))
214 | }
215 | }
216 | m.rwmu.RUnlock()
217 | }
218 |
219 | func init() {
220 | setupMetrics()
221 | }
222 |
--------------------------------------------------------------------------------
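
The helpers above always prepend a `pool` label before registering, so every series is keyed by the Redis pool name plus any extra labels. Below is a minimal standalone sketch of that wiring, reusing the namespace/subsystem constants from this file; the label value "default" is only a placeholder for RedisInstance.Name.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Mirror newCounterVecHelper: fixed namespace/subsystem plus an implicit "pool" label.
	publishJobs := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "infra",
		Subsystem: "lmstfy_redis",
		Name:      "publish_jobs",
		Help:      "publish_jobs",
	}, []string{"pool"})
	prometheus.MustRegister(publishJobs)

	// Callers increment with the pool name, as engine.go does with e.redis.Name.
	publishJobs.WithLabelValues("default").Inc()
}
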
/auth/token_test.go:
--------------------------------------------------------------------------------
1 | package auth
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "testing"
7 |
8 | "github.com/bitleak/lmstfy/config"
9 | "github.com/bitleak/lmstfy/engine"
10 | "github.com/bitleak/lmstfy/engine/redis"
11 | "github.com/bitleak/lmstfy/helper"
12 | go_redis "github.com/go-redis/redis/v8"
13 | )
14 |
15 | var (
16 | adminRedis *go_redis.Client
17 | )
18 |
19 | func setup(conf *config.Config) {
20 |
21 | if err := Setup(conf); err != nil {
22 | panic(fmt.Sprintf("Failed to setup auth testcase: %s", err))
23 | }
24 |
25 | adminRedis = helper.NewRedisClient(&conf.AdminRedis, nil)
26 | err := adminRedis.Ping(dummyCtx).Err()
27 | if err != nil {
28 | panic(fmt.Sprintf("Failed to ping: %s", err))
29 | }
30 | err = adminRedis.FlushDB(dummyCtx).Err()
31 | if err != nil {
32 | panic(fmt.Sprintf("Failed to flush db: %s", err))
33 | }
34 | }
35 |
36 | func TestMain(m *testing.M) {
37 | presetConfig, err := config.CreatePresetForTest("")
38 | if err != nil {
39 | panic(fmt.Sprintf("CreatePresetForTest failed with error: %s", err))
40 | }
41 | defer presetConfig.Destroy()
42 | setup(presetConfig.Config)
43 | ret := m.Run()
44 | os.Exit(ret)
45 | }
46 |
47 | func TestTokenKey(t *testing.T) {
48 | tk1 := tokenKey("", "test-ns")
49 | if tk1 != "tk/"+config.DefaultPoolName+"/test-ns" {
50 | t.Fatalf("Mismatch tokenKey")
51 | }
52 |
53 | tk2 := tokenKey("test-pool", "test-ns")
54 | if tk2 != "tk/test-pool/test-ns" {
55 | t.Fatalf("Mismatch tokenKey")
56 | }
57 | }
58 |
59 | func TestCacheKey(t *testing.T) {
60 | tk1 := cacheKey("", "test-ns", "test-new-token")
61 | if tk1 != config.DefaultPoolName+"test-nstest-new-token" {
62 | t.Fatalf("Mismatch cacheKey")
63 | }
64 |
65 | tk2 := cacheKey("test-pool", "test-ns", "test-new-token")
66 | if tk2 != "test-pooltest-nstest-new-token" {
67 | t.Fatalf("Mismatch cacheKey")
68 | }
69 | }
70 |
71 | func TestTokenManager_New(t *testing.T) {
72 | _, err := GetTokenManager().New("", "test-ns", "test-new-token", "")
73 | if err != ErrPoolNotExist {
74 | t.Fatalf("Expected new token return pool not exist error, but got %v", err)
75 | }
76 |
77 | // New token in default pool
78 | engine.Register(engine.KindRedis, config.DefaultPoolName, &redis.Engine{})
79 | tk, err := GetTokenManager().New("", "test-ns", "test-new-token", "")
80 | if err != nil {
81 | t.Fatalf("Expected new token return nil, but got %v", err)
82 | }
83 | defer GetTokenManager().Delete("", "test-ns", "test-new-token")
84 | if tk != "test-new-token" {
85 | t.Fatalf("Expected new token return test-new-token, but got %v", tk)
86 | }
87 |
88 | // Check token in redis
89 | ok, err := adminRedis.HExists(dummyCtx, tokenKey("", "test-ns"), "test-new-token").Result()
90 | if err != nil {
91 | t.Fatalf("Expected check token exist return nil, but got %v", err)
92 | }
93 | if !ok {
94 | t.Fatalf("Expected check token exist, but not exist")
95 | }
96 | // Check token in cache
97 | if !GetTokenManager().cache[cacheKey("", "test-ns", "test-new-token")] {
98 | t.Fatalf("Expected check token cache exist, but not exist")
99 | }
100 |
101 | engine.Register(engine.KindRedis, config.DefaultPoolName, &redis.Engine{})
102 | _, err = GetTokenManager().New("", "test-ns", "test-new-token", "")
103 | if err != ErrTokenExist {
104 | 		t.Fatalf("Expected new token return token exist error, but got %v", err)
105 | }
106 |
107 | // New token in custom pool
108 | engine.Register(engine.KindRedis, "test-pool", &redis.Engine{})
109 | tk, err = GetTokenManager().New("test-pool", "test-ns", "test-new-token", "")
110 | if err != nil {
111 | t.Fatalf("Expected new token return nil, but got %v", err)
112 | }
113 | defer GetTokenManager().Delete("test-pool", "test-ns", "test-new-token")
114 | if tk != "test-pool:test-new-token" {
115 | t.Fatalf("Expected new token return test-pool:test-new-token, but got %v", tk)
116 | }
117 | }
118 |
119 | func TestTokenManager_Exist(t *testing.T) {
120 | engine.Register(engine.KindRedis, config.DefaultPoolName, &redis.Engine{})
121 | tk, err := GetTokenManager().New("", "test-ns", "test-exist-token", "")
122 | if err != nil {
123 | t.Fatalf("Expected new token return nil, but got %v", err)
124 | }
125 | 	defer GetTokenManager().Delete("", "test-ns", "test-exist-token")
126 |
127 | // Check token exist in memory cache
128 | ok, err := GetTokenManager().Exist("", "test-ns", tk)
129 | if err != nil {
130 | t.Fatalf("Expected token exist return nil, but got %v", err)
131 | }
132 | if !ok {
133 | t.Fatalf("Expected token exist")
134 | }
135 |
136 | delete(GetTokenManager().cache, cacheKey("", "test-ns", tk))
137 |
138 | // Check token exist in redis and write back to cache
139 | ok, err = GetTokenManager().Exist("", "test-ns", tk)
140 | if err != nil {
141 | t.Fatalf("Expected token exist return nil, but got %v", err)
142 | }
143 | if !ok {
144 | t.Fatalf("Expected token exist")
145 | }
146 | if !GetTokenManager().cache[cacheKey("", "test-ns", tk)] {
147 | t.Fatalf("Expected check token cache exist, but not exist")
148 | }
149 |
150 | // Check pool not exist
151 | _, err = GetTokenManager().Exist("not-exist", "test-ns", tk)
152 | if err != ErrPoolNotExist {
153 | t.Fatalf("Expected exist token return pool not exist error, but got %v", err)
154 | }
155 | }
156 |
157 | func TestTokenManager_Delete(t *testing.T) {
158 | engine.Register(engine.KindRedis, config.DefaultPoolName, &redis.Engine{})
159 | tk, err := GetTokenManager().New("", "test-ns", "test-delete-token", "")
160 | if err != nil {
161 | t.Fatalf("Expected new token return nil, but got %v", err)
162 | }
163 | err = GetTokenManager().Delete("", "test-ns", tk)
164 | if err != nil {
165 | t.Fatalf("Expected delete token return nil, but got %v", err)
166 | }
167 |
168 | // Check token deleted in cache and redis
169 | if GetTokenManager().cache[cacheKey("", "test-ns", tk)] {
170 | t.Fatalf("Expected check token cache not exist, but exist")
171 | }
172 | ok, err := adminRedis.HExists(dummyCtx, tokenKey("", "test-ns"), tk).Result()
173 | if err != nil {
174 | t.Fatalf("Expected check token in redis exist return nil, but got %v", err)
175 | }
176 | if ok {
177 | t.Fatalf("Expected check token in redis not exist, but exist")
178 | }
179 |
180 | err = GetTokenManager().Delete("not-exist", "test-ns", tk)
181 | if err != ErrPoolNotExist {
182 | t.Fatalf("Expected delete token return pool not exist error, but got %v", err)
183 | }
184 | }
185 |
186 | func TestTokenManager_List(t *testing.T) {
187 | engine.Register(engine.KindRedis, config.DefaultPoolName, &redis.Engine{})
188 | tk, err := GetTokenManager().New("", "test-ns", "test-list-token", "")
189 | if err != nil {
190 | t.Fatalf("Expected new token return nil, but got %v", err)
191 | }
192 | defer GetTokenManager().Delete("", "test-ns", "test-list-token")
193 |
194 | // List tokens in default pool
195 | tokens, err := GetTokenManager().List("", "test-ns")
196 | if err != nil {
197 | t.Fatalf("Expected list token return nil, but got %v", err)
198 | }
199 | if _, ok := tokens[tk]; !ok {
200 | t.Fatalf("Expected list token contains test-list-token")
201 | }
202 |
203 | engine.Register(engine.KindRedis, "test-pool", &redis.Engine{})
204 | tk, err = GetTokenManager().New("test-pool", "test-ns", "test-list-token", "")
205 | if err != nil {
206 | t.Fatalf("Expected new token return nil, but got %v", err)
207 | }
208 | defer GetTokenManager().Delete("test-pool", "test-ns", "test-list-token")
209 |
210 | // List tokens in custom pool
211 | tokens, err = GetTokenManager().List("test-pool", "test-ns")
212 | if err != nil {
213 | t.Fatalf("Expected list token return nil, but got %v", err)
214 | }
215 | if _, ok := tokens[tk]; !ok {
216 | t.Fatalf("Expected list token contains test-pool:test-list-token")
217 | }
218 |
219 | // List tokens in not exist pool
220 | _, err = GetTokenManager().List("not-exist", "test-ns")
221 | if err != ErrPoolNotExist {
222 | t.Fatalf("Expected list token return pool not exist error, but got %v", err)
223 | }
224 | }
225 |
--------------------------------------------------------------------------------
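
The two format tests above pin down the key shapes used by the auth package: a Redis hash key per pool and namespace, and a flat in-process cache key. A tiny sketch of those shapes follows; the literal "default" only stands in for config.DefaultPoolName and is an assumption here.

package main

import "fmt"

func main() {
	pool, ns, token := "default", "test-ns", "test-new-token"
	fmt.Println("tk/" + pool + "/" + ns) // Redis hash holding all tokens of the namespace
	fmt.Println(pool + ns + token)       // in-memory cache key: plain concatenation
}
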
/engine/redis/deadletter.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/bitleak/lmstfy/engine"
9 |
10 | go_redis "github.com/go-redis/redis/v8"
11 | )
12 |
13 | const (
14 | luaRespawnDeadletterScript = `
15 | local deadletter = KEYS[1]
16 | local queue = KEYS[2]
17 | local poolPrefix = KEYS[3]
18 | local limit = tonumber(ARGV[1])
19 | local respawnTTL = tonumber(ARGV[2])
20 |
21 | for i = 1, limit do
22 | local data = redis.call("RPOPLPUSH", deadletter, queue)
23 | if data == false then
24 | return i - 1 -- deadletter is empty
25 | end
26 | -- unpack the jobID, and set the TTL
27 | local _, jobID = struct.unpack("HHc0", data)
28 | if respawnTTL > 0 then
29 | redis.call("EXPIRE", poolPrefix .. "/" .. jobID, respawnTTL)
30 | end
31 | end
32 | return limit -- deadletter has more data when return value is >= limit
33 | `
34 | luaDeleteDeadletterScript = `
35 | local deadletter = KEYS[1]
36 | local poolPrefix = KEYS[2]
37 | local limit = tonumber(ARGV[1])
38 |
39 | for i = 1, limit do
40 | local data = redis.call("RPOP", deadletter)
41 | if data == false then
42 | return i - 1
43 | end
44 | -- unpack the jobID, and delete the job from the job pool
45 | local _, jobID = struct.unpack("HHc0", data)
46 | redis.call("DEL", poolPrefix .. "/" .. jobID)
47 | end
48 | return limit
49 | `
50 | )
51 |
52 | var (
53 | respawnDeadletterSHA string
54 | deleteDeadletterSHA string
55 | )
56 |
57 | // Because DeadLetter is not a singleton like Timer (DeadLetters are
58 | // transient objects like Queue), we have to preload
59 | // the lua scripts separately.
60 | func PreloadDeadLetterLuaScript(redis *RedisInstance) error {
61 | sha, err := redis.Conn.ScriptLoad(dummyCtx, luaRespawnDeadletterScript).Result()
62 | if err != nil {
63 | return fmt.Errorf("failed to preload lua script: %s", err)
64 | }
65 | respawnDeadletterSHA = sha
66 |
67 | sha, err = redis.Conn.ScriptLoad(dummyCtx, luaDeleteDeadletterScript).Result()
68 | if err != nil {
69 | 		return fmt.Errorf("failed to preload lua script: %s", err)
70 | }
71 | deleteDeadletterSHA = sha
72 | return nil
73 | }
74 |
75 | // DeadLetter is where dead jobs are buried; a job can be respawned back into the ready queue
76 | type DeadLetter struct {
77 | redis *RedisInstance
78 | namespace string
79 | queue string
80 | }
81 |
82 | // NewDeadLetter returns an instance of DeadLetter storage
83 | func NewDeadLetter(namespace, queue string, redis *RedisInstance) (*DeadLetter, error) {
84 | dl := &DeadLetter{
85 | redis: redis,
86 | namespace: namespace,
87 | queue: queue,
88 | }
89 | if respawnDeadletterSHA == "" || deleteDeadletterSHA == "" {
90 | return nil, errors.New("dead letter's lua script is not preloaded")
91 | }
92 | return dl, nil
93 | }
94 |
95 | func (dl *DeadLetter) Name() string {
96 | return join(DeadLetterPrefix, dl.namespace, dl.queue)
97 | }
98 |
99 | // Add a job to dead letter. NOTE the data format is the same
100 | // as the ready queue (lua struct `HHc0`), by doing this we could directly pop
101 | // the dead job back to the ready queue.
102 | //
103 | // NOTE: this method is not called anywhere except in tests; the same logic is
104 | // implemented in the timer's pump script, please refer to that.
105 | func (dl *DeadLetter) Add(jobID string) error {
106 | val := structPack(1, jobID)
107 | if err := dl.redis.Conn.Persist(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID)).Err(); err != nil {
108 | return err
109 | }
110 | return dl.redis.Conn.LPush(dummyCtx, dl.Name(), val).Err()
111 | }
112 |
113 | func (dl *DeadLetter) Peek() (size int64, jobID string, err error) {
114 | val, err := dl.redis.Conn.LIndex(dummyCtx, dl.Name(), -1).Result()
115 | switch err {
116 | case nil:
117 | // continue
118 | case go_redis.Nil:
119 | return 0, "", engine.ErrNotFound
120 | default:
121 | return 0, "", err
122 | }
123 | tries, jobID, err := structUnpack(val)
124 | if err != nil || tries != 1 {
125 | return 0, "", fmt.Errorf("failed to unpack data: %s", err)
126 | }
127 | size, err = dl.redis.Conn.LLen(dummyCtx, dl.Name()).Result()
128 | if err != nil {
129 | return 0, "", err
130 | }
131 | return size, jobID, nil
132 | }
133 |
134 | func (dl *DeadLetter) Delete(limit int64) (count int64, err error) {
135 | if limit > 1 {
136 | poolPrefix := PoolJobKeyPrefix(dl.namespace, dl.queue)
137 | var batchSize int64 = 100
138 | if batchSize > limit {
139 | batchSize = limit
140 | }
141 | for {
142 | val, err := dl.redis.Conn.EvalSha(dummyCtx, deleteDeadletterSHA, []string{dl.Name(), poolPrefix}, batchSize).Result()
143 | if err != nil {
144 | if isLuaScriptGone(err) {
145 | if err := PreloadDeadLetterLuaScript(dl.redis); err != nil {
146 | logger.WithField("err", err).Error("Failed to load deadletter lua script")
147 | }
148 | continue
149 | }
150 | return count, err
151 | }
152 | n, _ := val.(int64)
153 | count += n
154 | if n < batchSize { // Dead letter is empty
155 | break
156 | }
157 | if count >= limit {
158 | break
159 | }
160 | if limit-count < batchSize {
161 | 				batchSize = limit - count // This is the last batch; we shouldn't delete more jobs than the limit.
162 | }
163 | }
164 | return count, nil
165 | } else if limit == 1 {
166 | data, err := dl.redis.Conn.RPop(dummyCtx, dl.Name()).Result()
167 | if err != nil {
168 | if err == go_redis.Nil {
169 | return 0, nil
170 | }
171 | return 0, err
172 | }
173 | _, jobID, err := structUnpack(data)
174 | if err != nil {
175 | return 1, err
176 | }
177 | err = dl.redis.Conn.Del(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID)).Err()
178 | if err != nil {
179 | return 1, fmt.Errorf("failed to delete job data: %s", err)
180 | }
181 | return 1, nil
182 | } else {
183 | return 0, nil
184 | }
185 | }
186 |
187 | func (dl *DeadLetter) Respawn(limit, ttlSecond int64) (count int64, err error) {
188 | defer func() {
189 | 		if err == nil && count != 0 {
190 | metrics.deadletterRespawnJobs.WithLabelValues(dl.redis.Name).Add(float64(count))
191 | }
192 | }()
193 | queueName := (&QueueName{
194 | Namespace: dl.namespace,
195 | Queue: dl.queue,
196 | }).String()
197 | poolPrefix := PoolJobKeyPrefix(dl.namespace, dl.queue)
198 | if limit > 1 {
199 | var batchSize = BatchSize
200 | if batchSize > limit {
201 | batchSize = limit
202 | }
203 | for {
204 | val, err := dl.redis.Conn.EvalSha(dummyCtx, respawnDeadletterSHA, []string{dl.Name(), queueName, poolPrefix}, batchSize, ttlSecond).Result() // Respawn `batchSize` jobs at a time
205 | if err != nil {
206 | if isLuaScriptGone(err) {
207 | if err := PreloadDeadLetterLuaScript(dl.redis); err != nil {
208 | logger.WithField("err", err).Error("Failed to load deadletter lua script")
209 | }
210 | continue
211 | }
212 | return 0, err
213 | }
214 | n, _ := val.(int64)
215 | count += n
216 | if n < batchSize { // Dead letter is empty
217 | break
218 | }
219 | if count >= limit {
220 | break
221 | }
222 | if limit-count < batchSize {
223 | 				batchSize = limit - count // This is the last batch; we shouldn't respawn jobs exceeding the limit.
224 | }
225 | }
226 | return count, nil
227 | } else if limit == 1 {
228 | data, err := dl.redis.Conn.RPopLPush(dummyCtx, dl.Name(), queueName).Result()
229 | if err != nil {
230 | if err == go_redis.Nil {
231 | return 0, nil
232 | }
233 | return 0, err
234 | }
235 | _, jobID, err := structUnpack(data)
236 | if err != nil {
237 | return 1, err
238 | }
239 | if ttlSecond > 0 {
240 | err = dl.redis.Conn.Expire(dummyCtx, PoolJobKey2(dl.namespace, dl.queue, jobID), time.Duration(ttlSecond)*time.Second).Err()
241 | }
242 | if err != nil {
243 | return 1, fmt.Errorf("failed to set TTL on respawned job[%s]: %s", jobID, err)
244 | }
245 | return 1, nil
246 | } else {
247 | return 0, nil
248 | }
249 | }
250 |
251 | func (dl *DeadLetter) Size() (size int64, err error) {
252 | return dl.redis.Conn.LLen(dummyCtx, dl.Name()).Result()
253 | }
254 |
--------------------------------------------------------------------------------
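
For orientation, the dead-letter primitives above are normally reached through the Engine defined in engine.go later in this dump, which builds a DeadLetter per call and delegates to Peek/Delete/Respawn. A hedged sketch, written as if inside this package; the namespace and queue names are placeholders, not taken from the repo.

// drainDeadLetterSketch peeks at the oldest dead job and then respawns up to
// 100 dead jobs back into the ready queue with a 60-second TTL.
func drainDeadLetterSketch(e *Engine) error {
	size, jobID, err := e.PeekDeadLetter("ns-demo", "q-demo")
	if err != nil {
		return err
	}
	logger.Infof("deadletter size=%d, oldest job=%s", size, jobID)
	_, err = e.RespawnDeadLetter("ns-demo", "q-demo", 100, 60)
	return err
}
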
/engine/redis/engine_test.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 |
10 | "github.com/bitleak/lmstfy/engine"
11 | )
12 |
13 | func TestEngine_Publish(t *testing.T) {
14 | e, err := NewEngine(R.Name, R.Conn)
15 | if err != nil {
16 | panic(fmt.Sprintf("Setup engine error: %s", err))
17 | }
18 | defer e.Shutdown()
19 | body := []byte("hello msg 1")
20 | j := engine.NewJob("ns-engine", "q0", body, nil, 10, 2, 1, "")
21 | jobID, err := e.Publish(j)
22 | t.Log(jobID)
23 | if err != nil {
24 | t.Fatalf("Failed to publish: %s", err)
25 | }
26 |
27 | // Publish no-delay job
28 | j = engine.NewJob("ns-engine", "q0", body, nil, 10, 0, 1, "")
29 | jobID, err = e.Publish(j)
30 | t.Log(jobID)
31 | if err != nil {
32 | t.Fatalf("Failed to publish: %s", err)
33 | }
34 | }
35 |
36 | func TestEngine_Consume(t *testing.T) {
37 | e, err := NewEngine(R.Name, R.Conn)
38 | if err != nil {
39 | panic(fmt.Sprintf("Setup engine error: %s", err))
40 | }
41 | defer e.Shutdown()
42 | body := []byte("hello msg 2")
43 | j := engine.NewJob("ns-engine", "q2", body, nil, 10, 2, 1, "")
44 | jobID, err := e.Publish(j)
45 | t.Log(jobID)
46 | if err != nil {
47 | t.Fatalf("Failed to publish: %s", err)
48 | }
49 | job, err := e.Consume("ns-engine", []string{"q2"}, 3, 3)
50 | if err != nil {
51 | t.Fatalf("Failed to consume: %s", err)
52 | }
53 | if job.Tries() != 0 {
54 | t.Fatalf("job tries = 0 was expected, but got %d", job.Tries())
55 | }
56 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
57 | 		t.Fatalf("Mismatched job data")
58 | }
59 |
60 | // Consume job that's published in no-delay way
61 | j = engine.NewJob("ns-engine", "q2", body, nil, 10, 0, 1, "")
62 | jobID, err = e.Publish(j)
63 | t.Log(jobID)
64 | if err != nil {
65 | t.Fatalf("Failed to publish: %s", err)
66 | }
67 | job, err = e.Consume("ns-engine", []string{"q2"}, 3, 0)
68 | if err != nil {
69 | t.Fatalf("Failed to consume: %s", err)
70 | }
71 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
72 | t.Fatal("Mistmatched job data")
73 | }
74 | }
75 |
76 | // Consume the first one from multi publish
77 | func TestEngine_Consume2(t *testing.T) {
78 | e, err := NewEngine(R.Name, R.Conn)
79 | if err != nil {
80 | panic(fmt.Sprintf("Setup engine error: %s", err))
81 | }
82 | defer e.Shutdown()
83 | body := []byte("hello msg 3")
84 | j := engine.NewJob("ns-engine", "q3", []byte("delay msg"), nil, 10, 5, 1, "")
85 | _, err = e.Publish(j)
86 | j = engine.NewJob("ns-engine", "q3", body, nil, 10, 2, 1, "")
87 | jobID, err := e.Publish(j)
88 | if err != nil {
89 | t.Fatalf("Failed to publish: %s", err)
90 | }
91 | job, err := e.Consume("ns-engine", []string{"q3"}, 3, 3)
92 | if err != nil {
93 | t.Fatalf("Failed to consume: %s", err)
94 | }
95 | if job.Tries() != 0 {
96 | t.Fatalf("job tries = 0 was expected, but got %d", job.Tries())
97 | }
98 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
99 | t.Fatal("Mistmatched job data")
100 | }
101 | }
102 |
103 | func TestEngine_ConsumeMulti(t *testing.T) {
104 | e, err := NewEngine(R.Name, R.Conn)
105 | if err != nil {
106 | panic(fmt.Sprintf("Setup engine error: %s", err))
107 | }
108 | defer e.Shutdown()
109 | body := []byte("hello msg 4")
110 | j := engine.NewJob("ns-engine", "q4", body, nil, 10, 3, 1, "")
111 | jobID, err := e.Publish(j)
112 | if err != nil {
113 | t.Fatalf("Failed to publish: %s", err)
114 | }
115 | j2 := engine.NewJob("ns-engine", "q5", body, nil, 10, 1, 1, "")
116 | jobID2, err := e.Publish(j2)
117 | if err != nil {
118 | t.Fatalf("Failed to publish: %s", err)
119 | }
120 |
121 | job2, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5)
122 | if err != nil {
123 | t.Fatalf("Failed to consume from multiple queues: %s", err)
124 | }
125 | if job2.Tries() != 0 {
126 | t.Fatalf("job tries = 0 was expected, but got %d", job2.Tries())
127 | }
128 | if job2.Queue() != "q5" || job2.ID() != jobID2 { // q5's job should be fired first
129 | t.Error("Mismatched job data")
130 | }
131 |
132 | job1, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5)
133 | if err != nil {
134 | t.Fatalf("Failed to consume from multiple queues: %s", err)
135 | }
136 | if job1.Tries() != 0 {
137 | t.Fatalf("job tries = 0 was expected, but got %d", job1.Tries())
138 | }
139 | if job1.Queue() != "q4" || job1.ID() != jobID { // q4's job should be fired next
140 | t.Fatalf("Failed to consume from multiple queues: %s", err)
141 | }
142 | }
143 |
144 | func TestEngine_Peek(t *testing.T) {
145 | e, err := NewEngine(R.Name, R.Conn)
146 | if err != nil {
147 | panic(fmt.Sprintf("Setup engine error: %s", err))
148 | }
149 | defer e.Shutdown()
150 | body := []byte("hello msg 6")
151 | j := engine.NewJob("ns-engine", "q6", body, nil, 10, 0, 1, "")
152 | jobID, err := e.Publish(j)
153 | if err != nil {
154 | t.Fatalf("Failed to publish: %s", err)
155 | }
156 | job, err := e.Peek("ns-engine", "q6", "")
157 | if err != nil {
158 | t.Fatalf("Failed to peek: %s", err)
159 | }
160 | if job.ID() != jobID || !bytes.Equal(job.Body(), body) {
161 | t.Fatal("Mismatched job")
162 | }
163 | _, err = e.Consume("ns-engine", []string{"q6"}, 5, 0)
164 | if err != nil {
165 | t.Fatalf("Failed to consume previous queue job: %s", err)
166 | }
167 | }
168 |
169 | func TestEngine_BatchConsume(t *testing.T) {
170 | e, err := NewEngine(R.Name, R.Conn)
171 | if err != nil {
172 | panic(fmt.Sprintf("Setup engine error: %s", err))
173 | }
174 | defer e.Shutdown()
175 | body := []byte("hello msg 7")
176 | j := engine.NewJob("ns-engine", "q7", body, nil, 10, 3, 1, "")
177 | jobID, err := e.Publish(j)
178 | t.Log(jobID)
179 | if err != nil {
180 | t.Fatalf("Failed to publish: %s", err)
181 | }
182 | queues := []string{"q7"}
183 | jobs, err := e.BatchConsume("ns-engine", queues, 2, 5, 2)
184 | if err != nil {
185 | t.Fatalf("Failed to Batch consume: %s", err)
186 | }
187 | if len(jobs) != 0 {
188 | t.Fatalf("Wrong job consumed")
189 | }
190 |
191 | jobs, err = e.BatchConsume("ns-engine", queues, 2, 3, 2)
192 | if err != nil {
193 | t.Fatalf("Failed to Batch consume: %s", err)
194 | }
195 | if len(jobs) != 1 || !bytes.Equal(body, jobs[0].Body()) || jobID != jobs[0].ID() {
196 | 		t.Fatalf("Mismatched job data")
197 | }
198 |
199 | // Consume some jobs
200 | jobIDMap := map[string]bool{}
201 | for i := 0; i < 4; i++ {
202 | j := engine.NewJob("ns-engine", "q7", body, nil, 10, 0, 1, "")
203 | jobID, err := e.Publish(j)
204 | t.Log(jobID)
205 | if err != nil {
206 | t.Fatalf("Failed to publish: %s", err)
207 | }
208 | jobIDMap[jobID] = true
209 | }
210 |
211 | // First time batch consume three jobs
212 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
213 | if err != nil {
214 | t.Fatalf("Failed to consume: %s", err)
215 | }
216 | if len(jobs) != 3 {
217 | 		t.Fatalf("Mismatched jobs count")
218 | }
219 | for _, job := range jobs {
220 | if !bytes.Equal(body, job.Body()) || !jobIDMap[job.ID()] {
221 | 			t.Fatalf("Mismatched job data")
222 | }
223 | }
224 |
225 | // Second time batch consume can only get a single job
226 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
227 | if err != nil {
228 | t.Fatalf("Failed to consume: %s", err)
229 | }
230 | if len(jobs) != 1 {
231 | 		t.Fatalf("Mismatched jobs count")
232 | }
233 | if !bytes.Equal(body, jobs[0].Body()) || !jobIDMap[jobs[0].ID()] {
234 | 		t.Fatalf("Mismatched job data")
235 | }
236 |
237 | // Third time batch consume will be blocked by 3s
238 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
239 | if err != nil {
240 | t.Fatalf("Failed to consume: %s", err)
241 | }
242 | if len(jobs) != 0 {
243 | 		t.Fatalf("Mismatched jobs count")
244 | }
245 | }
246 |
247 | func TestEngine_PublishWithJobID(t *testing.T) {
248 | e, err := NewEngine(R.Name, R.Conn)
249 | if err != nil {
250 | panic(fmt.Sprintf("Setup engine error: %s", err))
251 | }
252 | defer e.Shutdown()
253 | body := []byte("hello msg 1")
254 | j := engine.NewJob("ns-engine", "q8", body, nil, 10, 0, 1, "jobID1")
255 | jobID, err := e.Publish(j)
256 | t.Log(jobID)
257 | assert.Nil(t, err)
258 |
259 | // Make sure the engine received the job
260 | job, err := e.Consume("ns-engine", []string{"q8"}, 3, 0)
261 | assert.EqualValues(t, jobID, job.ID())
262 | }
263 |
--------------------------------------------------------------------------------
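
The BatchConsume calls in the test above take (namespace, queues, count, ttrSecond, timeoutSecond): up to count ready jobs are popped without blocking, and the timeout only applies when nothing was ready (see BatchConsume in engine.go later in this dump). A short sketch under those assumptions; the names are placeholders and the surrounding imports (fmt, engine) are assumed.

func batchConsumeSketch(e engine.Engine) {
	// Ask for up to 3 ready jobs with a 30-second TTR; block at most 3 seconds
	// only when no job was immediately available.
	jobs, err := e.BatchConsume("ns-demo", []string{"q-demo"}, 3, 30, 3)
	if err != nil {
		return
	}
	for _, job := range jobs {
		fmt.Println(job.ID(), string(job.Body()))
	}
}
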
/engine/migration/engine_test.go:
--------------------------------------------------------------------------------
1 | package migration
2 |
3 | import (
4 | "bytes"
5 | "testing"
6 | "time"
7 |
8 | "github.com/stretchr/testify/assert"
9 |
10 | "github.com/bitleak/lmstfy/engine"
11 | )
12 |
13 | func TestEngine_Publish(t *testing.T) {
14 | e := NewEngine(OldRedisEngine, NewRedisEngine)
15 | body := []byte("hello msg 1")
16 | j := engine.NewJob("ns-engine", "q1", body, nil, 10, 2, 1, "")
17 | jobID, err := e.Publish(j)
18 | t.Log(jobID)
19 | if err != nil {
20 | t.Fatalf("Failed to publish: %s", err)
21 | }
22 |
23 | // Publish no-delay job
24 | j = engine.NewJob("ns-engine", "q1", body, nil, 10, 0, 1, "")
25 | jobID, err = e.Publish(j)
26 | t.Log(jobID)
27 | if err != nil {
28 | t.Fatalf("Failed to publish: %s", err)
29 | }
30 | // Make sure the new engine received the job
31 | job, err := NewRedisEngine.Consume("ns-engine", []string{"q1"}, 3, 0)
32 | if job.ID() != jobID {
33 | 		t.Fatal("NewRedisEngine should have received the job")
34 | }
35 | }
36 |
37 | func TestEngine_Consume(t *testing.T) {
38 | e := NewEngine(OldRedisEngine, NewRedisEngine)
39 | body := []byte("hello msg 2")
40 | j := engine.NewJob("ns-engine", "q2", body, nil, 10, 2, 1, "")
41 | jobID, err := e.Publish(j)
42 | t.Log(jobID)
43 | if err != nil {
44 | t.Fatalf("Failed to publish: %s", err)
45 | }
46 | queues := []string{"q2"}
47 | job, err := e.Consume("ns-engine", queues, 3, 3)
48 | if err != nil {
49 | t.Fatalf("Failed to consume: %s", err)
50 | }
51 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
52 | 		t.Fatalf("Mismatched job data")
53 | }
54 |
55 | // Consume job that's published in no-delay way
56 | j = engine.NewJob("ns-engine", "q2", body, nil, 10, 0, 1, "")
57 | jobID, err = e.Publish(j)
58 | t.Log(jobID)
59 | if err != nil {
60 | t.Fatalf("Failed to publish: %s", err)
61 | }
62 | job, err = e.Consume("ns-engine", queues, 3, 0)
63 | if err != nil {
64 | t.Fatalf("Failed to consume: %s", err)
65 | }
66 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
67 | 		t.Fatalf("Mismatched job data")
68 | }
69 | }
70 |
71 | // Consume the first one from multi publish
72 | func TestEngine_Consume2(t *testing.T) {
73 | e := NewEngine(OldRedisEngine, NewRedisEngine)
74 | body := []byte("hello msg 3")
75 | j1 := engine.NewJob("ns-engine", "q3", []byte("delay msg"), nil, 10, 5, 1, "")
76 | _, err := e.Publish(j1)
77 | j2 := engine.NewJob("ns-engine", "q3", body, nil, 10, 2, 1, "")
78 | jobID, err := e.Publish(j2)
79 | if err != nil {
80 | t.Fatalf("Failed to publish: %s", err)
81 | }
82 | job, err := e.Consume("ns-engine", []string{"q3"}, 3, 3)
83 | if err != nil {
84 | t.Fatalf("Failed to consume: %s", err)
85 | }
86 | if !bytes.Equal(body, job.Body()) || jobID != job.ID() {
87 | 		t.Fatalf("Mismatched job data")
88 | }
89 | }
90 |
91 | func TestEngine_ConsumeMulti(t *testing.T) {
92 | e := NewEngine(OldRedisEngine, NewRedisEngine)
93 | body := []byte("hello msg 4")
94 | j1 := engine.NewJob("ns-engine", "q4", body, nil, 10, 3, 1, "")
95 | jobID, err := e.Publish(j1)
96 | if err != nil {
97 | t.Fatalf("Failed to publish: %s", err)
98 | }
99 | j2 := engine.NewJob("ns-engine", "q5", body, nil, 10, 1, 1, "")
100 | jobID2, err := e.Publish(j2)
101 | if err != nil {
102 | t.Fatalf("Failed to publish: %s", err)
103 | }
104 |
105 | job2, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5)
106 | if err != nil {
107 | t.Fatalf("Failed to consume from multiple queues: %s", err)
108 | }
109 | if job2.Queue() != "q5" || job2.ID() != jobID2 { // q5's job should be fired first
110 | t.Error("Mismatched job data")
111 | }
112 |
113 | job1, err := e.Consume("ns-engine", []string{"q4", "q5"}, 5, 5)
114 | if err != nil {
115 | t.Fatalf("Failed to consume from multiple queues: %s", err)
116 | }
117 | if job1.Queue() != "q4" || job1.ID() != jobID { // q4's job should be fired next
118 | t.Fatalf("Failed to consume from multiple queues: %s", err)
119 | }
120 | }
121 |
122 | func TestEngine_Peek(t *testing.T) {
123 | e := NewEngine(OldRedisEngine, NewRedisEngine)
124 | body := []byte("hello msg 6")
125 | j := engine.NewJob("ns-engine", "q6", body, nil, 10, 0, 1, "")
126 | jobID, err := e.Publish(j)
127 | if err != nil {
128 | t.Fatalf("Failed to publish: %s", err)
129 | }
130 | job, err := e.Peek("ns-engine", "q6", "")
131 | if job.ID() != jobID || !bytes.Equal(job.Body(), body) {
132 | t.Fatal("Mismatched job")
133 | }
134 | }
135 |
136 | func TestEngine_DrainOld(t *testing.T) {
137 | e := NewEngine(OldRedisEngine, NewRedisEngine)
138 | body := []byte("hello msg 7")
139 | j := engine.NewJob("ns-engine", "q7", body, nil, 10, 0, 1, "")
140 | jobID, err := OldRedisEngine.Publish(j)
141 | job, err := e.Consume("ns-engine", []string{"q7"}, 5, 0)
142 | if err != nil {
143 | t.Fatal("Failed to drain the old engine's data")
144 | }
145 | if job.ID() != jobID {
146 | t.Fatal("Mismatched job")
147 | }
148 | }
149 |
150 | func TestEngine_BatchConsume(t *testing.T) {
151 | e := NewEngine(OldRedisEngine, NewRedisEngine)
152 | body := []byte("hello msg 8")
153 | j := engine.NewJob("ns-engine", "q8", body, nil, 10, 2, 1, "")
154 | jobID, err := e.Publish(j)
155 | if err != nil {
156 | t.Fatalf("Failed to publish: %s", err)
157 | }
158 |
159 | queues := []string{"q8"}
160 | jobs, err := e.BatchConsume("ns-engine", queues, 2, 5, 0)
161 | if err != nil {
162 | t.Fatalf("Failed to Batch consume: %s", err)
163 | }
164 | if len(jobs) != 0 {
165 | t.Fatalf("Wrong job consumed")
166 | }
167 |
168 | jobs, err = e.BatchConsume("ns-engine", queues, 2, 5, 3)
169 | if err != nil {
170 | t.Fatalf("Failed to Batch consume: %s", err)
171 | }
172 | if len(jobs) != 1 || !bytes.Equal(body, jobs[0].Body()) || jobID != jobs[0].ID() {
173 | 		t.Fatalf("Mismatched job data")
174 | }
175 |
176 | // Consume some jobs
177 | jobIDMap := map[string]bool{}
178 | for i := 0; i < 4; i++ {
179 | j := engine.NewJob("ns-engine", "q8", body, nil, 10, 0, 1, "")
180 | jobID, err := e.Publish(j)
181 | t.Log(jobID)
182 | if err != nil {
183 | t.Fatalf("Failed to publish: %s", err)
184 | }
185 | jobIDMap[jobID] = true
186 | }
187 |
188 | // First time batch consume three jobs
189 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
190 | if err != nil {
191 | t.Fatalf("Failed to consume: %s", err)
192 | }
193 | if len(jobs) != 3 {
194 | 		t.Fatalf("Mismatched jobs count")
195 | }
196 | for _, job := range jobs {
197 | if !bytes.Equal(body, job.Body()) || !jobIDMap[job.ID()] {
198 | 			t.Fatalf("Mismatched job data")
199 | }
200 | }
201 |
202 | // Second time batch consume can only get a single job
203 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
204 | if err != nil {
205 | t.Fatalf("Failed to consume: %s", err)
206 | }
207 | if len(jobs) != 1 {
208 | 		t.Fatalf("Mismatched jobs count")
209 | }
210 | if !bytes.Equal(body, jobs[0].Body()) || !jobIDMap[jobs[0].ID()] {
211 | 		t.Fatalf("Mismatched job data")
212 | }
213 |
214 | jobs, err = e.BatchConsume("ns-engine", queues, 3, 3, 3)
215 | if err != nil {
216 | t.Fatalf("Failed to consume: %s", err)
217 | }
218 | if len(jobs) != 0 {
219 | 		t.Fatalf("Mismatched jobs count")
220 | }
221 | }
222 |
223 | func TestEngine_DeadLetter_Size(t *testing.T) {
224 | body := []byte("hello msg 9")
225 | queues := []string{"q9"}
226 | j := engine.NewJob("ns-engine", "q9", body, nil, 10, 0, 1, "")
227 | jobID, err := OldRedisEngine.Publish(j)
228 | job, err := OldRedisEngine.Consume("ns-engine", queues, 0, 0)
229 | if err != nil {
230 | t.Fatal("Failed to drain the old engine's data")
231 | }
232 | if job.ID() != jobID {
233 | t.Fatal("Mismatched job")
234 | }
235 | j = engine.NewJob("ns-engine", "q9", body, nil, 10, 0, 1, "")
236 | jobID, err = NewRedisEngine.Publish(j)
237 | job, err = NewRedisEngine.Consume("ns-engine", queues, 0, 0)
238 | if job.ID() != jobID {
239 | t.Fatal("Mismatched job")
240 | }
241 | time.Sleep(2 * time.Second)
242 | e := NewEngine(OldRedisEngine, NewRedisEngine)
243 | size, _ := e.SizeOfDeadLetter("ns-engine", "q9")
244 | if size != 2 {
245 | t.Fatalf("Expected the deadletter queue size is: %d, but got %d\n", 2, size)
246 | }
247 | }
248 |
249 | func TestEngine_PublishWithJobID(t *testing.T) {
250 | e := NewEngine(OldRedisEngine, NewRedisEngine)
251 | body := []byte("hello msg 1")
252 | // Publish no-delay job
253 | j := engine.NewJob("ns-engine", "q10", body, nil, 10, 0, 1, "jobID1")
254 | jobID, err := e.Publish(j)
255 | t.Log(jobID)
256 | assert.Nil(t, err)
257 | // Make sure the new engine received the job
258 | job, err := NewRedisEngine.Consume("ns-engine", []string{"q10"}, 3, 0)
259 | assert.Nil(t, err)
260 | assert.EqualValues(t, job.ID(), jobID)
261 | }
262 |
--------------------------------------------------------------------------------
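
The behaviour these tests assert is that the migration engine publishes only to the new Redis engine while jobs still sitting in the old engine remain consumable through it (TestEngine_Publish and TestEngine_DrainOld). A compact sketch in the same package, reusing the test fixtures OldRedisEngine and NewRedisEngine; the queue name and payload are placeholders.

func migrationSketch() {
	m := NewEngine(OldRedisEngine, NewRedisEngine)
	j := engine.NewJob("ns-engine", "q-demo", []byte("payload"), nil, 10, 0, 1, "")
	jobID, _ := m.Publish(j) // stored by NewRedisEngine only
	// Jobs published to either engine are still reachable through m.
	job, _ := m.Consume("ns-engine", []string{"q-demo"}, 3, 0)
	_, _ = jobID, job
}
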
/engine/redis/queue.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "encoding/binary"
5 | "errors"
6 | "fmt"
7 | "time"
8 |
9 | "github.com/bitleak/lmstfy/engine"
10 | go_redis "github.com/go-redis/redis/v8"
11 | "github.com/sirupsen/logrus"
12 | )
13 |
14 | const (
15 | luaRPOPMultiQueuesScript = `
16 | for _, queue in ipairs(KEYS) do
17 | local v = redis.call("RPOP", queue)
18 | if v ~= false then
19 | return {queue, v}
20 | end
21 | end
22 | return {"", ""}
23 | `
24 | )
25 |
26 | var rpopMultiQueuesSHA string
27 |
28 | type QueueName struct {
29 | Namespace string
30 | Queue string
31 | }
32 |
33 | func (k *QueueName) String() string {
34 | return join(QueuePrefix, k.Namespace, k.Queue)
35 | }
36 |
37 | func (k *QueueName) Decode(str string) error {
38 | splits := splits(3, str)
39 | if len(splits) != 3 || splits[0] != QueuePrefix {
40 | return errors.New("invalid format")
41 | }
42 | k.Namespace = splits[1]
43 | k.Queue = splits[2]
44 | return nil
45 | }
46 |
47 | // Queue is the "ready queue" that has all the jobs that can be consumed right now
48 | type Queue struct {
49 | name QueueName
50 | redis *RedisInstance
51 | timer *Timer
52 |
53 | destroySHA string
54 | }
55 |
56 | func NewQueue(namespace, queue string, redis *RedisInstance, timer *Timer) *Queue {
57 | return &Queue{
58 | name: QueueName{Namespace: namespace, Queue: queue},
59 | redis: redis,
60 | timer: timer,
61 |
62 | 		// NOTE: deadletter and queue are actually the same data structure, so we can reuse the lua script
63 | 		// to empty the redis list (used as a queue here). All we need to do is pass the queue name as the
64 | 		// deadletter name.
65 | destroySHA: deleteDeadletterSHA,
66 | }
67 | }
68 |
69 | func (q *Queue) Name() string {
70 | return q.name.String()
71 | }
72 |
73 | // Push a job into the queue, the job data format: {tries}{job id}
74 | func (q *Queue) Push(j engine.Job, tries uint16) error {
75 | if tries == 0 {
76 | return nil
77 | }
78 | if j.Namespace() != q.name.Namespace || j.Queue() != q.name.Queue {
79 | // Wrong queue for the job
80 | return engine.ErrWrongQueue
81 | }
82 | metrics.queueDirectPushJobs.WithLabelValues(q.redis.Name).Inc()
83 | val := structPack(tries, j.ID())
84 | return q.redis.Conn.LPush(dummyCtx, q.Name(), val).Err()
85 | }
86 |
87 | // Poll pops a job. If tries > 0, add the job to the "in-flight" timer with timestamp
88 | // set to `TTR + now()`; otherwise we might just move the job to the "dead-letter".
89 | func (q *Queue) Poll(timeoutSecond, ttrSecond uint32) (jobID string, tries uint16, err error) {
90 | _, jobID, tries, err = PollQueues(q.redis, q.timer, []QueueName{q.name}, timeoutSecond, ttrSecond)
91 | return jobID, tries, err
92 | }
93 |
94 | // Size returns the number of jobs currently in the queue
95 | func (q *Queue) Size() (size int64, err error) {
96 | return q.redis.Conn.LLen(dummyCtx, q.name.String()).Result()
97 | }
98 |
99 | // Peek a right-most element in the list without popping it
100 | func (q *Queue) Peek() (jobID string, tries uint16, err error) {
101 | val, err := q.redis.Conn.LIndex(dummyCtx, q.Name(), -1).Result()
102 | switch err {
103 | case nil:
104 | // continue
105 | case go_redis.Nil:
106 | return "", 0, engine.ErrNotFound
107 | default:
108 | return "", 0, err
109 | }
110 | tries, jobID, err = structUnpack(val)
111 | return jobID, tries, err
112 | }
113 |
114 | func (q *Queue) Destroy() (count int64, err error) {
115 | poolPrefix := PoolJobKeyPrefix(q.name.Namespace, q.name.Queue)
116 | var batchSize int64 = 100
117 | for {
118 | val, err := q.redis.Conn.EvalSha(dummyCtx, q.destroySHA, []string{
119 | q.Name(), poolPrefix,
120 | }, batchSize).Result()
121 | if err != nil {
122 | if isLuaScriptGone(err) {
123 | if err := PreloadDeadLetterLuaScript(q.redis); err != nil {
124 | logger.WithField("err", err).Error("Failed to load deadletter lua script")
125 | }
126 | continue
127 | }
128 | return count, err
129 | }
130 | n, _ := val.(int64)
131 | count += n
132 | 		if n < batchSize { // Queue is empty
133 | break
134 | }
135 | }
136 | return count, nil
137 | }
138 |
139 | func PreloadQueueLuaScript(redis *RedisInstance) error {
140 | sha, err := redis.Conn.ScriptLoad(dummyCtx, luaRPOPMultiQueuesScript).Result()
141 | if err != nil {
142 | return fmt.Errorf("preload rpop multi lua script err: %s", err)
143 | }
144 | rpopMultiQueuesSHA = sha
145 | return nil
146 | }
147 |
148 | func popMultiQueues(redis *RedisInstance, queueNames []string) (string, string, error) {
149 | if len(queueNames) == 1 {
150 | val, err := redis.Conn.RPop(dummyCtx, queueNames[0]).Result()
151 | return queueNames[0], val, err
152 | }
153 | vals, err := redis.Conn.EvalSha(dummyCtx, rpopMultiQueuesSHA, queueNames).Result()
154 | if err != nil && isLuaScriptGone(err) {
155 | if err = PreloadQueueLuaScript(redis); err != nil {
156 | return "", "", err
157 | }
158 | vals, err = redis.Conn.EvalSha(dummyCtx, rpopMultiQueuesSHA, queueNames).Result()
159 | }
160 | if err != nil {
161 | return "", "", err
162 | }
163 | fields, ok := vals.([]interface{})
164 | if !ok || len(fields) != 2 {
165 | 		return "", "", errors.New("lua return value should be a two-element array")
166 | }
167 | queueName, ok1 := fields[0].(string)
168 | value, ok2 := fields[1].(string)
169 | if !ok1 || !ok2 {
170 | return "", "", errors.New("invalid lua value type")
171 | }
172 | 	if queueName == "" && value == "" { // an empty queueName and value means RPOP found no values
173 | return "", "", go_redis.Nil
174 | }
175 | return queueName, value, nil
176 | }
177 |
178 | // PollQueues polls from multiple queues using a blocking pop when timeoutSecond > 0, or a non-blocking pop otherwise
179 | func PollQueues(redis *RedisInstance, timer *Timer, queueNames []QueueName,
180 | timeoutSecond, ttrSecond uint32,
181 | ) (queueName *QueueName, jobID string, retries uint16, err error) {
182 | defer func() {
183 | if jobID != "" {
184 | metrics.queuePopJobs.WithLabelValues(redis.Name).Inc()
185 | }
186 | }()
187 |
188 | var val []string
189 | keys := make([]string, len(queueNames))
190 | for i, k := range queueNames {
191 | keys[i] = k.String()
192 | }
193 | if timeoutSecond > 0 { // Blocking poll
194 | val, err = redis.Conn.BRPop(dummyCtx, time.Duration(timeoutSecond)*time.Second, keys...).Result()
195 | } else { // Non-Blocking fetch
196 | val = make([]string, 2) // Just to be coherent with BRPop return values
197 | val[0], val[1], err = popMultiQueues(redis, keys)
198 | }
199 | switch err {
200 | case nil:
201 | // continue
202 | case go_redis.Nil:
203 | logger.Debug("Job not found")
204 | return nil, "", 0, nil
205 | default:
206 | logger.WithField("err", err).Error("Failed to pop job from queue")
207 | return nil, "", 0, err
208 | }
209 | queueName = &QueueName{}
210 | if err := queueName.Decode(val[0]); err != nil {
211 | logger.WithField("err", err).Error("Failed to decode queue name")
212 | return nil, "", 0, err
213 | }
214 | tries, jobID, err := structUnpack(val[1])
215 | if err != nil {
216 | logger.WithField("err", err).Error("Failed to unpack lua struct data")
217 | return nil, "", 0, err
218 | }
219 |
220 | if tries == 0 {
221 | logger.WithFields(logrus.Fields{
222 | "jobID": jobID,
223 | "ttr": ttrSecond,
224 | "queue": queueName.String(),
225 | }).Error("Job with tries == 0 appeared")
226 | return nil, "", 0, fmt.Errorf("Job %s with tries == 0 appeared", jobID)
227 | }
228 | tries = tries - 1
229 | 	err = timer.Add(queueName.Namespace, queueName.Queue, jobID, ttrSecond, tries) // NOTE: the tries stored in the timer is the already-decremented value
230 | if err != nil {
231 | logger.WithFields(logrus.Fields{
232 | "err": err,
233 | "jobID": jobID,
234 | "ttr": ttrSecond,
235 | "queue": queueName.String(),
236 | }).Error("Failed to add job to timer for ttr")
237 | return queueName, jobID, tries, err
238 | }
239 | return queueName, jobID, tries, nil
240 | }
241 |
242 | // Pack (tries, jobID) into lua struct pack of format "HHc0", in lua this can be done:
243 | //
244 | // ```local data = struct.pack("HHc0", tries, #job_id, job_id)```
245 | func structPack(tries uint16, jobID string) (data string) {
246 | buf := make([]byte, 2+2+len(jobID))
247 | binary.LittleEndian.PutUint16(buf[0:], tries)
248 | binary.LittleEndian.PutUint16(buf[2:], uint16(len(jobID)))
249 | copy(buf[4:], jobID)
250 | return string(buf)
251 | }
252 |
253 | // Unpack the "HHc0" lua struct format, in lua this can be done:
254 | //
255 | // ```local tries, job_id = struct.unpack("HHc0", data)```
256 | func structUnpack(data string) (tries uint16, jobID string, err error) {
257 | buf := []byte(data)
258 | h1 := binary.LittleEndian.Uint16(buf[0:])
259 | h2 := binary.LittleEndian.Uint16(buf[2:])
260 | jobID = string(buf[4:])
261 | tries = h1
262 | if len(jobID) != int(h2) {
263 | err = errors.New("corrupted data")
264 | }
265 | return
266 | }
267 |
--------------------------------------------------------------------------------
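
To make the "HHc0" layout used by structPack/structUnpack concrete: two little-endian uint16 fields (tries, then the job ID length) are followed by the raw job ID bytes. A standalone worked example; the job ID value is arbitrary.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	tries, jobID := uint16(1), "01DEADBEEF"
	buf := make([]byte, 4+len(jobID))
	binary.LittleEndian.PutUint16(buf[0:], tries)              // H: tries
	binary.LittleEndian.PutUint16(buf[2:], uint16(len(jobID))) // H: length of the job ID
	copy(buf[4:], jobID)                                       // c0: job ID bytes
	fmt.Printf("% x\n", buf) // prints: 01 00 0a 00 followed by the job ID bytes
}
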
/engine/redis/engine.go:
--------------------------------------------------------------------------------
1 | package redis
2 |
3 | import (
4 | "encoding/json"
5 | "errors"
6 | "fmt"
7 | "io"
8 | "time"
9 |
10 | go_redis "github.com/go-redis/redis/v8"
11 |
12 | "github.com/bitleak/lmstfy/engine"
13 | "github.com/bitleak/lmstfy/uuid"
14 | )
15 |
16 | type RedisInstance struct {
17 | Name string
18 | Conn *go_redis.Client
19 | }
20 |
21 | // Engine that connects all the dots including:
22 | // - store jobs to timer set or ready queue
23 | // - deliver jobs to clients
24 | // - manage dead letters
25 | type Engine struct {
26 | redis *RedisInstance
27 | pool *Pool
28 | timer *Timer
29 | meta *MetaManager
30 | monitor *SizeMonitor
31 | }
32 |
33 | func NewEngine(redisName string, conn *go_redis.Client) (engine.Engine, error) {
34 | redis := &RedisInstance{
35 | Name: redisName,
36 | Conn: conn,
37 | }
38 | if err := PreloadDeadLetterLuaScript(redis); err != nil {
39 | return nil, err
40 | }
41 | if err := PreloadQueueLuaScript(redis); err != nil {
42 | return nil, err
43 | }
44 | go RedisInstanceMonitor(redis)
45 | meta := NewMetaManager(redis)
46 | timer, err := NewTimer("timer_set", redis, time.Second)
47 | if err != nil {
48 | return nil, err
49 | }
50 | metadata, err := meta.Dump()
51 | if err != nil {
52 | return nil, err
53 | }
54 | monitor := NewSizeMonitor(redis, timer, metadata)
55 | go monitor.Loop()
56 | eng := &Engine{
57 | redis: redis,
58 | pool: NewPool(redis),
59 | timer: timer,
60 | meta: meta,
61 | monitor: monitor,
62 | }
63 | return eng, nil
64 | }
65 |
66 | func (e *Engine) Publish(job engine.Job) (jobID string, err error) {
67 | namespace, queue, delaySecond, tries := job.Namespace(), job.Queue(), job.Delay(), job.Tries()
68 | defer func() {
69 | if err == nil {
70 | metrics.publishJobs.WithLabelValues(e.redis.Name).Inc()
71 | metrics.publishQueueJobs.WithLabelValues(e.redis.Name, namespace, queue).Inc()
72 | }
73 | }()
74 | e.meta.RecordIfNotExist(namespace, queue)
75 | e.monitor.MonitorIfNotExist(namespace, queue)
76 | if tries == 0 {
77 | return job.ID(), errors.New("invalid job: tries cannot be zero")
78 | }
79 |
80 | err = e.pool.Add(job)
81 | if err != nil {
82 | return job.ID(), fmt.Errorf("pool: %s", err)
83 | }
84 |
85 | if delaySecond == 0 {
86 | q := NewQueue(namespace, queue, e.redis, e.timer)
87 | err = q.Push(job, tries)
88 | if err != nil {
89 | err = fmt.Errorf("queue: %s", err)
90 | }
91 | return job.ID(), err
92 | }
93 | err = e.timer.Add(namespace, queue, job.ID(), delaySecond, tries)
94 | if err != nil {
95 | err = fmt.Errorf("timer: %s", err)
96 | }
97 | return job.ID(), err
98 | }
99 |
100 | // BatchConsume consumes up to `count` jobs from the given queues
101 | func (e *Engine) BatchConsume(namespace string, queues []string, count, ttrSecond, timeoutSecond uint32) (jobs []engine.Job, err error) {
102 | jobs = make([]engine.Job, 0)
103 | 	// timeout is 0 so we can quickly check whether there is any job in the ready queue;
104 | 	// if there is, we won't be blocked waiting for a new job to be published.
105 | for i := uint32(0); i < count; i++ {
106 | job, err := e.Consume(namespace, queues, ttrSecond, 0)
107 | if err != nil {
108 | return jobs, err
109 | }
110 | if job == nil {
111 | break
112 | }
113 | jobs = append(jobs, job)
114 | }
115 | 	// If no job was fetched and we are consuming in blocking mode, wait for a single job and return
116 | if timeoutSecond > 0 && len(jobs) == 0 {
117 | job, err := e.Consume(namespace, queues, ttrSecond, timeoutSecond)
118 | if err != nil {
119 | return jobs, err
120 | }
121 | if job != nil {
122 | jobs = append(jobs, job)
123 | }
124 | return jobs, nil
125 | }
126 | return jobs, nil
127 | }
128 |
129 | // Consume consumes from multiple queues under the same namespace. The queue order implies priority:
130 | // the first queue in the list has the highest priority when it has jobs ready to
131 | // be consumed. If none of the queues has any job, Consume waits for whichever queue
132 | // receives a job first.
133 | func (e *Engine) Consume(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job engine.Job, err error) {
134 | return e.consumeMulti(namespace, queues, ttrSecond, timeoutSecond)
135 | }
136 |
137 | func (e *Engine) consumeMulti(namespace string, queues []string, ttrSecond, timeoutSecond uint32) (job engine.Job, err error) {
138 | defer func() {
139 | if job != nil {
140 | metrics.consumeMultiJobs.WithLabelValues(e.redis.Name).Inc()
141 | metrics.consumeQueueJobs.WithLabelValues(e.redis.Name, namespace, job.Queue()).Inc()
142 | }
143 | }()
144 | queueNames := make([]QueueName, len(queues))
145 | for i, q := range queues {
146 | queueNames[i].Namespace = namespace
147 | queueNames[i].Queue = q
148 | }
149 | for {
150 | startTime := time.Now().Unix()
151 | queueName, jobID, tries, err := PollQueues(e.redis, e.timer, queueNames, timeoutSecond, ttrSecond)
152 | if err != nil {
153 | return nil, fmt.Errorf("queue: %s", err)
154 | }
155 | if jobID == "" {
156 | return nil, nil
157 | }
158 | endTime := time.Now().Unix()
159 | payload, ttl, err := e.pool.Get(namespace, queueName.Queue, jobID)
160 | switch err {
161 | case nil:
162 | // no-op
163 | case engine.ErrNotFound:
164 | timeoutSecond = timeoutSecond - uint32(endTime-startTime)
165 | if timeoutSecond > 0 {
166 | 				// This can happen if the job's delay time is larger than the job's TTL,
167 | 				// so when the timer fires the job ID, the actual job data is long gone.
168 | 				// When that happens, we should use what's left of timeoutSecond to keep on polling.
169 | 				//
170 | 				// Another scenario: consumer A DELETEs the job _after_ the TTR, while consumer B is
171 | 				// polling on the queue and gets notified to retry the job, only to find that the
172 | 				// job was already deleted by A.
173 | continue
174 | } else {
175 | return nil, nil
176 | }
177 | default:
178 | return nil, fmt.Errorf("pool: %s", err)
179 | }
180 | job = engine.NewJobWithID(namespace, queueName.Queue, payload.Body, payload.Attributes, ttl, tries, jobID)
181 | metrics.jobElapsedMS.WithLabelValues(e.redis.Name, namespace, queueName.Queue).Observe(float64(job.ElapsedMS()))
182 | return job, nil
183 | }
184 | }
185 |
186 | func (e *Engine) Delete(namespace, queue, jobID string) error {
187 | err := e.pool.Delete(namespace, queue, jobID)
188 | if err == nil {
189 | elapsedMS, _ := uuid.ElapsedMilliSecondFromUniqueID(jobID)
190 | metrics.jobAckElapsedMS.WithLabelValues(e.redis.Name, namespace, queue).Observe(float64(elapsedMS))
191 | }
192 | return err
193 | }
194 |
195 | func (e *Engine) Peek(namespace, queue, optionalJobID string) (job engine.Job, err error) {
196 | jobID := optionalJobID
197 | var tries uint16
198 | if optionalJobID == "" {
199 | q := NewQueue(namespace, queue, e.redis, e.timer)
200 | jobID, tries, err = q.Peek()
201 | switch err {
202 | case nil:
203 | // continue
204 | case engine.ErrNotFound:
205 | return nil, engine.ErrEmptyQueue
206 | default:
207 | return nil, fmt.Errorf("failed to peek queue: %s", err)
208 | }
209 | }
210 | payload, ttl, err := e.pool.Get(namespace, queue, jobID)
211 | // Tricky: we shouldn't return the not-found error when the job was not found,
212 | // since the job may have expired (its TTL was reached) and that would confuse the user, so
213 | // we return a nil-body job instead of the not-found error here. But if `optionalJobID`
214 | // was provided, we should return the not-found error.
215 | if optionalJobID == "" && errors.Is(err, engine.ErrNotFound) {
216 | // return jobID with nil body if the job is expired
217 | return engine.NewJobWithID(namespace, queue, nil, nil, 0, 0, jobID), nil
218 | }
219 | if err != nil {
220 | return nil, err
221 | }
222 | return engine.NewJobWithID(namespace, queue, payload.Body, payload.Attributes, ttl, tries, jobID), err
223 | }
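// peekExample is an editor-added sketch, not part of the original file; eng is hypothetical and it
// assumes engine.Job exposes Body(). With an empty job ID, Peek inspects the head of the ready
// queue and may return a job with a nil body when the underlying job data has already expired.
func peekExample(eng *Engine) {
	job, err := eng.Peek("my-ns", "orders", "")
	if errors.Is(err, engine.ErrEmptyQueue) {
		fmt.Println("queue is empty")
		return
	}
	if err != nil {
		fmt.Println("peek failed:", err)
		return
	}
	if job.Body() == nil {
		fmt.Println("head job", job.ID(), "has expired")
		return
	}
	fmt.Println("head job:", job.ID())
}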
224 |
225 | func (e *Engine) Size(namespace, queue string) (size int64, err error) {
226 | q := NewQueue(namespace, queue, e.redis, e.timer)
227 | return q.Size()
228 | }
229 |
230 | func (e *Engine) Destroy(namespace, queue string) (count int64, err error) {
231 | e.meta.Remove(namespace, queue)
232 | e.monitor.Remove(namespace, queue)
233 | q := NewQueue(namespace, queue, e.redis, e.timer)
234 | return q.Destroy()
235 | }
236 |
237 | func (e *Engine) PeekDeadLetter(namespace, queue string) (size int64, jobID string, err error) {
238 | dl, err := NewDeadLetter(namespace, queue, e.redis)
239 | if err != nil {
240 | return 0, "", err
241 | }
242 | return dl.Peek()
243 | }
244 |
245 | func (e *Engine) DeleteDeadLetter(namespace, queue string, limit int64) (count int64, err error) {
246 | dl, err := NewDeadLetter(namespace, queue, e.redis)
247 | if err != nil {
248 | return 0, err
249 | }
250 | return dl.Delete(limit)
251 | }
252 |
253 | func (e *Engine) RespawnDeadLetter(namespace, queue string, limit, ttlSecond int64) (count int64, err error) {
254 | dl, err := NewDeadLetter(namespace, queue, e.redis)
255 | if err != nil {
256 | return 0, err
257 | }
258 | return dl.Respawn(limit, ttlSecond)
259 | }
260 |
261 | // SizeOfDeadLetter returns the size of the dead letter queue
262 | func (e *Engine) SizeOfDeadLetter(namespace, queue string) (size int64, err error) {
263 | dl, err := NewDeadLetter(namespace, queue, e.redis)
264 | if err != nil {
265 | return 0, err
266 | }
267 | return dl.Size()
268 | }
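// deadLetterExample is an editor-added sketch, not part of the original file; eng, the queue name
// and the limits are hypothetical. It shows how the dead letter helpers above fit together:
// inspect the head, then respawn a bounded number of dead jobs back onto the ready queue.
func deadLetterExample(eng *Engine) error {
	size, headJobID, err := eng.PeekDeadLetter("my-ns", "orders")
	if err != nil {
		return err
	}
	fmt.Printf("dead letter holds %d jobs, head is %s\n", size, headJobID)

	// Put at most 100 dead jobs back onto the ready queue, each with a 24h TTL.
	respawned, err := eng.RespawnDeadLetter("my-ns", "orders", 100, 24*3600)
	if err != nil {
		return err
	}
	fmt.Printf("respawned %d jobs\n", respawned)
	return nil
}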
269 |
270 | func (e *Engine) Shutdown() {
271 | e.timer.Shutdown()
272 | }
273 |
274 | func (e *Engine) DumpInfo(out io.Writer) error {
275 | metadata, err := e.meta.Dump()
276 | if err != nil {
277 | return err
278 | }
279 | enc := json.NewEncoder(out)
280 | enc.SetIndent("", " ")
281 | return enc.Encode(metadata)
282 | }
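// Editor note (added for illustration, not part of the original file): DumpInfo accepts any
// io.Writer, so callers can dump the queue metadata as indented JSON to a file, an HTTP response,
// or os.Stdout for debugging, e.g. err := eng.DumpInfo(os.Stdout); the eng value and the os
// import are assumed here.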
283 |
--------------------------------------------------------------------------------