├── CHANGELOG.md
├── README.md
├── app.log
├── countlimiter.go
├── fixedwindow.go
├── go.mod
├── go.sum
├── leakybucket.go
├── limitrecord.go
├── rate_test.go
├── ratelimit.go
├── ratelimit_config.toml
├── redistoken_test.go
├── redistokenlimiter.go
├── slidewindow.go
├── time_rate.go
├── tokenbucket.go
├── tokenbucket.lua
├── watchdog.go
├── web.go
└── web_test.go
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ### go-ratelimit-manager
2 |
3 | ### version 0.0.2
4 | 1. Added the WatchDog interface
5 | 2. Added OptionFunc options
6 | 3. Added LimitRecord to record rate-limiting activity
7 |
8 | ### version 0.0.1
9 |
10 | 1. Single-node rate limiting with sliding window, fixed window, and token bucket algorithms
11 | 2. Distributed rate limiting via the token bucket algorithm, implemented with Redis and a Lua script
12 | 3. Configurable single-node/distributed rate limiting
13 |
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # go-ratelimit-manager
2 | Single-node and distributed rate limiting implemented in Go
3 | ## Features
4 |
5 | 1. Multiple rate-limiting strategies: fixed window, sliding window, leaky bucket, and token bucket, with support for custom extensions
6 | 2. Multi-granularity rate limiting: apply different limiters to different API scopes
7 | 3. Custom configuration through a config file
8 | 4. Single-node limiting implemented in native Go; distributed limiting implemented with Redis + Lua scripts
9 | 5. WatchDog monitoring to keep the rate-limiting components highly available
10 | 6. todo: high availability
11 | 7. todo: monitoring system
12 | 8. todo: smooth (warm-up) rate limiting
13 |
14 |
15 | ## Easy Use
16 | ``` sh
17 | go get github.com/ajwlforever/go-ratelimit-manager@latest
18 | ```
19 |
20 | ``` go
21 | import (
22 | rate "github.com/ajwlforever/go-ratelimit-manager"
23 | )
24 |
25 | func main() {
26 | svr, _ := rate.NewRateLimitService("configs/ratelimit_config.toml", rate.NewRedisClient())
27 | // use a specific limiter by its key
28 | res := svr.Limiters["api_ai"].TryAcquire(context.Background())
29 | if res.Ok {
30 | log.Println("allow")
31 | } else {
32 | log.Println("reject")
33 | }
34 | }
35 |
36 | ```
37 | ### 1. Use
38 | Create an xxx limiter in either of two ways:
39 | `NewxxxLimiter` or `NewLimiter(WithxxxLimiter())`
40 | #### (1). FixedWindowLimiter
41 | ``` go
42 | // NewFixedWindowLimiter
43 | func NewFixedWindowLimiter(key string, unitTime time.Duration, maxCount int) *FixedWindowLimiter
44 | ```
45 | Parameters:
46 | ``` go
47 | Key string // limiter key
48 | UnitTime time.Duration // window duration
49 | MaxCount int // maximum number of requests allowed per window
50 | ```
51 | Create one directly:
52 | ``` go
53 | limiter = NewFixedWindowLimiter("filedown", time.Second*5, 1)
54 | ```
55 | or via an option:
56 | ``` go
57 | limiter = NewLimiter(WithFixedWindowLimiter("filedown", time.Second*5, 1))
58 | ```
59 | #### (2). SlideWindowLimiter
60 | ``` go
61 | type SlideWindowLimiter struct {
62 | UnitTime time.Duration // window duration
63 | SmallUnitTime time.Duration // sub-window duration
64 | Cnts []int // request count per sub-window - fixed size, simulating a circular queue
65 | Index int // current index in the circular queue
66 | Count int // actual number of requests in the window
67 | MaxCount int // maximum number of requests allowed per window
68 | Mu sync.Mutex
69 | }
70 | func NewSlideWindowLimiter(key string, unitTime time.Duration, smallUnitTime time.Duration, maxCount int)
71 |
72 | slide = NewSlideWindowLimiter("user_login", time.Second*10, time.Second*5, 1)
73 | slide = NewLimiter(WithSlideWindowLimiter("user_login", time.Second*10, time.Second*5, 1))
74 | ```
75 | #### (3). TokenBucketLimiter
76 | ``` go
77 | func WithTokenBucketLimiter(key string, limitRate time.Duration, maxCount int, waitTime time.Duration)
78 | limiter = NewLimiter(WithTokenBucketLimiter("api_ai", time.Second*5, 1, 2*time.Second))
79 | ```
80 | #### (4). RedisTokenLimiter
81 | ``` go
82 | func WithRedisTokenLimiter(rdb *redis.Client, key string, intervalPerPermit time.Duration, resetBucketInterval time.Duration,
83 | initToken int, MaxCount int)
84 | ```
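A sketch of creating one manually, using the bundled `NewRedisClient()` (which connects to `localhost:6379` with no password); the parameter values mirror the `global_rate_limiter` entry in the sample config below:
``` go
limiter = NewLimiter(WithRedisTokenLimiter(
	NewRedisClient(),      // *redis.Client
	"global_rate_limiter", // redis key
	200*time.Millisecond,  // one token generated every 200ms
	time.Hour,             // bucket reset interval
	500,                   // initial tokens
	1000,                  // bucket capacity
))
```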
85 | ### 2.Configuration
86 | ``` toml
87 | # RateLimiterService configuration file
88 |
89 | # TokenBucketLimiter
90 | [[Limiter]]
91 | Type = "TokenBucketLimiter"
92 | Key = "api_ai"
93 | LimitRate = "1s" # one token generated per second
94 | WaitTime = "500ms" # maximum wait time of 500 milliseconds
95 | MaxCount = 100 # maximum capacity of the token bucket
96 |
97 | # SlideWindowLimiter
98 | [[Limiter]]
99 | Type = "SlideWindowLimiter"
100 | Key = "user_login"
101 | UnitTime = "60s" # 60-second window
102 | SmallUnitTime = "1s" # 1-second sub-window
103 | MaxCount = 5 # maximum number of requests allowed per window
104 |
105 | [[Limiter]]
106 | Type = "FixedWindowLimiter"
107 | Key = "filedown"
108 | UnitTime = "1s" # 1-second window
109 | MaxCount = 10 # maximum number of requests allowed per window
110 |
111 | [[Limiter]]
112 | Type = "RedisTokenLimiter"
113 | Key = "global_rate_limiter" # redis key
114 | IntervalPerPermit = "200ms" # token generation interval
115 | ResetBucketInterval = "1h" # bucket reset interval
116 | MaxCount = 1000 # maximum capacity of the token bucket
117 | InitTokens = 500 # initial number of tokens
118 |
119 | ```
120 | In `func NewRateLimitService(path string, rdb *redis.Client)`, `path` is the path to the configuration file (see the example above). Every limiter must have a `Type` and a `Key`; the remaining parameters depend on the limiter type and are all required. `rdb` is the Redis client.
121 |
122 | ``` go
123 | func TestConfiguration(t *testing.T) {
124 | svr, _ := NewRateLimitService("", NewRedisClient())
125 | // use a specific limiter by its key
126 | res := svr.Limiters["api_ai"].TryAcquire(context.Background())
127 | if res.Ok {
128 | log.Println("allow")
129 | } else {
130 | log.Println("reject")
131 | }
132 | }
133 | ```
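### 3. WatchDog
`NewRateLimitService` also accepts extra `OptionFunc`s. `WithWatchDog(t)` attaches a watchdog that periodically logs the allow/reject counters of every limiter; intervals shorter than `DefaultWatchDogTimeout` (30s) fall back to that default. A minimal sketch, mirroring `TestWatchDog` in the tests:
``` go
svr, _ := NewRateLimitService("", NewRedisClient(), WithWatchDog(time.Second*30))
// every 30s the watchdog logs lines such as:
// watchDog: api_ai AllowCnt: 17 RejectCnt: 0
_ = svr
```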
--------------------------------------------------------------------------------
/app.log:
--------------------------------------------------------------------------------
1 | 2024/03/16 19:00:07 2024/03/16 19:00:07 Key:token Result:allow
2 | 2024/03/16 19:00:07 rejectCnt: 0
3 | 2024/03/16 19:00:07 accessCnt: 1
4 | 2024/03/16 19:00:07 accepted
5 | 2024/03/16 19:00:08 2024/03/16 19:00:08 Key:token Result:reject
6 | 2024/03/16 19:00:08 rejectCnt: 1
7 | 2024/03/16 19:00:08 accessCnt: 1
8 | 2024/03/16 19:00:08 rejected
9 | 2024/03/16 19:00:10 2024/03/16 19:00:10 Key:token Result:reject
10 | 2024/03/16 19:00:10 rejectCnt: 2
11 | 2024/03/16 19:00:10 accessCnt: 1
12 | 2024/03/16 19:00:10 rejected again
13 | 2024/03/16 19:00:11 2024/03/16 19:00:11 Key:token Result:allow
14 | 2024/03/16 19:00:11 rejectCnt: 2
15 | 2024/03/16 19:00:11 accessCnt: 2
16 | 2024/03/16 19:00:11 accepted
17 | 2024/03/16 19:04:51 Initializing TokenBucketLimiter: api_ai
18 | 2024/03/16 19:04:51 Initializing SlideWindowLimiter: user_login
19 | 2024/03/16 19:04:51 Initializing FixedWindowLimiter: filedown
20 | 2024/03/16 19:04:51 Initializing RedisTokenLimiter: global_rate_limiter
21 | 2024/03/16 19:05:21 watchDog tick
22 | 2024/03/16 19:05:21 watchDog: global_rate_limiter AllowCnt: 0 RejectCnt: 0
23 | 2024/03/16 19:05:21 watchDog: api_ai AllowCnt: 0 RejectCnt: 0
24 | 2024/03/16 19:05:21 watchDog: user_login AllowCnt: 0 RejectCnt: 0
25 | 2024/03/16 19:05:21 watchDog: filedown AllowCnt: 0 RejectCnt: 0
26 | 2024/03/16 19:05:22 2024/03/16 19:05:22 Key:api_ai Result:allow
27 | 2024/03/16 19:05:22 rejectCnt: 0
28 | 2024/03/16 19:05:22 accessCnt: 1
29 | 2024/03/16 19:05:22 accepted
30 | 2024/03/16 19:05:22 2024/03/16 19:05:22 Key:api_ai Result:allow
31 | 2024/03/16 19:05:22 rejectCnt: 0
32 | 2024/03/16 19:05:22 accessCnt: 2
33 | 2024/03/16 19:05:22 accepted
34 | 2024/03/16 19:05:23 2024/03/16 19:05:23 Key:api_ai Result:allow
35 | 2024/03/16 19:05:23 rejectCnt: 0
36 | 2024/03/16 19:05:23 accessCnt: 3
37 | 2024/03/16 19:05:23 accepted
38 | 2024/03/16 19:05:24 2024/03/16 19:05:24 Key:api_ai Result:allow
39 | 2024/03/16 19:05:24 rejectCnt: 0
40 | 2024/03/16 19:05:24 accessCnt: 4
41 | 2024/03/16 19:05:24 accepted
42 | 2024/03/16 19:05:24 2024/03/16 19:05:24 Key:api_ai Result:allow
43 | 2024/03/16 19:05:24 rejectCnt: 0
44 | 2024/03/16 19:05:24 accessCnt: 5
45 | 2024/03/16 19:05:24 accepted
46 | 2024/03/16 19:05:25 2024/03/16 19:05:25 Key:api_ai Result:allow
47 | 2024/03/16 19:05:25 rejectCnt: 0
48 | 2024/03/16 19:05:25 accessCnt: 6
49 | 2024/03/16 19:05:25 accepted
50 | 2024/03/16 19:05:25 2024/03/16 19:05:25 Key:api_ai Result:allow
51 | 2024/03/16 19:05:25 rejectCnt: 0
52 | 2024/03/16 19:05:25 accessCnt: 7
53 | 2024/03/16 19:05:25 accepted
54 | 2024/03/16 19:05:26 2024/03/16 19:05:26 Key:api_ai Result:allow
55 | 2024/03/16 19:05:26 rejectCnt: 0
56 | 2024/03/16 19:05:26 accessCnt: 8
57 | 2024/03/16 19:05:26 accepted
58 | 2024/03/16 19:05:27 2024/03/16 19:05:27 Key:api_ai Result:allow
59 | 2024/03/16 19:05:27 rejectCnt: 0
60 | 2024/03/16 19:05:27 accessCnt: 9
61 | 2024/03/16 19:05:27 accepted
62 | 2024/03/16 19:05:28 2024/03/16 19:05:28 Key:api_ai Result:allow
63 | 2024/03/16 19:05:28 rejectCnt: 0
64 | 2024/03/16 19:05:28 accessCnt: 10
65 | 2024/03/16 19:05:28 accepted
66 | 2024/03/16 19:05:29 2024/03/16 19:05:29 Key:api_ai Result:allow
67 | 2024/03/16 19:05:29 rejectCnt: 0
68 | 2024/03/16 19:05:29 accessCnt: 11
69 | 2024/03/16 19:05:29 accepted
70 | 2024/03/16 19:05:30 2024/03/16 19:05:30 Key:api_ai Result:allow
71 | 2024/03/16 19:05:30 rejectCnt: 0
72 | 2024/03/16 19:05:30 accessCnt: 12
73 | 2024/03/16 19:05:30 accepted
74 | 2024/03/16 19:05:30 2024/03/16 19:05:30 Key:api_ai Result:allow
75 | 2024/03/16 19:05:30 rejectCnt: 0
76 | 2024/03/16 19:05:30 accessCnt: 13
77 | 2024/03/16 19:05:30 accepted
78 | 2024/03/16 19:05:31 2024/03/16 19:05:31 Key:api_ai Result:allow
79 | 2024/03/16 19:05:31 rejectCnt: 0
80 | 2024/03/16 19:05:31 accessCnt: 14
81 | 2024/03/16 19:05:31 accepted
82 | 2024/03/16 19:05:31 2024/03/16 19:05:31 Key:api_ai Result:allow
83 | 2024/03/16 19:05:31 rejectCnt: 0
84 | 2024/03/16 19:05:31 accessCnt: 15
85 | 2024/03/16 19:05:31 accepted
86 | 2024/03/16 19:05:32 2024/03/16 19:05:32 Key:api_ai Result:allow
87 | 2024/03/16 19:05:32 rejectCnt: 0
88 | 2024/03/16 19:05:32 accessCnt: 16
89 | 2024/03/16 19:05:32 accepted
90 | 2024/03/16 19:05:34 2024/03/16 19:05:34 Key:api_ai Result:allow
91 | 2024/03/16 19:05:34 rejectCnt: 0
92 | 2024/03/16 19:05:34 accessCnt: 17
93 | 2024/03/16 19:05:34 accepted
94 | 2024/03/16 19:05:36 2024/03/16 19:05:36 Key:filedown Result:allow
95 | 2024/03/16 19:05:36 rejectCnt: 0
96 | 2024/03/16 19:05:36 accessCnt: 1
97 | 2024/03/16 19:05:36 accepted
98 | 2024/03/16 19:05:37 2024/03/16 19:05:37 Key:filedown Result:allow
99 | 2024/03/16 19:05:37 rejectCnt: 0
100 | 2024/03/16 19:05:37 accessCnt: 2
101 | 2024/03/16 19:05:37 accepted
102 | 2024/03/16 19:05:39 2024/03/16 19:05:39 Key:filedown Result:allow
103 | 2024/03/16 19:05:39 rejectCnt: 0
104 | 2024/03/16 19:05:39 accessCnt: 3
105 | 2024/03/16 19:05:39 accepted
106 | 2024/03/16 19:05:39 2024/03/16 19:05:39 Key:filedown Result:allow
107 | 2024/03/16 19:05:39 rejectCnt: 0
108 | 2024/03/16 19:05:39 accessCnt: 4
109 | 2024/03/16 19:05:39 accepted
110 | 2024/03/16 19:05:41 2024/03/16 19:05:41 Key:filedown Result:allow
111 | 2024/03/16 19:05:41 rejectCnt: 0
112 | 2024/03/16 19:05:41 accessCnt: 5
113 | 2024/03/16 19:05:41 accepted
114 | 2024/03/16 19:05:42 2024/03/16 19:05:42 Key:filedown Result:allow
115 | 2024/03/16 19:05:42 rejectCnt: 0
116 | 2024/03/16 19:05:42 accessCnt: 6
117 | 2024/03/16 19:05:42 accepted
118 | 2024/03/16 19:05:45 2024/03/16 19:05:45 Key:user_login Result:allow
119 | 2024/03/16 19:05:45 rejectCnt: 0
120 | 2024/03/16 19:05:45 accessCnt: 1
121 | 2024/03/16 19:05:45 accepted
122 | 2024/03/16 19:05:45 2024/03/16 19:05:45 Key:user_login Result:allow
123 | 2024/03/16 19:05:45 rejectCnt: 0
124 | 2024/03/16 19:05:45 accessCnt: 2
125 | 2024/03/16 19:05:45 accepted
126 | 2024/03/16 19:05:46 2024/03/16 19:05:46 Key:user_login Result:allow
127 | 2024/03/16 19:05:46 rejectCnt: 0
128 | 2024/03/16 19:05:46 accessCnt: 3
129 | 2024/03/16 19:05:46 accepted
130 | 2024/03/16 19:05:51 watchDog tick
131 | 2024/03/16 19:05:51 watchDog: api_ai AllowCnt: 17 RejectCnt: 0
132 | 2024/03/16 19:05:51 watchDog: user_login AllowCnt: 3 RejectCnt: 0
133 | 2024/03/16 19:05:51 watchDog: filedown AllowCnt: 6 RejectCnt: 0
134 | 2024/03/16 19:05:51 watchDog: global_rate_limiter AllowCnt: 0 RejectCnt: 0
135 |
--------------------------------------------------------------------------------
/countlimiter.go:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ajwlforever/go-ratelimit-manager/7aeb2b6dd1a41d06656df5dbc9fad76173da53af/countlimiter.go
--------------------------------------------------------------------------------
/fixedwindow.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "log"
6 | "sync"
7 | "time"
8 | )
9 |
10 | // fixed window rate-limiting algorithm
11 |
12 | // FixedWindowLimiter
13 | type FixedWindowLimiter struct {
14 | UnitTime time.Duration // window duration
15 | Count int // actual number of requests in the current window
16 | MaxCount int // maximum number of requests allowed per window
17 | mu sync.Mutex
18 | Key string
19 | Record *LimitRecord
20 | }
21 |
22 | // NewFixedWindowLimiter
23 | func NewFixedWindowLimiter(key string, unitTime time.Duration, maxCount int) *FixedWindowLimiter {
24 |
25 | f := &FixedWindowLimiter{
26 | Record: NewLimitRecord(),
27 | UnitTime: unitTime,
28 | Count: 0,
29 | MaxCount: maxCount,
30 | Key: key,
31 | }
32 | go f.resetWindow()
33 | return f
34 |
35 | }
36 |
37 | func (f *FixedWindowLimiter) resetWindow() {
38 | defer func() {
39 | if x := recover(); x != nil {
40 | log.Printf("Failed to reset window: %v", x)
41 | go f.resetWindow()
42 | }
43 | }()
44 | ticker := time.NewTicker(f.UnitTime)
45 | // log.Println("resetWindow")
46 | for range ticker.C {
47 | f.mu.Lock()
48 | // log.Println("reset window")
49 | f.Count = 0
50 | // f.LastReqTime = time.Now().Add(-f.UnitTime)
51 | f.mu.Unlock()
52 |
53 | }
54 | }
55 |
56 | func (limiter *FixedWindowLimiter) TryAcquire(ctx context.Context) (res LimitResult) {
57 | limiter.mu.Lock()
58 | defer limiter.mu.Unlock()
59 |
60 | if limiter.Count < limiter.MaxCount {
61 | limiter.Count += 1
62 | res.Ok = true
63 | limiter.record(res)
64 | return
65 | }
66 |
67 | // curTime := time.Now()
68 | res.Ok = false
69 | limiter.record(res)
70 | return
71 |
72 | }
73 | func (limiter *FixedWindowLimiter) GetRecord() *LimitRecord {
74 | return limiter.Record
75 | }
76 |
77 | func (l *FixedWindowLimiter) record(res LimitResult) {
78 | item := &Item{
79 | Timestamp: time.Now(),
80 | Key: l.Key,
81 | Allowed: res.Ok,
82 | Reason: "FixedWindowLimiter rejected",
83 | }
84 | l.Record.Save(item, DETAIL_LEVEL_1)
85 | log.Println(item.String())
86 | log.Println("rejectCnt: ", l.Record.rejectCnt)
87 | log.Println("accessCnt: ", l.Record.allowCnt)
88 | }
89 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/ajwlforever/go-ratelimit-manager
2 |
3 | go 1.19
4 |
5 | require (
6 | github.com/BurntSushi/toml v1.3.2
7 | github.com/duke-git/lancet/v2 v2.3.0
8 | github.com/go-redis/redis/v8 v8.11.5
9 | )
10 |
11 | require (
12 | github.com/cespare/xxhash/v2 v2.1.2 // indirect
13 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
14 | golang.org/x/exp v0.0.0-20221208152030-732eee02a75a // indirect
15 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
16 | golang.org/x/sys v0.5.0 // indirect
17 | )
18 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
2 | github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
3 | github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
4 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
5 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
6 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
7 | github.com/duke-git/lancet/v2 v2.3.0 h1:Ztie0qOnC4QgGYYqmpmQxbxkPcm54kqFXj1bwhiV8zg=
8 | github.com/duke-git/lancet/v2 v2.3.0/go.mod h1:zGa2R4xswg6EG9I6WnyubDbFO/+A/RROxIbXcwryTsc=
9 | github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
10 | github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
11 | github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
12 | github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
13 | github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
14 | github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
15 | golang.org/x/exp v0.0.0-20221208152030-732eee02a75a h1:4iLhBPcpqFmylhnkbY3W0ONLUYYkDAW9xMFLfxgsvCw=
16 | golang.org/x/exp v0.0.0-20221208152030-732eee02a75a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
17 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
18 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
19 | golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
20 | golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
21 | golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
22 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
23 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
24 |
--------------------------------------------------------------------------------
/leakybucket.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import "sync"
4 |
5 | // LeakyBukect
6 | // Cap: capacity of the bucket
7 | // AccessRate
8 | // todo LeakyBucket
9 | type LeakyBukect struct {
10 | Cap int64
11 | AccessRate float64
12 | InRate float64
13 |
14 | Mu sync.Mutex
15 | }
16 |
--------------------------------------------------------------------------------
/limitrecord.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "time"
7 | )
8 |
9 | var (
10 | DETAIL_LEVEL_1 = 0 // simple record (counters only)
11 | DETAIL_LEVEL_2 = 1 // detailed record (full items)
12 | )
13 |
14 | // Design: simple record vs. detailed record
15 | // LimitRecord keeps track of all rate-limiting decisions.
16 | // todo LimitRecord: record every rate-limiting decision.
17 | // A LimitRecord is embedded in each limiter struct
18 | type LimitRecord struct {
19 | allows []Item
20 | rejects []Item
21 | allowCnt int
22 | rejectCnt int
23 | mu sync.Mutex // guards the record
24 | }
25 |
26 | func NewLimitRecord() *LimitRecord {
27 | return &LimitRecord{
28 | allows: make([]Item, 0, 1000),
29 | rejects: make([]Item, 0, 1000),
30 | }
31 | }
32 |
33 | type Item struct {
34 | Timestamp time.Time `json:"timestamp"`
35 | Key string `json:"Key"`
36 | Allowed bool `json:"allowed"`
37 | Reason string `json:"reason,omitempty"` // omitted from the JSON output when Reason is empty
38 | }
39 |
40 | type Record interface {
41 | Save(item *Item, detailLevel int) // item is one request; detailLevel controls how much detail is recorded
42 | }
43 |
44 | // Save
45 | func (record *LimitRecord) Save(item *Item, detailLevel int) {
46 | switch {
47 | case detailLevel == DETAIL_LEVEL_1:
48 | record.easySave(item)
49 | case detailLevel == DETAIL_LEVEL_2:
50 | record.allSave(item)
51 | }
52 | }
53 |
54 | // easySave only updates the allow/reject counters
55 | func (record *LimitRecord) easySave(item *Item) {
56 | record.mu.Lock()
57 | defer record.mu.Unlock()
58 |
59 | if !item.Allowed {
60 | record.rejectCnt += 1
61 | } else {
62 | record.allowCnt += 1
63 | }
64 | }
65 |
66 | // allSave stores the entire item in the record
67 | func (record *LimitRecord) allSave(item *Item) {
68 | record.mu.Lock()
69 | defer record.mu.Unlock()
70 |
71 | if !item.Allowed {
72 | record.rejectCnt += 1
73 | record.rejects = append(record.rejects, *item)
74 | } else {
75 | record.allowCnt += 1
76 | record.allows = append(record.allows, *item)
77 | }
78 | }
79 |
80 | func (item Item) String() string {
81 | formatString := "2006/01/02 15:04:05"
82 | res := ""
83 | if item.Allowed {
84 | res = "allow"
85 | } else {
86 | res = "reject"
87 | }
88 | return fmt.Sprintf("%v Key:%v Result:%v", item.Timestamp.Format(formatString), item.Key, res)
89 |
90 | }
91 |
--------------------------------------------------------------------------------
/rate_test.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "bufio"
5 | "context"
6 | "io"
7 | "log"
8 | "math/rand"
9 | "net/http"
10 | "os"
11 | "path/filepath"
12 | "reflect"
13 | "testing"
14 | "time"
15 | )
16 |
17 | var c int64
18 | var rejcetCnt int64
19 | var acCnt int64
20 |
21 | func doA(limit Limiter) {
22 | c += 1
23 | ctx := context.Background()
24 | if !limit.TryAcquire(ctx).Ok {
25 | // log.Println("reject")
26 | rejcetCnt += 1
27 | return
28 | }
29 | acCnt += 1
30 | // log.Println("do")
31 | }
32 |
33 | func TestFixedWindow1(t *testing.T) {
34 | interval := time.Millisecond * 100 // 0.1s
35 | ticker := time.NewTicker(interval)
36 | // 5 requests per 1s window
37 | limiter := NewFixedWindowLimiter("f1", time.Second, 5)
38 | cnt := 0
39 | for range ticker.C {
40 | doA(limiter)
41 | cnt += 1
42 | if cnt == 1000 {
43 | ticker.Stop()
44 | break
45 | }
46 | }
47 |
48 | }
49 |
50 | func TestFixedWindow2(t *testing.T) {
51 | http.HandleFunc("/h", func(w http.ResponseWriter, r *http.Request) {
52 | w.Header().Set("Content-Type", "text/html")
53 | io.WriteString(w, "hello, world")
54 | })
55 |
56 | http.ListenAndServe("0.0.0.0:8080", nil)
57 | for {
58 | time.Sleep(time.Second)
59 | }
60 | }
61 |
62 | func TestTime(t *testing.T) {
63 | now := time.Now()
64 | time.Sleep(time.Second * 5)
65 | after := time.Now()
66 |
67 | log.Println(now)
68 | log.Println(after.Sub(now))
69 | // sub := after.Sub(now).Seconds()
70 |
71 | duration := time.Duration(after.Sub(now).Seconds() * float64(time.Second))
72 | log.Println("duration:", duration)
73 | // check whether two Durations are equal
74 |
75 | }
76 |
77 | func TestTimer(t *testing.T) {
78 | timer := time.NewTimer(time.Second)
79 | for {
80 | <-timer.C
81 | log.Println("timer")
82 | timer.Reset(time.Second)
83 | }
84 | }
85 |
86 | func TestTicker(t *testing.T) {
87 | ticker := time.NewTicker(time.Second)
88 | for range ticker.C {
89 | log.Println("timer")
90 | ticker.Reset(time.Second)
91 | }
92 | }
93 |
94 | func TestCalulateWindowCnt(t *testing.T) {
95 | s := calculateWindowCount(time.Hour, time.Millisecond*33)
96 | log.Println(s)
97 | }
98 |
99 | func TestSlidWindowLimiter(t *testing.T) {
100 | c = 0
101 | rejcetCnt = 0
102 | acCnt = 0
103 | limiter := NewSlideWindowLimiter("1", time.Second, time.Millisecond*100, 100)
104 |
105 | for i := 0; i < 20; i++ {
106 | go doACircu(limiter)
107 | }
108 | time.Sleep(time.Second * 10)
109 | log.Println("all count:", c)
110 | log.Println("rejectCount:", rejcetCnt)
111 | log.Println("accesscCount:", acCnt)
112 | log.Println("ac+rej:", acCnt+rejcetCnt)
113 |
114 | }
115 |
116 | func doACircu(limiter Limiter) {
117 | ticker := time.NewTicker(time.Microsecond * (100 + time.Duration(rand.Int31n(100))))
118 | for range ticker.C {
119 | doA(limiter)
120 | }
121 | }
122 |
123 | func TestSllep(t *testing.T) {
124 | log.Println(time.Now())
125 | time.Sleep(time.Second * 2)
126 | log.Println(time.Now())
127 | }
128 |
129 | func s(args ...interface{}) {
130 | log.Println(reflect.TypeOf(args))
131 | log.Println(args...)
132 | log.Println(args[0])
133 | }
134 | func TestDot(t *testing.T) {
135 | s([]string{"a", "b", "c", "d"})
136 | s(1, 23, 3, 4)
137 | }
138 |
139 | func TestConfiguration(t *testing.T) {
140 | svr, _ := NewRateLimitService("", NewRedisClient())
141 | // use a specific limiter by its key
142 | res := svr.Limiters["api_ai"].TryAcquire(context.Background())
143 | if res.Ok {
144 | log.Println("allow")
145 | } else {
146 | log.Println("reject")
147 | }
148 | }
149 |
150 | func Test123(t *testing.T) {
151 | // print the number of lines of code under the current directory
152 | test()
153 | }
154 |
155 | func test() {
156 | totalLines, err := countLinesInDir(".")
157 | if err != nil {
158 | log.Printf("Error counting lines: %s\n", err)
159 | return
160 | }
161 |
162 | log.Printf("Total lines of code: %d\n", totalLines)
163 | }
164 |
165 | // countLinesInDir returns the total line count of all Go files in the given directory and its subdirectories
166 | func countLinesInDir(dirPath string) (int, error) {
167 | totalLines := 0
168 |
169 | err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
170 | if err != nil {
171 | return err
172 | }
173 |
174 | if info.IsDir() {
175 | return nil
176 | }
177 |
178 | if filepath.Ext(path) == ".go" {
179 | lines, err := countLinesInFile(path)
180 | if err != nil {
181 | return err
182 | }
183 | totalLines += lines
184 | }
185 |
186 | return nil
187 | })
188 |
189 | if err != nil {
190 | return 0, err
191 | }
192 |
193 | return totalLines, nil
194 | }
195 |
196 | // countLinesInFile returns the number of lines in the file
197 | func countLinesInFile(filePath string) (int, error) {
198 | file, err := os.Open(filePath)
199 | if err != nil {
200 | return 0, err
201 | }
202 | defer file.Close()
203 |
204 | scanner := bufio.NewScanner(file)
205 | lines := 0
206 | for scanner.Scan() {
207 | lines++
208 | }
209 |
210 | if err := scanner.Err(); err != nil {
211 | return 0, err
212 | }
213 |
214 | return lines, nil
215 | }
216 |
217 | func TestWatchDog(t *testing.T) {
218 | svr, _ := NewRateLimitService("", NewRedisClient(), WithWatchDog(time.Second*5))
219 | log.Println(svr)
220 | time.Sleep(time.Hour * 5)
221 | }
222 |
--------------------------------------------------------------------------------
/ratelimit.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "log"
6 | "time"
7 |
8 | "github.com/BurntSushi/toml"
9 | "github.com/duke-git/lancet/v2/maputil"
10 | "github.com/go-redis/redis/v8"
11 | )
12 |
13 | var (
14 | ConfigPath = "ratelimit_config.toml"
15 | )
16 |
17 | type RateLimitConfig struct {
18 | Limiters []struct {
19 | Type string `toml:"Type"`
20 | Key string `toml:"Key"`
21 | LimitRate string `toml:"LimitRate,omitempty"`
22 | WaitTime string `toml:"WaitTime,omitempty"`
23 | MaxCount int `toml:"MaxCount"`
24 | UnitTime string `toml:"UnitTime,omitempty"`
25 | SmallUnitTime string `toml:"SmallUnitTime,omitempty"`
26 | IntervalPerPermit string `toml:"IntervalPerPermit,omitempty"`
27 | ResetBucketInterval string `toml:"ResetBucketInterval,omitempty"`
28 | InitTokens int `toml:"InitTokens,omitempty"`
29 | } `toml:"Limiter"`
30 | }
31 | type LimiterOption func() Limiter
32 |
33 | type OptionFunc func(svr *RateLimitService)
34 | type RateLimitService struct {
35 | Limiters map[string]Limiter
36 | WatchDog *watchDog
37 | }
38 | type Limiter interface {
39 | TryAcquire(ctx context.Context) LimitResult
40 | // some limiters need the key to fetch distributed tokens
41 | // todo StopLimiter
42 | GetRecord() *LimitRecord
43 | }
44 |
45 | type LimitResult struct {
46 | Ok bool
47 | WaitTime time.Duration
48 | }
49 |
50 | func NewRateLimitService(path string, rdb *redis.Client, ops ...OptionFunc) (svr *RateLimitService, err error) {
51 | if path != "" {
52 | ConfigPath = path
53 | }
54 | var config RateLimitConfig
55 | // read the configuration file
56 | if _, err = toml.DecodeFile(ConfigPath, &config); err != nil {
57 | log.Println(err)
58 | return
59 | }
60 | svr = &RateLimitService{
61 | Limiters: make(map[string]Limiter),
62 | }
63 |
64 | // todo log change to log
65 | // inject the configured Limiters into svr
66 | for idx, c := range config.Limiters {
67 | switch c.Type {
68 | case "TokenBucketLimiter":
69 | if paramsCheck(c.LimitRate, c.WaitTime, c.Key) && c.MaxCount > 0 {
70 | // initialize a TokenBucketLimiter from the config
71 | log.Println("Initializing TokenBucketLimiter:", c.Key)
72 | var lr, wt time.Duration
73 | lr, err = time.ParseDuration(c.LimitRate)
74 | wt, err2 := time.ParseDuration(c.WaitTime)
75 | if err != nil || err2 != nil {
76 | panicInitRLConfig(idx)
77 | }
78 | svr.Limiters[c.Key] = NewLimiter(WithTokenBucketLimiter(
79 | c.Key,
80 | lr,
81 | c.MaxCount,
82 | wt,
83 | ))
84 | } else {
85 | panicInitRLConfig(idx)
86 | }
87 | case "SlideWindowLimiter":
88 | if paramsCheck(c.Key, c.UnitTime, c.SmallUnitTime) && c.MaxCount > 0 {
89 | // initialize a SlideWindowLimiter from the config
90 | log.Println("Initializing SlideWindowLimiter:", c.Key)
91 | var ut, st time.Duration
92 | ut, err = time.ParseDuration(c.UnitTime)
93 | st, err2 := time.ParseDuration(c.SmallUnitTime)
94 | if err != nil || err2 != nil {
95 | panicInitRLConfig(idx)
96 | }
97 | svr.Limiters[c.Key] = NewLimiter(WithSlideWindowLimiter(
98 | c.Key, ut, st, c.MaxCount,
99 | ))
100 | } else {
101 | panicInitRLConfig(idx)
102 | }
103 | case "FixedWindowLimiter":
104 | if paramsCheck(c.Key, c.UnitTime) && c.MaxCount > 0 {
105 | log.Println("Initializing FixedWindowLimiter:", c.Key)
106 | var ut time.Duration
107 | ut, err = time.ParseDuration(c.UnitTime)
108 | if err != nil {
109 | panicInitRLConfig(idx)
110 | }
111 | svr.Limiters[c.Key] = NewLimiter(WithFixedWindowLimiter(
112 | c.Key, ut, c.MaxCount,
113 | ))
114 | } else {
115 | panicInitRLConfig(idx)
116 | }
117 | case "RedisTokenLimiter":
118 | if paramsCheck(c.Key, c.IntervalPerPermit, c.ResetBucketInterval) && c.MaxCount > 0 && c.InitTokens <= c.MaxCount {
119 | // initialize a RedisTokenLimiter from the config
120 | log.Println("Initializing RedisTokenLimiter:", c.Key)
121 | // make sure rdb is passed into the RedisTokenLimiter constructor
122 | var interval, reset time.Duration
123 | interval, err = time.ParseDuration(c.IntervalPerPermit)
124 | reset, err2 := time.ParseDuration(c.ResetBucketInterval)
125 | if err != nil || err2 != nil {
126 | panicInitRLConfig(idx)
127 | }
128 | svr.Limiters[c.Key] = NewLimiter(WithRedisTokenLimiter(
129 | rdb, c.Key, interval, reset, c.InitTokens, c.MaxCount,
130 | ))
131 | } else {
132 | panicInitRLConfig(idx)
133 | }
134 | default:
135 | log.Println("Unknown Limiter Type:", c.Type)
136 | }
137 | }
138 |
139 | // OptionFunc extensions -- used to add new capabilities
140 | for _, f := range ops {
141 | f(svr)
142 | }
143 | if svr.WatchDog != nil {
144 | // start WatchDog
145 | wd := svr.WatchDog
146 | wd.Start(svr.outputRecords())
147 | }
148 | return
149 |
150 | }
151 | func (svr *RateLimitService) outputRecords() watchSomthing {
152 | return func() {
153 | maputil.ForEach(svr.Limiters, func(key string, l Limiter) {
154 | record := l.GetRecord()
155 | log.Println("watchDog:", key, " AllowCnt:", record.allowCnt, " RejectCnt:", record.rejectCnt)
156 | })
157 | }
158 | }
159 | func WithWatchDog(t time.Duration) OptionFunc {
160 | return func(svr *RateLimitService) {
161 | if t < DefaultWatchDogTimeout {
162 | // values below DefaultWatchDogTimeout fall back to the default check interval
163 | t = DefaultWatchDogTimeout
164 | }
165 | wd := newWatchDog(t)
166 | svr.WatchDog = wd
167 | }
168 | }
169 |
170 | // Option that creates a fixed window limiter
171 | func WithFixedWindowLimiter(key string, unitTime time.Duration, maxCount int) LimiterOption {
172 | return func() Limiter {
173 | limiter := NewFixedWindowLimiter(key, unitTime, maxCount)
174 | return limiter
175 | }
176 | }
177 |
178 | // Option that creates a sliding window limiter
179 | func WithSlideWindowLimiter(key string, unitTime time.Duration, smallUnitTime time.Duration, maxCount int) LimiterOption {
180 | return func() Limiter {
181 | limiter := NewSlideWindowLimiter(key, unitTime, smallUnitTime, maxCount)
182 | return limiter
183 | }
184 | }
185 |
186 | // Option that creates a token bucket limiter
187 | func WithTokenBucketLimiter(key string, limitRate time.Duration, maxCount int, waitTime time.Duration) LimiterOption {
188 | return func() Limiter {
189 | limiter := NewTokenBucketLimiter(key, limitRate, maxCount, waitTime)
190 | return limiter
191 | }
192 | }
193 |
194 | // Option that creates a Redis-based distributed limiter
195 | func WithRedisTokenLimiter(rdb *redis.Client, key string, intervalPerPermit time.Duration, resetBucketInterval time.Duration,
196 | initToken int, MaxCount int) LimiterOption {
197 | return func() Limiter {
198 | limiter := NewRedisTokenLimiter(rdb, key, intervalPerPermit, resetBucketInterval,
199 | initToken, MaxCount)
200 | return limiter
201 | }
202 |
203 | }
204 |
205 | func NewLimiter(option LimiterOption) Limiter {
206 | return option()
207 | }
208 |
209 | func paramsCheck(ss ...string) bool {
210 | for _, s := range ss {
211 | if s == "" {
212 | return false
213 | }
214 | }
215 | return true
216 | }
217 |
218 | func panicInitRLConfig(idx int) {
219 | log.Printf("init RateLimitConfiguration error, look at the %v limiter", idx+1)
220 | panic("init RateLimitConfiguration error")
221 | }
222 |
--------------------------------------------------------------------------------
/ratelimit_config.toml:
--------------------------------------------------------------------------------
1 | # RateLimiterService configuration file
2 |
3 | # TokenBucketLimiter
4 | [[Limiter]]
5 | Type = "TokenBucketLimiter"
6 | Key = "api_ai"
7 | LimitRate = "1s" # one token generated per second
8 | WaitTime = "500ms" # maximum wait time of 500 milliseconds
9 | MaxCount = 100 # maximum capacity of the token bucket
10 |
11 | # SlideWindowLimiter
12 | [[Limiter]]
13 | Type = "SlideWindowLimiter"
14 | Key = "user_login"
15 | UnitTime = "60s" # 60-second window
16 | SmallUnitTime = "1s" # 1-second sub-window
17 | MaxCount = 5 # maximum number of requests allowed per window
18 |
19 | [[Limiter]]
20 | Type = "FixedWindowLimiter"
21 | Key = "filedown"
22 | UnitTime = "1s" # 1-second window
23 | MaxCount = 10 # maximum number of requests allowed per window
24 |
25 | [[Limiter]]
26 | Type = "RedisTokenLimiter"
27 | Key = "global_rate_limiter" # redis key
28 | IntervalPerPermit = "200ms" # token generation interval
29 | ResetBucketInterval = "1h" # bucket reset interval
30 | MaxCount = 1000 # maximum capacity of the token bucket
31 | InitTokens = 500 # initial number of tokens
32 |
--------------------------------------------------------------------------------
/redistoken_test.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "log"
6 | "strconv"
7 | "testing"
8 | "time"
9 | )
10 |
11 | func TestRedisToken1(t *testing.T) {
12 | l := NewRedisTokenLimiter(
13 | NewRedisClient(),
14 | "test4",
15 | time.Second,
16 | time.Hour,
17 | 1,
18 | 100,
19 | )
20 | ctx := context.Background()
21 | timer := time.NewTicker(time.Second * 2)
22 | for range timer.C {
23 | log.Println(l.TryAcquire(ctx))
24 | }
25 | log.Println(l.TryAcquire(ctx))
26 | }
27 |
28 | func Test1(t *testing.T) {
29 | //td := time.Second
30 | log.Println(strconv.Itoa((100)))
31 | }
32 |
--------------------------------------------------------------------------------
/redistokenlimiter.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "io"
6 | "log"
7 | "os"
8 | "strconv"
9 | "time"
10 |
11 | "github.com/go-redis/redis/v8"
12 | )
13 |
14 | type RedisTokenLimiter struct {
15 | rdb *redis.Client
16 | intervalPerPermit time.Duration // interval at which tokens are generated
17 | resetBucketInterval time.Duration // interval at which the bucket is reset
18 | MaxCount int
19 | initTokens int
20 | key string
21 | Record *LimitRecord
22 | }
23 |
24 | func (r *RedisTokenLimiter) toParams() []any {
25 | res := make([]any, 0, 5)
26 | res = append(res, int64(r.intervalPerPermit/time.Millisecond)) // token generation interval, converted to ms
27 | res = append(res, time.Now().UnixMilli()) // current time in ms
28 | res = append(res, strconv.Itoa(r.initTokens)) // number of tokens the bucket is initialized with
29 | res = append(res, strconv.Itoa(r.MaxCount)) // upper limit of the token bucket
30 | res = append(res, int64(r.resetBucketInterval/time.Millisecond)) // interval for resetting the bucket, converted to ms
31 |
32 | return res
33 | }
34 |
35 | func (r *RedisTokenLimiter) TryAcquire(ctx context.Context) (res LimitResult) {
36 | params := r.toParams()
37 |
38 | luaPath := "tokenbucket.lua"
39 | file, _ := os.Open(luaPath)
40 | luas, _ := io.ReadAll(file)
41 | log.Println(params...)
42 | tokenScript := redis.NewScript(string(luas))
43 | n, err := tokenScript.Eval(ctx, r.rdb, []string{r.key}, params...).Result()
44 | if err != nil {
45 | panic("failed to exec lua script: " + err.Error())
46 | }
47 | log.Println("remaining tokens: ", n)
48 | if n.(int64) <= 0 {
49 | res.Ok = false
50 | } else {
51 | res.Ok = true
52 | }
53 | r.record(res)
54 | return
55 | }
56 |
57 | func (limiter *RedisTokenLimiter) GetRecord() *LimitRecord {
58 | return limiter.Record
59 | }
60 |
61 | // record stores the final result of an acquire attempt
62 | func (s *RedisTokenLimiter) record(res LimitResult) {
63 | item := &Item{
64 | Timestamp: time.Now(),
65 | Key: s.key,
66 | Allowed: res.Ok,
67 | Reason: "RedisTokenLimiter rejected",
68 | }
69 | s.Record.Save(item, DETAIL_LEVEL_1)
70 | log.Println(item.String())
71 | log.Println("rejectCnt: ", s.Record.rejectCnt)
72 | log.Println("accessCnt: ", s.Record.allowCnt)
73 | }
74 |
75 | func NewRedisTokenLimiter(rdb *redis.Client, key string, intervalPerPermit time.Duration, resetBucketInterval time.Duration,
76 | initToken int, MaxCount int) *RedisTokenLimiter {
77 |
78 | limiter := &RedisTokenLimiter{
79 | rdb: rdb, // todo: make the redis client configurable
80 | key: key,
81 | resetBucketInterval: resetBucketInterval,
82 | initTokens: initToken,
83 | MaxCount: MaxCount,
84 | intervalPerPermit: intervalPerPermit,
85 | Record: NewLimitRecord(),
86 | }
87 | return limiter
88 |
89 | }
90 |
91 | func NewRedisClient() *redis.Client {
92 | // create redis client
93 | rdb := redis.NewClient(&redis.Options{
94 | Addr: "localhost:6379",
95 | Password: "", // no password set
96 | DB: 0, // use default DB
97 | })
98 | return rdb
99 | }
100 |
101 | // todo: distributed rate limiting with Redis + Lua script
102 | type rediser interface {
103 | Set(ctx context.Context, key string, value any, ttl time.Duration) *redis.StatusCmd
104 | }
105 |
--------------------------------------------------------------------------------
/slidewindow.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "log"
6 | "sync"
7 | "time"
8 | )
9 |
10 | type SlideWindowLimiter struct {
11 | UnitTime time.Duration // window duration
12 | SmallUnitTime time.Duration // sub-window duration
13 | Cnts []int // request count per sub-window - fixed size, simulating a circular queue
14 | Index int // current index in the circular queue
15 | Count int // actual number of requests in the window
16 | MaxCount int // maximum number of requests allowed per window
17 | Mu sync.Mutex
18 | Record *LimitRecord
19 | Key string
20 | }
21 |
22 | func NewSlideWindowLimiter(key string, unitTime time.Duration, smallUnitTime time.Duration, maxCount int) *SlideWindowLimiter {
23 |
24 | windowCount := calculateWindowCount(unitTime, smallUnitTime)
25 | s := &SlideWindowLimiter{
26 | Key: key,
27 | UnitTime: unitTime,
28 | SmallUnitTime: smallUnitTime,
29 | MaxCount: maxCount,
30 | Cnts: make([]int, windowCount),
31 | Index: 0,
32 | Record: NewLimitRecord(),
33 | }
34 |
35 | go s.slideWindow()
36 | return s
37 | }
38 |
39 | func (s *SlideWindowLimiter) slideWindow() {
40 | defer func() {
41 | if x := recover(); x != nil {
42 | log.Printf("Failed to slide window: %v", x)
43 | go s.slideWindow()
44 | }
45 | }()
46 | ticker := time.NewTicker(s.SmallUnitTime) // slide once every sub-window interval
48 | for range ticker.C {
49 | s.Mu.Lock()
50 | // slide: drop the oldest sub-window
51 | s.Count -= s.Cnts[s.Index]
52 | s.Cnts[s.Index] = 0
53 | s.Index++
54 | // log.Println(s.Count)
55 | if s.Index >= len(s.Cnts) {
56 | s.Index = 0
57 | }
58 | s.Mu.Unlock()
59 |
60 | }
61 | }
62 | func (s *SlideWindowLimiter) TryAcquire(ctx context.Context) (res LimitResult) {
63 | s.Mu.Lock()
64 | defer s.Mu.Unlock()
65 | if s.Count < s.MaxCount {
66 | s.Count += 1
67 | s.Cnts[s.Index] += 1
68 | res.Ok = true
69 | s.record(res)
70 | return
71 | }
72 | res.Ok = false
73 | s.record(res)
74 | return
75 | }
76 |
77 | func (limiter *SlideWindowLimiter) GetRecord() *LimitRecord {
78 | return limiter.Record
79 | }
80 |
81 | // record stores the final result of an acquire attempt
82 | func (s *SlideWindowLimiter) record(res LimitResult) {
83 | item := &Item{
84 | Timestamp: time.Now(),
85 | Key: s.Key,
86 | Allowed: res.Ok,
87 | Reason: "SlideWindowLimiter rejected",
88 | }
89 | s.Record.Save(item, DETAIL_LEVEL_1)
90 | log.Println(item.String())
91 | log.Println("rejectCnt: ", s.Record.rejectCnt)
92 | log.Println("accessCnt: ", s.Record.allowCnt)
93 | }
94 |
95 | // calculateWindowCount returns how many sub-windows of smallUnitTime fit into unitTime
96 | func calculateWindowCount(unitTime time.Duration, smallUnitTime time.Duration) int {
97 | return int(unitTime / smallUnitTime)
98 | }
99 |
--------------------------------------------------------------------------------
/time_rate.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
--------------------------------------------------------------------------------
/tokenbucket.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "log"
6 | "sync"
7 | "time"
8 | )
9 |
10 | // TokenBucketLimiter
11 | type TokenBucketLimiter struct {
12 | LimitRate time.Duration // time to produce one token
13 | TokenChan chan struct{} // token channel, effectively the bucket
14 | WaitTime time.Duration // how long a request may wait when no token is available
15 | MaxCount int // capacity of the token bucket
16 |
17 | Mu *sync.Mutex // mutex guarding the bucket, for thread safety
18 | Stop bool // stop flag that shuts down the token bucket
19 | Key string
20 | Record *LimitRecord
21 | }
27 |
28 | // NewTokenBucketLimiter
29 | func NewTokenBucketLimiter(key string, limitRate time.Duration, maxCount int, waitTime time.Duration) *TokenBucketLimiter {
30 | if maxCount < 1 {
31 | panic("token bucket capacity must be at least 1")
32 | }
33 | l := &TokenBucketLimiter{
34 | Record: NewLimitRecord(),
35 | Key: key,
36 | LimitRate: limitRate,
37 | TokenChan: make(chan struct{}, maxCount),
38 | WaitTime: waitTime,
39 | MaxCount: maxCount,
40 | Mu: &sync.Mutex{},
41 | Stop: false,
42 | }
43 | go l.Start()
44 | return l
45 | }
46 |
47 | // Start starts the limiter
48 | func (b *TokenBucketLimiter) Start() {
49 | go b.produceToken()
50 | // todo: make the rate adjustable at runtime
51 | }
52 |
53 | // produceToken
54 | func (b *TokenBucketLimiter) produceToken() {
55 | // produce tokens at a fixed rate
56 | ticker := time.NewTicker(b.LimitRate)
57 | for range ticker.C {
58 | b.Mu.Lock()
59 | if b.Stop {
60 | b.Mu.Unlock()
61 | return
62 | }
63 | // log.Println(time.Now())
64 | if cap(b.TokenChan) == len(b.TokenChan) {
65 | // the bucket is full, drop the token
66 | } else {
67 | b.TokenChan <- struct{}{}
68 | }
69 |
70 | b.Mu.Unlock()
71 | }
72 | }
73 |
74 | func (b *TokenBucketLimiter) TryAcquire(ctx context.Context) (res LimitResult) {
75 | // log.Println(time.Now())
76 | select {
77 | case <-b.TokenChan:
78 | res.Ok = true
79 | b.record(res)
80 | return
81 | default:
82 | // no token available, give up immediately
83 | res.Ok = false
84 | res.WaitTime = b.WaitTime
85 | b.record(res)
86 | return
87 | }
88 | }
89 |
90 | func (limiter *TokenBucketLimiter) GetRecord() *LimitRecord {
91 | return limiter.Record
92 | }
93 |
94 | func (s *TokenBucketLimiter) record(res LimitResult) {
95 | item := &Item{
96 | Timestamp: time.Now(),
97 | Key: s.Key,
98 | Allowed: res.Ok,
99 | Reason: "TokenBucketLimiter rejected",
100 | }
101 | s.Record.Save(item, DETAIL_LEVEL_1)
102 | log.Println(item.String())
103 | log.Println("rejectCnt: ", s.Record.rejectCnt)
104 | log.Println("accessCnt: ", s.Record.allowCnt)
105 | }
106 |
107 | // todo StopLimiter
108 | func (b *TokenBucketLimiter) StopLimiter() {
109 | close(b.TokenChan)
110 | }
111 |
--------------------------------------------------------------------------------
/tokenbucket.lua:
--------------------------------------------------------------------------------
1 | --[[
2 | 1. key - key of the token bucket
3 | 2. intervalPerTokens - interval between generated tokens (ms)
4 | 3. curTime - current time
5 | 4. initTokens - number of tokens the bucket is initialized with
6 | 5. MaxCount - upper limit of the token bucket
7 | 6. resetBucketInterval - interval for resetting the tokens in the bucket
8 | 7. currentTokens - current number of tokens in the bucket
9 | 8. bucket - the token bucket object for the current key
10 | ]] --
11 |
12 | local key = KEYS[1]
13 | local intervalPerTokens = tonumber(ARGV[1])
14 | local curTime = tonumber(ARGV[2])
15 | local initTokens = tonumber(ARGV[3])
16 | local MaxCount = tonumber(ARGV[4])
17 | local resetBucketInterval = tonumber(ARGV[5])
18 | local bucket = redis.call('hgetall', key)
19 | local currentTokens
20 |
21 | -- if the bucket has not been initialized yet, initialize it first
22 | if table.maxn(bucket) == 0 then
23 | -- initial tokens in the bucket
24 | currentTokens = initTokens
25 | -- record that the bucket was last refilled now
26 | redis.call('hset', key, 'lastRefillTime', curTime)
27 | -- set the bucket's expiry to 1.5 times the reset interval
28 | redis.call('pexpire', key, resetBucketInterval * 1.5)
29 |
30 | -- if the bucket is already initialized, compute the tokens it should hold
31 | -- why 4? because there are two field/value pairs, so the flattened length is 4:
32 | -- { "lastRefillTime (last refill time)", "<its value>", "tokensRemaining (tokens kept)", "<token count>" }
33 | elseif table.maxn(bucket) == 4 then
34 |
35 | -- time of the last refill
36 | local lastRefillTime = tonumber(bucket[2])
37 | -- remaining tokens
38 | local tokensRemaining = tonumber(bucket[4])
39 |
40 | -- the current time is later than the last refill time
41 | if curTime > lastRefillTime then
42 |
43 | -- time elapsed since the last refill
44 | -- example: curTime = 2620, lastRefillTime = 2000, intervalSinceLast = 620
45 | local intervalSinceLast = curTime - lastRefillTime
46 |
47 | -- if the elapsed time is greater than the reset interval
48 | -- example: intervalSinceLast = 620, resetBucketInterval = 1000
49 | if intervalSinceLast > resetBucketInterval then
50 |
51 | -- refill the bucket completely
52 | currentTokens = initTokens
53 |
54 | -- update the refill time
55 | redis.call('hset', key, 'lastRefillTime', curTime)
56 |
57 | -- if the elapsed time is smaller than the reset interval
58 | else
59 |
60 | -- grantable tokens = floor(time since last refill / interval between two token permits)
61 | -- example: intervalPerTokens = 200 ms, i.e. one token every 200ms
62 | -- intervalSinceLast = 620 ms since the last refill
63 | -- grantedTokens = 620/200 = 3.1 -> 3
64 | local grantedTokens = math.floor(intervalSinceLast / intervalPerTokens)
65 |
66 | -- when grantable tokens > 0
67 | -- example: grantedTokens = 620/200 = 3.1 -> 3
68 | if grantedTokens > 0 then
69 |
70 | -- leftover time = time since last refill % interval between two token permits
71 | -- example: padMillis = 620 % 200 = 20
72 | -- curTime = 2620
73 | -- curTime - padMillis = 2600
74 | local padMillis = math.fmod(intervalSinceLast, intervalPerTokens)
75 |
76 | -- move the bucket's refill time back to the last full token boundary
77 | redis.call('hset', key, 'lastRefillTime', curTime - padMillis)
78 | end
79 |
80 | -- update the number of tokens in the bucket:
81 | -- min(tokens generated over time + remaining tokens, bucket limit) => tokens beyond the cap are discarded
82 | currentTokens = math.min(grantedTokens + tokensRemaining, MaxCount)
83 | end
84 | else
85 | -- if the current time is before or equal to the last refill time, the bucket was just initialized;
86 | -- keep the current token count and do not refill
87 | currentTokens = tokensRemaining
88 | end
89 | end
90 |
91 | -- if the bucket would hold a negative number of tokens, raise an error
92 | assert(currentTokens >= 0)
93 |
94 | -- if there are 0 tokens, store the count and return 0
95 | if currentTokens == 0 then
96 | redis.call('hset', key, 'tokensRemaining', currentTokens)
97 | return 0
98 | else
99 | -- if there is at least one token, decrement the stored count by 1 and return the previous count
100 | redis.call('hset', key, 'tokensRemaining', currentTokens - 1)
101 | return currentTokens
102 | end
--------------------------------------------------------------------------------
/watchdog.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "log"
5 | "time"
6 | )
7 |
8 | var DefaultWatchDogTimeout = 30 * time.Second
9 |
10 | // A background mechanism that runs periodic checks or tasks,
11 | // e.g. monitoring limiter state, auto-tuning limits, writing logs, or cleaning up expired limiter instances.
12 | type watchDog struct {
13 | ticker *time.Ticker
14 | stopCh chan struct{}
15 | }
16 |
17 | func newWatchDog(d time.Duration) *watchDog {
18 | return &watchDog{
19 | ticker: time.NewTicker(d),
20 | stopCh: make(chan struct{}),
21 | }
22 | }
23 |
24 | type watchSomthing func()
25 |
26 | func (wd *watchDog) Start(ops ...watchSomthing) {
27 | go wd.watch(ops...)
28 | }
29 |
30 | func (wd *watchDog) watch(ops ...watchSomthing) {
31 | defer func() {
32 | if x := recover(); x != nil {
33 | log.Printf("Restart WatchDog: %v\n", x)
34 | wd.Start(ops...)
35 | }
36 | }()
38 |
39 | for {
40 | select {
41 | case <-wd.ticker.C:
42 | // todo: watchDog executes your periodic tasks here
43 | log.Println("watchDog tick")
44 | for _, op := range ops {
45 | op()
46 | }
47 | case <-wd.stopCh:
48 | return
49 | }
50 | }
51 | }
52 |
53 | // Stop stops the watchDog
54 | func (wd *watchDog) Stop() {
55 | close(wd.stopCh)
56 | wd.ticker.Stop()
57 | }
58 |
--------------------------------------------------------------------------------
/web.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "context"
5 | "io"
6 | "log"
7 | "net/http"
8 | "time"
9 | )
10 |
11 | // web server using the rate-limit middleware
12 | // used to test performance
13 |
14 | var limiterSvr *RateLimitService
15 |
16 | type MiddleWire func(http.HandlerFunc) http.HandlerFunc
17 |
18 | // rate-limiting middleware backed by the limiter registered under key
19 | func RateLimiting(key string) MiddleWire {
20 | return func(f http.HandlerFunc) http.HandlerFunc {
21 |
22 | return func(w http.ResponseWriter, r *http.Request) {
23 | // apply rate limiting
24 | ctx := context.Background()
25 | res := limiterSvr.Limiters[key].TryAcquire(ctx)
26 | if !res.Ok {
27 | log.Println("rejected")
28 | // some rate-limiting strategies allow the request to retry after WaitTime
29 | if res.WaitTime != 0 {
30 | time.Sleep(res.WaitTime)
31 | if res = limiterSvr.Limiters[key].TryAcquire(ctx); !res.Ok {
32 | log.Println("rejected again")
33 | w.WriteHeader(http.StatusTooManyRequests)
34 | return
35 | }
36 | } else {
37 | w.WriteHeader(http.StatusTooManyRequests)
38 | return
39 | }
40 | }
41 |
42 | log.Println("accepted")
43 | // call the next HandlerFunc
44 | f(w, r)
45 |
46 | }
47 | }
48 | }
49 |
50 | func sayHello(w http.ResponseWriter, r *http.Request) {
51 | // log.Println("ing")
52 | w.Header().Set("Content-Type", "text/html")
53 | io.WriteString(w, "hello, world")
54 | return
55 | }
56 |
57 | // wrap f with the middlewares in order, layer by layer
58 | func ChaninFunc(f http.HandlerFunc, middleWires ...MiddleWire) http.HandlerFunc {
59 | for _, mw := range middleWires {
60 | f = mw(f)
61 | }
62 |
63 | return f
64 | }
65 |
66 | func StartWeb() {
67 | // sliding window: 10s main window, 1s sub-window
68 | key1 := "slide1"
69 | limiterSvr.Limiters[key1] = NewSlideWindowLimiter(key1, time.Second*10, time.Second*1, 100)
70 | // fixed window: only one request allowed every 5s
71 | key2 := "fixed" // use the key to give a specific endpoint its own limiter
72 | limiterSvr.Limiters[key2] = NewLimiter(WithFixedWindowLimiter(key2, time.Second*5, 1))
73 | key3 := "token"
74 | // one token every 5s, bucket holds at most 1 token; a request without a token may wait 2s
75 | limiterSvr.Limiters[key3] = NewLimiter(WithTokenBucketLimiter(key3, time.Second*5, 1, 2*time.Second))
76 | //
77 | key4 := "redis"
78 | limiterSvr.Limiters[key4] = NewRedisTokenLimiter(
79 | NewRedisClient(),
80 | key4,
81 | time.Second,
82 | time.Hour,
83 | 1,
84 | 100,
85 | )
86 | http.HandleFunc("/slide1", ChaninFunc(sayHello, RateLimiting(key1)))
87 | http.HandleFunc("/fixed", ChaninFunc(sayHello, RateLimiting(key2)))
88 | http.HandleFunc("/token", ChaninFunc(sayHello, RateLimiting(key3)))
89 | http.HandleFunc("/redis", ChaninFunc(sayHello, RateLimiting(key4)))
90 |
91 | err := http.ListenAndServe(":8080", nil)
92 | if err != nil {
93 | log.Println(" http.ListenAndServe Error: ")
94 | panic(err)
95 | }
96 |
97 | }
98 |
99 | func StartWebConfigurationAndWatchDog() {
100 | key1 := "api_ai"
101 | key2 := "user_login"
102 | key3 := "filedown"
103 | key4 := "global_rate_limiter"
104 | limiterSvr, _ = NewRateLimitService("", NewRedisClient(), WithWatchDog(time.Second*5))
105 | http.HandleFunc("/slide1", ChaninFunc(sayHello, RateLimiting(key2)))
106 | http.HandleFunc("/fixed", ChaninFunc(sayHello, RateLimiting(key3)))
107 | http.HandleFunc("/token", ChaninFunc(sayHello, RateLimiting(key1)))
108 | http.HandleFunc("/redis", ChaninFunc(sayHello, RateLimiting(key4)))
109 |
110 | err := http.ListenAndServe(":8080", nil)
111 | if err != nil {
112 | log.Println(" http.ListenAndServe Error: ")
113 | panic(err)
114 | }
115 |
116 | }
117 | func init() {
118 | limiterSvr = &RateLimitService{
119 | Limiters: make(map[string]Limiter, 0),
120 | }
121 | }
122 |
--------------------------------------------------------------------------------
/web_test.go:
--------------------------------------------------------------------------------
1 | package goratelimitmanager
2 |
3 | import (
4 | "log"
5 | "os"
6 | "testing"
7 | "time"
8 | )
9 |
10 | func TestWeb(t *testing.T) {
11 | // open a log file; create it if it does not exist, append if it does
12 | logFile, err := os.OpenFile("app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
13 | if err != nil {
14 | log.Fatalf("error opening file: %v", err)
15 | }
16 | defer logFile.Close()
17 |
18 | // send log output to the file opened above
19 | log.SetOutput(logFile)
20 | StartWebConfigurationAndWatchDog()
21 | for {
22 | time.Sleep(time.Hour)
23 | }
24 | }
25 |
--------------------------------------------------------------------------------