├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .luacheckrc ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── docker-compose.yaml ├── examples ├── README.md ├── examples.lua └── nginx.conf ├── lib └── resty │ ├── global_throttle.lua │ └── global_throttle │ ├── sliding_window.lua │ ├── store.lua │ └── store │ ├── memcached.lua │ └── shared_dict.lua ├── lua-resty-global-throttle-0.2.0-1.rockspec ├── scripts ├── check ├── spec └── test ├── spec ├── resty │ ├── global_throttle │ │ ├── sliding_window_spec.lua │ │ └── store │ │ │ └── memcached_spec.lua │ └── global_throttle_spec.lua └── run.lua └── t └── sanity.t /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: push 4 | 5 | jobs: 6 | spec: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v2 10 | 11 | - name: Build and start necessary containers 12 | run: make dev-up 13 | 14 | - name: Lint 15 | run: make check 16 | 17 | - name: Spec 18 | run: | 19 | make spec 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | notes 2 | t/servroot/ 3 | 4 | # Compiled Lua sources 5 | luac.out 6 | 7 | # luarocks build files 8 | *.src.rock 9 | *.zip 10 | *.tar.gz 11 | 12 | # Object files 13 | *.o 14 | *.os 15 | *.ko 16 | *.obj 17 | *.elf 18 | 19 | # Precompiled Headers 20 | *.gch 21 | *.pch 22 | 23 | # Libraries 24 | *.lib 25 | *.a 26 | *.la 27 | *.lo 28 | *.def 29 | *.exp 30 | 31 | # Shared objects (inc. 
Windows DLLs) 32 | *.dll 33 | *.so 34 | *.so.* 35 | *.dylib 36 | 37 | # Executables 38 | *.exe 39 | *.out 40 | *.app 41 | *.i*86 42 | *.x86_64 43 | *.hex 44 | 45 | -------------------------------------------------------------------------------- /.luacheckrc: -------------------------------------------------------------------------------- 1 | std = "ngx_lua" 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openresty/openresty:stretch-fat 2 | 3 | RUN apt-get update && \ 4 | apt-get -yq install cpanminus build-essential libreadline-dev unzip && \ 5 | curl -sSL https://luarocks.org/releases/luarocks-3.1.3.tar.gz -o luarocks-3.1.3.tar.gz && \ 6 | tar zxpf luarocks-3.1.3.tar.gz && \ 7 | cd luarocks-3.1.3 && \ 8 | ./configure --prefix=/usr/local/openresty/luajit \ 9 | --with-lua=/usr/local/openresty/luajit/ \ 10 | --lua-suffix=jit \ 11 | --with-lua-include=/usr/local/openresty/luajit/include/luajit-2.1 && \ 12 | make build && \ 13 | make install && \ 14 | cd ../ && \ 15 | rm -rf luarocks-3.1.3.tar.gz && \ 16 | curl -sSL https://raw.githubusercontent.com/openresty/openresty-devel-utils/master/lj-releng -o lj-releng && \ 17 | chmod +x lj-releng && \ 18 | mv lj-releng /usr/local/openresty/bin/ && \ 19 | cpanm --notest Test::Nginx && \ 20 | luarocks install busted && \ 21 | luarocks install luacheck 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Elvin Efendi 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PREFIX ?= /usr/local 2 | LUA_INCLUDE_DIR ?= $(PREFIX)/include 3 | LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) 4 | INSTALL ?= install 5 | 6 | .PHONY: install misc dev-up dev-down check spec release 7 | 8 | install: 9 | $(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty 10 | $(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/global_throttle 11 | $(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/global_throttle/store 12 | $(INSTALL) lib/resty/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty 13 | $(INSTALL) lib/resty/global_throttle/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/global_throttle 14 | $(INSTALL) lib/resty/global_throttle/store/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/global_throttle/store 15 | 16 | misc: 17 | brew install hey 18 | dev-up: 19 | docker-compose up -d 20 | dev-down: 21 | docker-compose down 22 | reload-proxy: 23 | docker-compose exec proxy openresty -s reload 24 | 25 | check: 26 | docker-compose exec -T -w /global_throttle proxy scripts/check 27 | 28 | # use --filter PATTERN flag to focus on matching tests only 29 | 
spec: 30 | docker-compose exec -T -w /global_throttle proxy scripts/spec $(ARGS) 31 | 32 | release: 33 | @echo "bump version and tag in rockspec" 34 | @echo "rename rockspec to include the new version" 35 | @echo "grep for VERSION in lib and bump the versions there" 36 | @echo "create a release tag in github interface" 37 | @echo "run 'luarocks pack lua-resty-global-throttle-.rockspec' to generate .rock file" 38 | @echo "Open https://luarocks.org/upload and upload the new .rockspec and .rock there" 39 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # lua-resty-global-throttle 2 | 3 | [![Build Status](https://github.com/ElvinEfendi/lua-resty-global-throttle/workflows/CI/badge.svg?branch=main)](https://github.com/ElvinEfendi/lua-resty-global-throttle/actions?query=workflow%3ACI) 4 | 5 | ### Installation 6 | 7 | ``` 8 | luarocks install lua-resty-global-throttle 9 | ``` 10 | 11 | ### Usage 12 | 13 | A generic, distributed throttling implementation for Openresty. It can be used to throttle any action let it be a request or a function call. 14 | Currently only approximate sliding window rate limiting is implemented. 15 | 16 | First require the module: 17 | 18 | ``` 19 | local global_throttle = require "resty.global_throttle" 20 | ``` 21 | 22 | After that you can create an instance of throttle like following where 100 is the limit that will be enforced per 2 seconds window. 23 | The third parameter tells the throttler what store provider it should use to store its internal statistics. 24 | 25 | ``` 26 | local memc_host = os.getenv("MEMCACHED_HOST") 27 | local memc_port = os.getenv("MEMCACHED_PORT") 28 | 29 | ... 
30 | 31 | local my_throttle, err = global_throttle.new(namespace, 10, 2, { 32 | provider = "memcached", 33 | host = memc_host, 34 | port = memc_port, 35 | connect_timeout = 15, 36 | max_idle_timeout = 10000, 37 | pool_size = 100, 38 | }) 39 | ``` 40 | 41 | Finally you call following everytime before whatever it is you're throttling: 42 | 43 | ``` 44 | local estimated_final_count, desired_delay, err = my_throttle:process("identifier of whatever it is your are throttling") 45 | ``` 46 | 47 | When `desired_delay` exists, it means the limit is exceeding and client should be throttled for `desired_delay` seconds. 48 | 49 | For more complete understanding of how to use this library, refer to `examples` directory. 50 | 51 | ### Production considerations 52 | 53 | 1. Ensure you configure the connection pool size properly. Basically if your store (i.e memcached) can handle `n` concurrent connections and your NGINX has `m` workers, 54 | then the connection pool size should be configured as `n/m`. That is because the configured pool size is per NGINX worker. 55 | For example, if your store usually handles 1000 concurrent requests and you have 10 NGINX workers, 56 | then the connection pool size should be 100. Similarly if you have `p` different NGINX instances, then connection pool size should be `n/m/p`. 57 | 2. Be careful when caching decisions based on `desired_delay`, sometimes it is too small that your cache can interpret it as 0 and cache indefinitely. 58 | Also caching for very little time probably does not add any benefit. 59 | 60 | ### Contributions and Development 61 | 62 | The library is designed to be extendable. Currently only approximate sliding window algorithm is implemented in `lib/resty/global_throttle/sliding_window.lua`. It can be used as a reference point to implement other algorithms. 63 | 64 | Storage providers are implemented in `lib/resty/global_throttle/store/`. 
65 | 66 | ### TODO 67 | 68 | - [ ] Redis store provider 69 | - [ ] Support Sliding Window algorithm (where bursts are allowed) 70 | - [ ] Implement Leaky Bucket 71 | 72 | ### References 73 | 74 | - Cloudflare's blog post on approximate sliding window: https://blog.cloudflare.com/counting-things-a-lot-of-different-things/ 75 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | proxy: 4 | build: 5 | context: ./ 6 | dockerfile: ./Dockerfile 7 | volumes: 8 | - ./examples/nginx.conf:/etc/openresty/nginx.conf 9 | - ./:/global_throttle 10 | networks: 11 | - lua_resty_global_throttle 12 | expose: 13 | - "8080" 14 | ports: 15 | - "8080:8080" 16 | environment: 17 | - MEMCACHED_HOST=memcached 18 | - MEMCACHED_PORT=11211 19 | memcached: 20 | image: bitnami/memcached:latest 21 | networks: 22 | - lua_resty_global_throttle 23 | expose: 24 | - "11211" 25 | ports: 26 | - "11211:11211" 27 | networks: 28 | lua_resty_global_throttle: {} 29 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Instructions 2 | 3 | In the root directory of the library where `docker-compose.yaml` is run: 4 | 5 | ``` 6 | docker-compose build 7 | ``` 8 | 9 | then: 10 | 11 | ``` 12 | docker-compose up 13 | ``` 14 | 15 | After this you should be able to `curl` `localhost:8080` and get successful response back: 16 | 17 | ``` 18 | > lua-resty-global-throttle (master)$ curl localhost:8080 19 | ok 20 | ``` 21 | 22 | The server has `lua-resty-global-throttle` configured and it will start responding with 23 | HTTP status code 429 when limit is exceeded. Server will use the latest code, which means 24 | you can change code and test quickly using this server. 
25 | When you change the code or configuration make sure you run `make reload-proxy` 26 | so the NGINX picks the latest configuration and Lua code. 27 | 28 | You can use `hey` (or any other load generator) to test throttling: 29 | 30 | ``` 31 | > lua-resty-global-throttle (main)$ hey -c 2 -q 100 -z 6s http://localhost:8080/memc?key=client 32 | 33 | Summary: 34 | Total: 6.0077 secs 35 | Slowest: 0.0415 secs 36 | Fastest: 0.0015 secs 37 | Average: 0.0035 secs 38 | Requests/sec: 198.9098 39 | 40 | Total data: 202825 bytes 41 | Size/request: 169 bytes 42 | 43 | Response time histogram: 44 | 0.002 [1] | 45 | 0.006 [1110] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■ 46 | 0.010 [75] |■■■ 47 | 0.014 [6] | 48 | 0.018 [1] | 49 | 0.022 [0] | 50 | 0.026 [0] | 51 | 0.030 [0] | 52 | 0.033 [0] | 53 | 0.037 [0] | 54 | 0.041 [2] | 55 | 56 | 57 | Latency distribution: 58 | 10% in 0.0020 secs 59 | 25% in 0.0024 secs 60 | 50% in 0.0033 secs 61 | 75% in 0.0041 secs 62 | 90% in 0.0051 secs 63 | 95% in 0.0060 secs 64 | 99% in 0.0095 secs 65 | 66 | Details (average, fastest, slowest): 67 | DNS+dialup: 0.0000 secs, 0.0015 secs, 0.0415 secs 68 | DNS-lookup: 0.0000 secs, 0.0000 secs, 0.0030 secs 69 | req write: 0.0000 secs, 0.0000 secs, 0.0005 secs 70 | resp wait: 0.0034 secs, 0.0015 secs, 0.0358 secs 71 | resp read: 0.0000 secs, 0.0000 secs, 0.0011 secs 72 | 73 | Status code distribution: 74 | [200] 36 responses 75 | [429] 1159 responses 76 | ``` 77 | -------------------------------------------------------------------------------- /examples/examples.lua: -------------------------------------------------------------------------------- 1 | local memcached = require("resty.memcached") 2 | local global_throttle = require("resty.global_throttle") 3 | 4 | local _M = {} 5 | 6 | -- it does not make sense to cache decision for too little time 7 | -- the benefit of caching likely is negated if we cache for too little time 8 | local CACHE_THRESHOLD = 0.001 9 | 10 | local lrucache = 
require("resty.lrucache") 11 | local process_cache, err = lrucache.new(200) 12 | if not process_cache then 13 | error("failed to create cache: " .. (err or "unknown")) 14 | end 15 | 16 | local memc_host = os.getenv("MEMCACHED_HOST") 17 | local memc_port = os.getenv("MEMCACHED_PORT") 18 | 19 | local function rewrite_memc(namespace, cache) 20 | --ngx.log(ngx.NOTICE, "timestamp: ", ngx.now()) 21 | 22 | local key = ngx.req.get_uri_args()['key'] 23 | 24 | local limit_exceeding 25 | if cache then 26 | limit_exceeding = cache:get(key) 27 | if limit_exceeding then 28 | return ngx.exit(429) 29 | end 30 | end 31 | 32 | local my_throttle, err = global_throttle.new(namespace, 10, 2, { 33 | provider = "memcached", 34 | host = memc_host, 35 | port = memc_port, 36 | connect_timeout = 15, 37 | max_idle_timeout = 10000, 38 | pool_size = 100, 39 | }) 40 | if err then 41 | ngx.log(ngx.ERR, err) 42 | return ngx.exit(500) 43 | end 44 | 45 | local _estimated_final_count, desired_delay, err = my_throttle:process(key) 46 | if err then 47 | ngx.log(ngx.ERR, "error while processing key: ", err) 48 | return ngx.exit(500) 49 | end 50 | 51 | if desired_delay then 52 | if cache then 53 | if desired_delay > CACHE_THRESHOLD then 54 | cache:add(key, value, desired_delay) 55 | end 56 | end 57 | 58 | return ngx.exit(429) 59 | end 60 | end 61 | 62 | function _M.rewrite_memc_with_lru() 63 | rewrite_memc("memc-lru", process_cache) 64 | end 65 | 66 | function _M.rewrite_memc_with_dict() 67 | rewrite_memc("memc-dict", ngx.shared.memc_decision_cache) 68 | end 69 | 70 | function _M.rewrite_memc() 71 | rewrite_memc("memc") 72 | end 73 | 74 | function _M.rewrite_dict() 75 | local my_throttle, err = global_throttle.new("dict", 10, 2, { 76 | provider = "shared_dict", 77 | name = "counters" 78 | }) 79 | 80 | local key = ngx.req.get_uri_args()['key'] 81 | 82 | local _estimated_final_count, desired_delay, err = my_throttle:process(key) 83 | if err then 84 | ngx.log(ngx.ERR, "error while processing key: ", err) 85 
| return ngx.exit(500) 86 | end 87 | 88 | if desired_delay then 89 | return ngx.exit(429) 90 | end 91 | end 92 | 93 | 94 | -- This can be used to inspect what is in the 95 | -- store. It assumes you disable expiry in the store. 96 | -- You can obtain `ts` and `te` by logging ngx.now(). 97 | function _M.stats() 98 | local test_start = ngx.req.get_uri_args()['ts'] 99 | local test_end = ngx.req.get_uri_args()['te'] 100 | local namespace = ngx.req.get_uri_args()['ns'] 101 | local sample = ngx.req.get_uri_args()['s'] 102 | local window_size = ngx.req.get_uri_args()['ws'] 103 | 104 | local memc, err = memcached:new() 105 | if err then 106 | ngx.log(ngx.ERR, err) 107 | return ngx.exit(500) 108 | end 109 | 110 | local ok 111 | ok, err = memc:connect(memc_host, memc_port) 112 | if not ok then 113 | ngx.log(ngx.ERR, err) 114 | return ngx.exit(500) 115 | end 116 | 117 | local namespace = "memc" 118 | local sample = "client" 119 | local window_size = 2 120 | local window_id_start = math.floor(test_start / 2) 121 | local window_id_end = math.floor(test_end / 2) 122 | 123 | local response = "" 124 | for i=window_id_start,window_id_end,1 do 125 | local counter_key = string.format("%s.%s.%s.counter", namespace, sample, i) 126 | local value, _, err = memc:get(counter_key) 127 | if err then 128 | ngx.log(ngx.ERR, "error when getting key: ", err) 129 | end 130 | response = response .. "\n" .. counter_key .. " : " .. 
tostring(value) 131 | end 132 | 133 | ok, err = memc:set_keepalive(10000, 100) 134 | if not ok then 135 | ngx.log(ngx.ERR, err) 136 | return ngx.exit(500) 137 | end 138 | 139 | ngx.say(response) 140 | end 141 | 142 | return _M 143 | -------------------------------------------------------------------------------- /examples/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes 1; 2 | 3 | events { 4 | worker_connections 1024; 5 | } 6 | 7 | error_log /dev/stdout notice; 8 | 9 | env MEMCACHED_HOST; 10 | env MEMCACHED_PORT; 11 | 12 | http { 13 | include mime.types; 14 | default_type application/octet-stream; 15 | 16 | lua_package_path "/global_throttle/examples/?.lua;/global_throttle/lib/?.lua;;"; 17 | 18 | lua_shared_dict counters 1m; 19 | lua_shared_dict memc_decision_cache 1m; 20 | 21 | keepalive_timeout 65; 22 | 23 | # Docker embedded DNS resolver IP is hardcoded: 24 | # https://github.com/moby/libnetwork/blob/d0951081b35fa4216fc4f0064bf065beeb55a74b/sandbox.go#L136. 25 | # So hardcoding here is not a problem. 
26 | resolver 127.0.0.11; 27 | 28 | server { 29 | listen 8080; 30 | server_name localhost; 31 | 32 | location /dict { 33 | rewrite_by_lua_block { 34 | require("examples").rewrite_dict() 35 | } 36 | 37 | content_by_lua_block { 38 | ngx.say("ok") 39 | } 40 | } 41 | 42 | location /memc { 43 | rewrite_by_lua_block { 44 | require("examples").rewrite_memc() 45 | } 46 | 47 | content_by_lua_block { 48 | ngx.say("ok") 49 | } 50 | } 51 | 52 | location /memclru { 53 | rewrite_by_lua_block { 54 | require("examples").rewrite_memc_with_lru() 55 | } 56 | 57 | content_by_lua_block { 58 | ngx.say("ok") 59 | } 60 | } 61 | 62 | location /memcdict { 63 | rewrite_by_lua_block { 64 | require("examples").rewrite_memc_with_dict() 65 | } 66 | 67 | content_by_lua_block { 68 | ngx.say("ok") 69 | } 70 | } 71 | 72 | location /stats { 73 | content_by_lua_block { 74 | require("examples").stats() 75 | } 76 | } 77 | 78 | location /all { 79 | content_by_lua_block { 80 | ngx.say("ok") 81 | } 82 | } 83 | 84 | location /nothing { 85 | rewrite_by_lua_block { 86 | return ngx.exit(429) 87 | } 88 | } 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /lib/resty/global_throttle.lua: -------------------------------------------------------------------------------- 1 | local store_new = require("resty.global_throttle.store").new 2 | local sliding_window_new = require("resty.global_throttle.sliding_window").new 3 | 4 | local setmetatable = setmetatable 5 | local string_format = string.format 6 | 7 | local _M = { _VERSION = "0.2.0" } 8 | local mt = { __index = _M } 9 | 10 | local MAX_NAMESPACE_LEN = 35 11 | 12 | function _M.new(namespace, limit, window_size_in_seconds, store_options) 13 | if not namespace then 14 | return nil, "'namespace' param is missing" 15 | end 16 | 17 | namespace = namespace:lower() 18 | 19 | if namespace ~= namespace:match("[%a%d-]+") then 20 | return nil, "'namespace' can have only letters, digits and hyphens" 21 | end 22 | 23 | if 
namespace:len() > MAX_NAMESPACE_LEN then 24 | return nil, 25 | string_format("'namespace' can be at most %s characters", 26 | MAX_NAMESPACE_LEN) 27 | end 28 | 29 | if not store_options then 30 | return nil, "'store_options' param is missing" 31 | end 32 | 33 | local store, err = store_new(store_options) 34 | if not store then 35 | return nil, string_format("error initiating the store: %s", err) 36 | end 37 | 38 | local sw 39 | sw, err = sliding_window_new(namespace, store, limit, window_size_in_seconds) 40 | if not sw then 41 | return nil, "error while creating sliding window instance: " .. err 42 | end 43 | 44 | return setmetatable({ 45 | sliding_window = sw, 46 | }, mt), nil 47 | end 48 | 49 | function _M.process(self, key) 50 | return self.sliding_window:process_sample(key) 51 | end 52 | 53 | return _M 54 | -------------------------------------------------------------------------------- /lib/resty/global_throttle/sliding_window.lua: -------------------------------------------------------------------------------- 1 | local tostring = tostring 2 | local string_format = string.format 3 | local math_floor = math.floor 4 | local ngx_now = ngx.now 5 | local ngx_log = ngx.log 6 | local ngx_ERR = ngx.ERR 7 | local setmetatable = setmetatable 8 | 9 | local _M = {} 10 | local mt = { __index = _M } 11 | 12 | -- uniquely identifies the window associated with given time 13 | local function get_id(self, time) 14 | return tostring(math_floor(time / self.window_size)) 15 | end 16 | 17 | -- counter key is made of the identifier of current sliding window instance, 18 | -- and identifier of the current window. This makes sure it is unique 19 | -- per given sliding window instance in the given window. 
20 | local function get_counter_key(self, sample, time) 21 | local id = get_id(self, time) 22 | return string_format("%s.%s.%s.counter", self.namespace, sample, id) 23 | end 24 | 25 | local function get_last_rate(self, sample, now_ms) 26 | local a_window_ago_from_now = now_ms - self.window_size 27 | local last_counter_key = get_counter_key(self, sample, a_window_ago_from_now) 28 | 29 | local last_count, err = self.store:get(last_counter_key) 30 | if err then 31 | return nil, err 32 | end 33 | if not last_count then 34 | -- NOTE(elvinefendi): returning 0 as a default value here means 35 | -- we will allow spike in the first window or in the window that 36 | -- has no immediate previous window with samples. 37 | -- What if we default to self.limit here? 38 | last_count = 0 39 | end 40 | if last_count > self.limit then 41 | -- in process_sample we also reactively check for exceeding limit 42 | -- after icnrementing the counter. So even though counter can be higher 43 | -- than the limit as a result of racy behaviour we would still throttle 44 | -- anyway. That is way it is important to correct the last count here 45 | -- to avoid over-punishment. 
46 | last_count = self.limit 47 | end 48 | 49 | return last_count / self.window_size 50 | end 51 | 52 | function _M.new(namespace, store, limit, window_size) 53 | if not namespace then 54 | return nil, "'namespace' parameter is missing" 55 | end 56 | 57 | if not store then 58 | return nil, "'store' parameter is missing" 59 | end 60 | if not store.incr then 61 | return nil, "'store' has to implement 'incr' function" 62 | end 63 | if not store.get then 64 | return nil, "'store' has to implement 'get' function" 65 | end 66 | 67 | return setmetatable({ 68 | namespace = namespace, 69 | store = store, 70 | limit = limit, 71 | window_size = window_size 72 | }, mt), nil 73 | end 74 | 75 | local function get_desired_delay(self, remaining_time, last_rate, count) 76 | if last_rate == 0 then 77 | return remaining_time 78 | end 79 | 80 | local desired_delay = remaining_time - (self.limit - count) / last_rate 81 | 82 | if desired_delay == 0 then 83 | -- no delay 84 | return nil 85 | end 86 | 87 | if desired_delay < 0 or desired_delay > self.window_size then 88 | ngx_log(ngx_ERR, "unexpected value for delay: ", desired_delay, 89 | ", when remaining_time = ", remaining_time, 90 | " last_rate = ", last_rate, 91 | " count = ", count, 92 | " limit = ", self.limit, 93 | " window_size = ", self.window_size) 94 | return nil 95 | end 96 | 97 | return desired_delay 98 | end 99 | 100 | -- process_sample first checks if limit exceeding for the given sample. 101 | -- If so then, it calculates for how long this sample 102 | -- should be delayed/rejected and returns estimated total count for 103 | -- the current window for this sample along with suggested delay time to bring 104 | -- the rate down below the limit. 105 | -- If limit is not exceeding yet, it increments the counter corresponding 106 | -- to the sample in the current window. Finally it checks if the limit is 107 | -- exceeding again. 
This check is necessary because between the first check and 108 | -- increment another sliding window instances might have processed enough 109 | -- occurences of this sample to exceed the limit. Therefore if this check shows 110 | -- that the limit is exceeding then we again calculate necessary delay. 111 | -- 112 | -- Return values: estimated_count, delay, err 113 | -- `estimated_count` - this is what the algorithm expects number of occurences 114 | -- will be for the sample by the end of current window excluding the current 115 | -- occurence of the sample. It is calculated based 116 | -- on the rate from previous window and extrapolated to the current window. 117 | -- If estimated_count is bigger than the configured limit, then the function 118 | -- will also return delay > 0 to suggest that the sample has to be throttled. 119 | -- `delay` - this is either strictly bigger than 0 in case limit is 120 | -- exceeding, or nil in case rate of occurences of the sample is under the 121 | -- limit. The unit is second. 122 | -- `err` - in case there is a problem with processing the sample 123 | -- this will be a string explaining the problem. In all other cases it is nil. 
124 | function _M.process_sample(self, sample) 125 | local now = ngx_now() 126 | local counter_key = get_counter_key(self, sample, now) 127 | local remaining_time = self.window_size - now % self.window_size 128 | 129 | local count, err = self.store:get(counter_key) 130 | if err then 131 | return nil, nil, err 132 | end 133 | if not count then 134 | count = 0 135 | end 136 | if count >= self.limit then 137 | -- count can be over the limit because of the racy nature 138 | -- when it is at/over the limit we know for sure what is the final 139 | -- count and desired delay for the current window, so no need to proceed 140 | return count, remaining_time, nil 141 | end 142 | 143 | local last_rate 144 | last_rate, err = get_last_rate(self, sample, now) 145 | if err then 146 | return nil, nil, err 147 | end 148 | 149 | local estimated_final_count = last_rate * remaining_time + count 150 | if estimated_final_count >= self.limit then 151 | local desired_delay = 152 | get_desired_delay(self, remaining_time, last_rate, count) 153 | return estimated_final_count, desired_delay, nil 154 | end 155 | 156 | local expiry = self.window_size * 2 157 | local new_count 158 | new_count, err = self.store:incr(counter_key, 1, expiry) 159 | if err then 160 | return nil, nil, err 161 | end 162 | 163 | -- The below limit checking is only to cope with a racy behaviour where 164 | -- counter for the given sample is incremented at the same time by multiple 165 | -- sliding_window instances. That is we re-adjust the new count by ignoring 166 | -- the current occurence of the sample. Otherwise the limit would 167 | -- unncessarily be exceeding. 168 | local new_adjusted_count = new_count - 1 169 | 170 | if new_adjusted_count >= self.limit then 171 | -- incr above might take long enough to make difference, so 172 | -- we recalculate time-dependant variables. 
173 | remaining_time = self.window_size - ngx_now() % self.window_size 174 | 175 | return new_adjusted_count, remaining_time, nil 176 | end 177 | 178 | return estimated_final_count, nil, nil 179 | end 180 | 181 | return _M 182 | -------------------------------------------------------------------------------- /lib/resty/global_throttle/store.lua: -------------------------------------------------------------------------------- 1 | local require = require 2 | local string_format = string.format 3 | 4 | -- Providers are lazily loaded based on given options. 5 | -- Every store provider should implement `:incr(key, delta, expiry)` 6 | -- that returns new value and an error and `:get(key)` that returns value 7 | -- corresponding to given `ket` and an error if there's any. 8 | local providers = {} 9 | 10 | local _M = {} 11 | 12 | function _M.new(options) 13 | if not options then 14 | return nil, "'options' param is missing" 15 | end 16 | 17 | if not options.provider then 18 | return nil, "'provider' attribute is missing" 19 | end 20 | 21 | if not providers[options.provider] then 22 | local provider_implementation_path = 23 | string_format("resty.global_throttle.store.%s", options.provider) 24 | local provider_implementation = require(provider_implementation_path) 25 | 26 | if not provider_implementation then 27 | return nil, 28 | string_format("given 'store' implementation was not found in: '%s'", 29 | provider_implementation_path) 30 | end 31 | 32 | providers[options.provider] = provider_implementation 33 | end 34 | 35 | local provider_implementation_instance, err = 36 | providers[options.provider].new(options) 37 | if not provider_implementation_instance then 38 | return nil, err 39 | end 40 | 41 | return provider_implementation_instance, nil 42 | end 43 | 44 | return _M 45 | -------------------------------------------------------------------------------- /lib/resty/global_throttle/store/memcached.lua: 
-------------------------------------------------------------------------------- 1 | local memcached = require("resty.memcached") 2 | 3 | local string_format = string.format 4 | local ngx_log = ngx.log 5 | local ngx_ERR = ngx.ERR 6 | local setmetatable = setmetatable 7 | local tonumber = tonumber 8 | 9 | local _M = {} 10 | local mt = { __index = _M } 11 | 12 | function _M.new(options) 13 | if not options.host or not options.port then 14 | return nil, "'host' and 'port' options are required" 15 | end 16 | 17 | return setmetatable({ 18 | options = options, 19 | }, mt), nil 20 | end 21 | 22 | local function with_client(self, action) 23 | local memc, err = memcached:new() 24 | if not memc then 25 | return nil, string_format("failed to instantiate memcached: %s", err) 26 | end 27 | 28 | if self.options.connect_timeout and self.options.connect_timeout > 0 then 29 | local ok 30 | ok, err = memc:set_timeout(self.options.connect_timeout) 31 | if not ok then 32 | return nil, string_format("error setting connect timeout: %s", err) 33 | end 34 | end 35 | 36 | local ok 37 | ok, err = memc:connect(self.options.host, self.options.port) 38 | if not ok then 39 | return nil, string_format("failed to connect: %s", err) 40 | end 41 | 42 | local ret1, ret2 = action(memc) 43 | 44 | if self.options.max_idle_timeout and self.options.pool_size then 45 | ok, err = 46 | memc:set_keepalive(self.options.max_idle_timeout, self.options.pool_size) 47 | else 48 | ok, err = memc:close() 49 | end 50 | if not ok then 51 | ngx_log(ngx_ERR, err) 52 | end 53 | 54 | return ret1, ret2 55 | end 56 | 57 | function _M.incr(self, key, delta, expiry) 58 | return with_client(self, function(memc) 59 | local err_pattern = 60 | string_format("%%s failed for key '%s', expiry '%s': %%s", key, expiry) 61 | local new_value, err = memc:incr(key, delta) 62 | if err then 63 | if err ~= "NOT_FOUND" then 64 | return nil, string_format(err_pattern, "increment", err) 65 | end 66 | 67 | local ok 68 | ok, err = memc:add(key, 
delta, expiry) 69 | if ok then 70 | new_value = delta 71 | elseif err == "NOT_STORED" then 72 | -- possibly the other worker added the key, so attempting to incr again 73 | new_value, err = memc:incr(key, delta) 74 | if err then 75 | return nil, string_format(err_pattern, "increment", err) 76 | end 77 | else 78 | return nil, string_format(err_pattern, "add", err) 79 | end 80 | end 81 | 82 | return tonumber(new_value), nil 83 | end) 84 | end 85 | 86 | function _M.get(self, key) 87 | return with_client(self, function(memc) 88 | local value, flags, err = memc:get(key) 89 | if err then 90 | return nil, string_format("'get' failed for '%s': %s", key, err) 91 | end 92 | if value == nil and flags == nil and err == nil then 93 | return nil, nil 94 | end 95 | return tonumber(value), nil 96 | end) 97 | end 98 | 99 | function _M.__flush_all(self) 100 | return with_client(self, function(memc) 101 | return memc:flush_all() 102 | end) 103 | end 104 | 105 | return _M 106 | -------------------------------------------------------------------------------- /lib/resty/global_throttle/store/shared_dict.lua: -------------------------------------------------------------------------------- 1 | local ngx = ngx 2 | local ngx_log = ngx.log 3 | local ngx_WARN = ngx.WARN 4 | local string_format = string.format 5 | local setmetatable = setmetatable 6 | 7 | local _M = {} 8 | local mt = { __index = _M } 9 | 10 | function _M.new(options) 11 | if not options.name then 12 | return nil, "shared dictionary name is mandatory" 13 | end 14 | 15 | local dict = ngx.shared[options.name] 16 | if not dict then 17 | return nil, 18 | string_format("shared dictionary with name \"%s\" is not configured", 19 | options.name) 20 | end 21 | 22 | return setmetatable({ 23 | dict = dict, 24 | }, mt), nil 25 | end 26 | 27 | function _M.incr(self, key, delta, expiry) 28 | local new_value, err, forcible = self.dict:incr(key, delta, 0, expiry) 29 | if err then 30 | return nil, err 31 | end 32 | 33 | if forcible then 34 | 
-- Fetches `key` from the shared dictionary.
-- A cache miss is reported as (nil, nil) — consistent with the memcached
-- store — rather than an error. Note: the previous guard
-- `if not value == nil then` parsed as `(not value) == nil`, which is
-- always false, so that branch was dead code.
function _M.get(self, key)
  local value = self.dict:get(key)
  if value == nil then
    -- missing or expired key
    return nil, nil
  end

  return value, nil
end
12 | ]], 13 | homepage = "https://github.com/ElvinEfendi/lua-resty-global-throttle", 14 | license = "MIT" 15 | } 16 | build = { 17 | type = "builtin", 18 | modules = { 19 | ["resty.global_throttle.store.memcached"] = "lib/resty/global_throttle/store/memcached.lua", 20 | ["resty.global_throttle.store.shared_dict"] = "lib/resty/global_throttle/store/shared_dict.lua", 21 | ["resty.global_throttle.store"] = "lib/resty/global_throttle/store.lua", 22 | ["resty.global_throttle.sliding_window"] = "lib/resty/global_throttle/sliding_window.lua", 23 | ["resty.global_throttle"] = "lib/resty/global_throttle.lua" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /scripts/check: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | luacheck -q lib 8 | lj-releng lib/resty/*.lua lib/resty/**/*.lua lib/resty/**/**/*.lua 9 | -------------------------------------------------------------------------------- /scripts/spec: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | resty \ 8 | -I /global_throttle/lib \ 9 | --shdict "my_global_throttle 1M" \ 10 | spec/run.lua -o gtest --shuffle -v spec/**/ "$@" 11 | -------------------------------------------------------------------------------- /scripts/test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | docker run -w /lua --rm -it -v ${PWD}:/lua test-cli prove -r t/ 8 | -------------------------------------------------------------------------------- /spec/resty/global_throttle/sliding_window_spec.lua: -------------------------------------------------------------------------------- 1 | local memcached_store = 
-- Asserts that two numbers are equal within a fixed tolerance, since exact
-- comparison of floats is unreliable.
local function assert_floats_are_equal(expected, actual)
  local tolerance = 1e-6

  assert.are.same("number", type(expected))
  assert.are.same("number", type(actual))

  local diff = math.abs(expected - actual)
  assert.is_true(diff < tolerance,
    string.format("expected %s, got %s", expected, actual))
end
1608261277.678 62 | local window_start = 1608261277 -- math.floor(frozen_ngx_now / window_size) 63 | local elapsed_time = 0.678 -- (frozen_ngx_now - window_start) 64 | local remaining_time = 0.322 -- (window_size - elapsed_time) 65 | local sample = "client1" 66 | local counter_key = get_counter_key(namespace, sample, frozen_ngx_now, window_size) 67 | 68 | before_each(function() 69 | local ok, er = memcached.flush_all() 70 | assert.is_nil(err) 71 | assert.are.same(1, ok) 72 | 73 | sw = new_sliding_window(namespace, limit, window_size) 74 | end) 75 | 76 | describe("when there's no previous window", function() 77 | it("returns precise count and no delay when limit is not exceeding", function() 78 | ngx_freeze_time(frozen_ngx_now, function() 79 | local estimated_count, delay, err = sw:process_sample(sample) 80 | assert.is_nil(err) 81 | assert.is_nil(delay) 82 | assert.are.same(0, estimated_count) 83 | 84 | estimated_count, delay, err = sw:process_sample(sample) 85 | assert.is_nil(err) 86 | assert.is_nil(delay) 87 | assert.are.same(1, estimated_count) 88 | end) 89 | end) 90 | 91 | it("returns precise count and delay when limit is exceeding", function() 92 | ngx_freeze_time(frozen_ngx_now, function() 93 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 94 | 95 | local estimated_count, delay, err = sw:process_sample(sample) 96 | assert.is_nil(err) 97 | assert_floats_are_equal(remaining_time, delay) 98 | assert.are.same(limit, estimated_count) 99 | end) 100 | end) 101 | 102 | it("differentiates samples from one another", function() 103 | ngx_freeze_time(frozen_ngx_now, function() 104 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 105 | exhaust_limit_and_assert_without_previous_window(sw, "another_sample", limit) 106 | end) 107 | end) 108 | 109 | it("differentiates namespaces from one another", function() 110 | local another_sw = new_sliding_window("signup-page", limit, window_size) 111 | 112 | ngx_freeze_time(frozen_ngx_now, 
function() 113 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 114 | exhaust_limit_and_assert_without_previous_window(another_sw, sample, limit) 115 | end) 116 | end) 117 | 118 | it("detects exceeding limit in case other sliding window instances increments counter right before the current instance increments", function() 119 | ngx_freeze_time(frozen_ngx_now, function() 120 | for i=1,(limit-2),1 do 121 | local estimated_count, delay, err = sw:process_sample(sample) 122 | assert.is_nil(err) 123 | assert.is_nil(delay) 124 | assert.are.same(i-1, estimated_count) 125 | end 126 | 127 | -- this way we simulate other sliding window 128 | -- instances increments the same key after this 129 | -- sliding window instance "get"s it and before it increments it. 130 | local original_memc_get = memcached_store.get 131 | local mocked_memc_get = function(self, key) 132 | local value, err = original_memc_get(self, key) 133 | memcached.with_client(function(memc) 134 | memc:incr(key, 2) 135 | end) 136 | return value, err 137 | end 138 | memcached_store.get = mocked_memc_get 139 | 140 | local estimated_count, delay, err = sw:process_sample(sample) 141 | assert.is_nil(err) 142 | assert_floats_are_equal(remaining_time, delay) 143 | assert.are.same(limit, estimated_count) 144 | 145 | -- unmock 146 | memcached_store.get = original_memc_get 147 | end) 148 | end) 149 | end) 150 | 151 | describe("when a window is over and a new one starts", function() 152 | it("returns estimated count and no delay when limit is not exceeding", function() 153 | ngx_freeze_time(frozen_ngx_now, function() 154 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 155 | 156 | -- we travel to the next window 157 | local new_elapsed_time = 0.2 158 | ngx_time_travel(remaining_time + new_elapsed_time, function() 159 | local estimated_count, delay, err = sw:process_sample(sample) 160 | assert.is_nil(err) 161 | -- in the previous window the rate was 5/1 = 5 occurences per second 162 
| -- and in the current window we have had only one occurences of the sample 163 | -- so our estimated count for current window would be following 164 | local expected_estimated_count = 4 -- 0.8 * 5 + 0 165 | -- where 0.8 is (window_size - new_elapsed_time), i.e new remaining_time 166 | assert_floats_are_equal(expected_estimated_count, estimated_count) 167 | 168 | -- since limit is not exceeding, delay should be nil 169 | assert.is_nil(delay) 170 | end) 171 | end) 172 | end) 173 | 174 | it("returns estimated count and correct delay when limit is exceeding", function() 175 | ngx_freeze_time(frozen_ngx_now, function() 176 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 177 | 178 | -- we travel to the next window 179 | local new_elapsed_time = 0.2 180 | ngx_time_travel(remaining_time + new_elapsed_time, function() 181 | local estimated_count, delay, err = sw:process_sample(sample) 182 | assert.is_nil(err) 183 | assert.is_nil(delay) 184 | 185 | estimated_count, delay, err = sw:process_sample(sample) 186 | assert.is_nil(err) 187 | 188 | estimated_count, delay, err = sw:process_sample(sample) 189 | assert.is_nil(err) 190 | 191 | -- in the previous window the rate was 5/1 = 5 occurences per second 192 | -- and in the current window we have had two occurences of the sample 193 | -- so our estimated count for current window would be following 194 | local expected_estimated_count = 6 -- 0.8 * 5 + 2 195 | -- where 0.8 is (window_size - new_elapsed_time), i.e new remaining_time 196 | assert_floats_are_equal(expected_estimated_count, estimated_count) 197 | 198 | -- since limit is exceeding, we will also have the following delay 199 | local expected_delay = 0.8 - (5 - 2) / 5 200 | -- the formula above is obtained by solving (0.8 - elapsed_time) * 5 + 2 = 5 201 | assert_floats_are_equal(expected_delay, delay) 202 | end) 203 | end) 204 | end) 205 | 206 | it("looks back at only immediate previous window", function() 207 | ngx_freeze_time(frozen_ngx_now, 
function() 208 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 209 | 210 | -- travel to next to next window 211 | ngx_time_travel(remaining_time + window_size, function() 212 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 213 | end) 214 | end) 215 | end) 216 | 217 | it("does not over-punish when counter value in the previous window is over the limit as a result of racy behaviour", function() 218 | local ok, err = memcached.with_client(function(memc) 219 | return memc:add(counter_key, limit + 3, window_size * 2) 220 | end) 221 | assert.is_nil(err) 222 | assert.are.same(1, ok) 223 | 224 | -- the above counter key was for previous window, and now we move to next window 225 | ngx_freeze_time(frozen_ngx_now + remaining_time + 0.1, function() 226 | local estimated_count, delay, err = sw:process_sample(sample) 227 | estimated_count, delay, err = sw:process_sample(sample) 228 | assert.is_nil(err) 229 | -- the main point in the below expectation is that previous rate is 230 | -- calculated as 5(correct counter value)/1 and not as 8(actual counter value: limit + 3)/1. 
231 | local expected_estimated_count = 5 * 0.9 + 1 232 | assert_floats_are_equal(expected_estimated_count, estimated_count) 233 | local expected_delay = 0.9 - (5 - 1) / 5 234 | assert_floats_are_equal(expected_delay, delay) 235 | end) 236 | end) 237 | 238 | it("returns correct values when current counter value is already at the limit as a result of racy behaviour from before", function() 239 | ngx_freeze_time(frozen_ngx_now, function() 240 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 241 | 242 | -- travel to next to next window 243 | ngx_time_travel(remaining_time + 0.2, function() 244 | local new_counter_key = get_counter_key(namespace, sample, ngx.now(), window_size) 245 | local ok, err = memcached.with_client(function(memc) 246 | return memc:add(new_counter_key, limit, window_size * 2) 247 | end) 248 | assert.is_nil(err) 249 | assert.are.same(1, ok) 250 | 251 | local estimated_count, delay, err = sw:process_sample(sample) 252 | assert.is_nil(err) 253 | assert_floats_are_equal(window_size - 0.2, delay) 254 | assert.are.same(limit, estimated_count) 255 | end) 256 | end) 257 | end) 258 | 259 | it("returns correct values when current counter value is already over the limit as a result of racy behaviour from before", function() 260 | ngx_freeze_time(frozen_ngx_now, function() 261 | exhaust_limit_and_assert_without_previous_window(sw, sample, limit) 262 | 263 | -- travel to next to next window 264 | ngx_time_travel(remaining_time + 0.1, function() 265 | local new_counter_key = get_counter_key(namespace, sample, ngx.now(), window_size) 266 | local ok, err = memcached.with_client(function(memc) 267 | return memc:add(new_counter_key, limit + 3, window_size * 2) 268 | end) 269 | assert.is_nil(err) 270 | assert.are.same(1, ok) 271 | 272 | local estimated_count, delay, err = sw:process_sample(sample) 273 | assert.is_nil(err) 274 | assert_floats_are_equal(window_size - 0.1, delay) 275 | assert.are.same(limit + 3, estimated_count) 276 | end) 277 | 
end) 278 | end) 279 | end) 280 | 281 | describe("when interaction with store fails or deminishes", function() 282 | it("returns nil and relevant error when store:get fails") 283 | it("returns nil and relevant error when store:incr fails") 284 | it("calculates correct estimated final count and desired delay when store:incr takes long") 285 | end) 286 | end) 287 | end) 288 | -------------------------------------------------------------------------------- /spec/resty/global_throttle/store/memcached_spec.lua: -------------------------------------------------------------------------------- 1 | local memcached_store = require("resty.global_throttle.store.memcached") 2 | 3 | local function incr_and_assert(store, key, delta, expected_value, expiry) 4 | local new_value, err = store:incr(key, delta, expiry) 5 | 6 | assert.is_nil(err) 7 | assert.are.same(expected_value, new_value) 8 | 9 | local actual_value, flags, err = memcached.get(key) 10 | assert.are.same('0', flags) 11 | assert.are.same(expected_value, tonumber(actual_value)) 12 | end 13 | 14 | describe("memcached", function() 15 | describe("new", function() 16 | it("requires host and port options", function() 17 | local store, err = memcached_store.new({}) 18 | assert.is_nil(store) 19 | assert.are.equals("'host' and 'port' options are required", err) 20 | 21 | stire, err = memcached_store.new({ host = "127.0.0.1" }) 22 | assert.is_nil(store) 23 | assert.are.equals("'host' and 'port' options are required", err) 24 | 25 | store, err = memcached_store.new({ port = "11211" }) 26 | assert.is_nil(stire) 27 | assert.are.equals("'host' and 'port' options are required", err) 28 | 29 | store, err = memcached_store.new({ host = "127.0.0.1", port = "11211" }) 30 | assert.is_not_nil(store) 31 | assert.is_nil(err) 32 | end) 33 | end) 34 | 35 | describe("incr and get", function() 36 | local store 37 | before_each(function() 38 | memcached.flush_all() 39 | 40 | local err 41 | store, err = memcached_store.new({ host = memcached.host, 
port = memcached.port }) 42 | assert.is_nil(err) 43 | end) 44 | 45 | it("adds new key", function() 46 | incr_and_assert(store, "client1", 1, 1, 2) 47 | end) 48 | 49 | it("increments existing key", function() 50 | incr_and_assert(store, "client2", 1, 1, 2) 51 | incr_and_assert(store, "client2", 2, 3, 2) 52 | end) 53 | 54 | it("sets correct expiry", function() 55 | incr_and_assert(store, "client3", 1, 1, 1) 56 | ngx.sleep(1) 57 | local value, flags, err = memcached.get("client3") 58 | assert.is_nil(value) 59 | assert.is_nil(flags) 60 | assert.is_nil(err) 61 | end) 62 | 63 | it("returns value for existing key", function() 64 | local key = "client4" 65 | local expected_value = 2 66 | 67 | incr_and_assert(store, key, 2, expected_value, 4) 68 | 69 | local value, err = store:get(key) 70 | assert.is_nil(err) 71 | assert.are.same(expected_value, value) 72 | end) 73 | 74 | it("returns value for existing key", function() 75 | local key = "client4" 76 | local value, err = store:get(key) 77 | assert.is_nil(err) 78 | assert.is_nil(value) 79 | end) 80 | 81 | it("fails to add when expiry is decimal", function() 82 | local new_value, err = store:incr("client", 1, 1.5) 83 | assert.is_nil(new_value) 84 | assert.are.same("add failed for key 'client', expiry '1.5': CLIENT_ERROR bad command line format", err) 85 | end) 86 | 87 | it("ignores expiry when incrementing existing counter", function() 88 | incr_and_assert(store, "client5", 1, 1, 1) 89 | incr_and_assert(store, "client5", 1, 2, 1.5) 90 | end) 91 | end) 92 | end) 93 | -------------------------------------------------------------------------------- /spec/resty/global_throttle_spec.lua: -------------------------------------------------------------------------------- 1 | describe("global_throttle", function() 2 | local global_throttle 3 | 4 | setup(function() 5 | global_throttle = require("resty.global_throttle") 6 | end) 7 | 8 | describe("new", function() 9 | it("requires namespace param", function() 10 | local my_throttle, err = 
global_throttle.new(nil, 10, 5) 11 | assert.are.same("'namespace' param is missing", err) 12 | assert.is_nil(my_throttle) 13 | end) 14 | 15 | it("requires namespace to have only letters and hyphen", function() 16 | local my_throttle, err = global_throttle.new("my throttle", 10, 5) 17 | assert.are.same("'namespace' can have only letters, digits and hyphens", err) 18 | assert.is_nil(my_throttle) 19 | 20 | my_throttle, err = global_throttle.new("my?throttle", 10, 5) 21 | assert.are.same("'namespace' can have only letters, digits and hyphens", err) 22 | assert.is_nil(my_throttle) 23 | 24 | my_throttle, err = global_throttle.new("my.throttle", 10, 5) 25 | assert.are.same("'namespace' can have only letters, digits and hyphens", err) 26 | assert.is_nil(my_throttle) 27 | 28 | my_throttle, err = global_throttle.new("my-thro-ttle122", 10, 5, { provider = "shared_dict", name = "my_global_throttle" }) 29 | assert.is_nil(err) 30 | assert.is_not_nil(my_throttle) 31 | end) 32 | 33 | it("requires namespace to have maximum length of 35 characters", function() 34 | local ns = "my-throttle-is-somethig-b-b-b-b-b-b-bb" 35 | local my_throttle, err = global_throttle.new(ns, 10, 5, { provider = "shared_dict", name = "my_global_throttle" }) 36 | assert.are.same("'namespace' can be at most 35 characters", err) 37 | assert.is_nil(my_throttle) 38 | end) 39 | 40 | it("requires store parameter", function() 41 | local my_throttle, err = global_throttle.new("my-throttle", 100, 5) 42 | 43 | assert.is_nil(my_throttle) 44 | assert.are.equals("'store_options' param is missing", err) 45 | end) 46 | 47 | it("requires store param to have provider attribute defined", function() 48 | local my_throttle, err = global_throttle.new("my-throttle", 100, 5, {}) 49 | 50 | assert.is_nil(my_throttle) 51 | assert.are.equals("error initiating the store: 'provider' attribute is missing", err) 52 | end) 53 | 54 | it("returns global throttle instance for Lua shared dict backend store", function() 55 | local my_throttle, 
err = global_throttle.new("my-throttle", 100, 5, { provider = "shared_dict", name = "my_global_throttle" } ) 56 | 57 | assert.is_nil(err) 58 | assert.is_not_nil(my_throttle) 59 | end) 60 | 61 | it("returns global throttle instance for memcached backend store", function() 62 | local my_throttle, err = global_throttle.new("my-throttle", 100, 5, 63 | { provider = "memcached", host = os.getenv("MEMCACHED_HOST"), port = "11211" } ) 64 | 65 | assert.is_nil(err) 66 | assert.is_not_nil(my_throttle) 67 | end) 68 | end) 69 | end) 70 | -------------------------------------------------------------------------------- /spec/run.lua: -------------------------------------------------------------------------------- 1 | local busted_runner 2 | do 3 | -- avoid warning during test runs caused by 4 | -- https://github.com/openresty/lua-nginx-module/blob/2524330e59f0a385a9c77d4d1b957476dce7cb33/src/ngx_http_lua_util.c#L810 5 | 6 | local traceback = require "debug".traceback 7 | 8 | setmetatable(_G, { __newindex = function(table, key, value) rawset(table, key, value) end }) 9 | busted_runner = require "busted.runner" 10 | 11 | -- if there's more constants need to be whitelisted for test runs, add here. 12 | local GLOBALS_ALLOWED_IN_TEST = { 13 | _TEST = true, 14 | ngx_time_travel = true, 15 | ngx_freeze_time = true, 16 | memcached = true, 17 | } 18 | local newindex = function(table, key, value) 19 | rawset(table, key, value) 20 | 21 | local phase = ngx.get_phase() 22 | if phase == "init_worker" or phase == "init" then 23 | return 24 | end 25 | 26 | -- we check only timer phase because resty-cli runs everything in timer phase 27 | if phase == "timer" and GLOBALS_ALLOWED_IN_TEST[key] then 28 | return 29 | end 30 | 31 | local message = "writing a global lua variable " .. key .. 32 | " which may lead to race conditions between concurrent requests, so prefer the use of 'local' variables " .. 
traceback('', 2) 33 | -- it's important to do print here because ngx.log is mocked below 34 | print(message) 35 | end 36 | setmetatable(_G, { __newindex = newindex }) 37 | end 38 | 39 | do 40 | -- following mocking let's us travel in time 41 | -- and freeze time 42 | 43 | local time_travel = 0 44 | local frozen_time 45 | 46 | local ngx_now = ngx.now 47 | _G.ngx.now = function() 48 | if frozen_time then 49 | return frozen_time + time_travel 50 | end 51 | return ngx_now() + time_travel 52 | end 53 | 54 | -- this function can be used in tests to travel in time 55 | _G.ngx_time_travel = function(offset, f) 56 | time_travel = offset 57 | f() 58 | time_travel = 0 59 | end 60 | 61 | _G.ngx_freeze_time = function(time, f) 62 | frozen_time = time 63 | f() 64 | frozen_time = nil 65 | end 66 | 67 | local memcached_host = os.getenv("MEMCACHED_HOST") 68 | local memcached_port = os.getenv("MEMCACHED_PORT") 69 | local with_memcached_client = function(command) 70 | local rm = require("resty.memcached") 71 | local memc, err = rm:new() 72 | local ok, err = memc:connect(memcached_host, memcached_port) 73 | if err then 74 | assert(err, "failed to connect to memcached: " .. 
err) 75 | end 76 | 77 | local ret1, ret2, ret3 = command(memc) 78 | 79 | memc:close() 80 | 81 | return ret1, ret2, ret3 82 | end 83 | 84 | _G.memcached = { 85 | host = memcached_host, 86 | port = memcached_port, 87 | with_client = with_memcached_client, 88 | flush_all = function() 89 | return with_memcached_client(function(memc) 90 | return memc:flush_all() 91 | end) 92 | end, 93 | get = function(key) 94 | return with_memcached_client(function(memc) 95 | return memc:get(key) 96 | end) 97 | end, 98 | } 99 | end 100 | 101 | busted_runner({ standalone = false }) 102 | -------------------------------------------------------------------------------- /t/sanity.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket::Lua 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | our $HttpConfig = qq( 7 | lua_package_path "$pwd/t/lib/?.lua;$pwd/lib/?.lua;;"; 8 | lua_shared_dict counters 1M; 9 | ); 10 | 11 | run_tests(); 12 | 13 | __DATA__ 14 | 15 | === TEST 1: all cases 16 | --- http_config eval: $::HttpConfig 17 | --- config 18 | location /protected { 19 | content_by_lua_block { 20 | local global_throttle = require "resty.global_throttle" 21 | local client_throttle = global_throttle.new(10, 0.2, { provider = "shared_dict", name = "counters" }) 22 | 23 | local args, err = ngx.req.get_uri_args() 24 | if err then 25 | ngx.status = 500 26 | ngx.say(err) 27 | return ngx.exit(ngx.HTTP_OK) 28 | end 29 | 30 | local key = args.api_client_id 31 | local should_throttle, err = client_throttle:process(key) 32 | if should_throttle then 33 | ngx.status = 429 34 | ngx.say("throttled") 35 | return ngx.exit(ngx.HTTP_OK) 36 | end 37 | 38 | ngx.exit(ngx.HTTP_OK) 39 | } 40 | } 41 | 42 | location = /t { 43 | content_by_lua_block { 44 | local res 45 | 46 | ngx.log(ngx.NOTICE, "Expect spike to be allowed in the beginning.") 47 | for i=1,10 do 48 | res = ngx.location.capture("/protected?api_client_id=2") 49 | if res.status ~= 200 then 
50 | ngx.status = res.status 51 | return ngx.exit(ngx.HTTP_OK) 52 | end 53 | end 54 | 55 | ngx.log(ngx.NOTICE, "Expect no throttling since requests will be sent under the configured rate.") 56 | ngx.sleep(0.19) -- we have to wait here because the first 10 requests were sent too fast 57 | for i=1,12 do 58 | -- ensure we are sending requests under the configured rate 59 | local jitter = math.random(10) / 10000 60 | local delay = 0.2 / 12 + jitter 61 | ngx.sleep(delay) 62 | 63 | res = ngx.location.capture("/protected?api_client_id=2") 64 | if res.status ~= 200 then 65 | ngx.status = res.status 66 | return ngx.exit(ngx.HTTP_OK) 67 | end 68 | end 69 | 70 | ngx.log(ngx.NOTICE, "Expect spike to be throttled because the algorithm remembers previous rate and smothen the load.") 71 | ngx.sleep(0.15) 72 | local throttled = false 73 | for i=1,10 do 74 | res = ngx.location.capture("/protected?api_client_id=2") 75 | if res.status == 429 then 76 | throttled = true 77 | goto continue1 78 | end 79 | end 80 | ::continue1:: 81 | if not throttled then 82 | ngx.status = 500 83 | return ngx.exit(ngx.HTTP_OK) 84 | end 85 | 86 | ngx.log(ngx.NOTICE, "Expect requests to be throttled because they will be sent faster.") 87 | ngx.sleep(0.15) 88 | throttled = false 89 | for i=1,15 do 90 | res = ngx.location.capture("/protected?api_client_id=2") 91 | if res.status == 429 then 92 | throttled = true 93 | goto continue2 94 | end 95 | -- ensure we are sending requests over the configured rate 96 | local delay = 0.15 / 15 97 | 98 | ngx.sleep(delay) 99 | end 100 | ::continue2:: 101 | if not throttled then 102 | ngx.status = 500 103 | return ngx.exit(ngx.HTTP_OK) 104 | end 105 | 106 | ngx.log(ngx.NOTICE, "Expect spike when using different key because this will be the first spike.") 107 | for i=1,10 do 108 | res = ngx.location.capture("/protected?api_client_id=1") 109 | if res.status ~= 200 then 110 | ngx.status = res.status 111 | return ngx.exit(ngx.HTTP_OK) 112 | end 113 | end 114 | 115 | ngx.status = 
res.status 116 | ngx.print(res.body) 117 | ngx.exit(ngx.HTTP_OK) 118 | } 119 | } 120 | --- request 121 | GET /t 122 | --- response_body 123 | --- error_code: 200 124 | --------------------------------------------------------------------------------