├── .gitattributes ├── .gitignore ├── dist.ini ├── Makefile ├── valgrind.suppress ├── lib └── resty │ └── limit │ ├── traffic.lua │ ├── conn.lua │ ├── count.lua │ ├── req.lua │ ├── count.md │ ├── traffic.md │ ├── req.md │ └── conn.md ├── .travis.yml ├── t ├── count.t ├── conn.t ├── req.t └── traffic.t └── README.md /.gitattributes: -------------------------------------------------------------------------------- 1 | *.t linguist-language=Text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *.swo 3 | *~ 4 | go 5 | t/servroot/ 6 | reindex 7 | nginx 8 | ctags 9 | tags 10 | *.t_ 11 | -------------------------------------------------------------------------------- /dist.ini: -------------------------------------------------------------------------------- 1 | name=lua-resty-limit-traffic 2 | abstract=Lua library for limiting and controlling traffic in OpenResty/ngx_lua 3 | author=Yichun "agentzh" Zhang (agentzh) 4 | is_original=yes 5 | license=2bsd 6 | lib_dir=lib 7 | doc_dir=lib 8 | repo_link=https://github.com/openresty/lua-resty-limit-traffic 9 | main_module=lib/resty/limit/traffic.lua 10 | requires = luajit >= 2.1.0, ngx_http_lua >= 0.10.6 11 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OPENRESTY_PREFIX=/usr/local/openresty 2 | 3 | #LUA_VERSION := 5.1 4 | PREFIX ?= /usr/local 5 | LUA_INCLUDE_DIR ?= $(PREFIX)/include 6 | LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) 7 | INSTALL ?= install 8 | 9 | .PHONY: all test install 10 | 11 | all: ; 12 | 13 | install: all 14 | $(INSTALL) -d $(DESTDIR)$(LUA_LIB_DIR)/resty/limit/ 15 | $(INSTALL) lib/resty/limit/*.lua $(DESTDIR)$(LUA_LIB_DIR)/resty/limit/ 16 | 17 | test: all 18 | PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t 19 | 
20 | -------------------------------------------------------------------------------- /valgrind.suppress: -------------------------------------------------------------------------------- 1 | { 2 | 3 | Memcheck:Leak 4 | fun:malloc 5 | fun:ngx_alloc 6 | fun:ngx_event_process_init 7 | } 8 | { 9 | 10 | Memcheck:Param 11 | epoll_ctl(event) 12 | fun:epoll_ctl 13 | fun:ngx_epoll_add_event 14 | } 15 | { 16 | 17 | Memcheck:Cond 18 | fun:index 19 | fun:expand_dynamic_string_token 20 | fun:_dl_map_object 21 | fun:map_doit 22 | fun:_dl_catch_error 23 | fun:do_preload 24 | fun:dl_main 25 | fun:_dl_sysdep_start 26 | fun:_dl_start 27 | } 28 | { 29 | 30 | Memcheck:Param 31 | epoll_ctl(event) 32 | fun:epoll_ctl 33 | fun:ngx_epoll_init 34 | fun:ngx_event_process_init 35 | } 36 | { 37 | 38 | Memcheck:Param 39 | epoll_ctl(event) 40 | fun:epoll_ctl 41 | fun:ngx_epoll_notify_init 42 | fun:ngx_epoll_init 43 | fun:ngx_event_process_init 44 | } 45 | { 46 | 47 | Memcheck:Param 48 | epoll_ctl(event) 49 | fun:epoll_ctl 50 | fun:ngx_epoll_test_rdhup 51 | } 52 | { 53 | 54 | Memcheck:Leak 55 | match-leak-kinds: definite 56 | fun:malloc 57 | fun:ngx_alloc 58 | fun:ngx_set_environment 59 | fun:ngx_single_process_cycle 60 | } 61 | { 62 | 63 | Memcheck:Leak 64 | match-leak-kinds: definite 65 | fun:malloc 66 | fun:ngx_alloc 67 | fun:ngx_set_environment 68 | fun:ngx_worker_process_init 69 | fun:ngx_worker_process_cycle 70 | } 71 | -------------------------------------------------------------------------------- /lib/resty/limit/traffic.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Yichun Zhang (agentzh) 2 | -- 3 | -- This is an aggregator for various concrete traffic limiter instances 4 | -- (like instances of the resty.limit.req, resty.limit.count and 5 | -- resty.limit.conn classes). 6 | 7 | 8 | local max = math.max 9 | 10 | 11 | local _M = { 12 | _VERSION = '0.09' 13 | } 14 | 15 | 16 | -- the states table is user supplied. 
each element stores the 2nd return value 17 | -- of each limiter if there is no error returned. for resty.limit.req, the state 18 | -- is the "excess" value (i.e., the number of excessive requests each second), 19 | -- and for resty.limit.conn, the state is the current concurrency level 20 | -- (including the current new connection). 21 | function _M.combine(limiters, keys, states) 22 | local n = #limiters 23 | local max_delay = 0 24 | for i = 1, n do 25 | local lim = limiters[i] 26 | local delay, err = lim:incoming(keys[i], i == n) 27 | if not delay then 28 | return nil, err 29 | end 30 | if i == n then 31 | if states then 32 | states[i] = err 33 | end 34 | max_delay = delay 35 | end 36 | end 37 | for i = 1, n - 1 do 38 | local lim = limiters[i] 39 | local delay, err = lim:incoming(keys[i], true) 40 | if not delay then 41 | for j = 1, i - 1 do 42 | -- we intentionally ignore any errors returned below. 43 | limiters[j]:uncommit(keys[j]) 44 | end 45 | limiters[n]:uncommit(keys[n]) 46 | return nil, err 47 | end 48 | if states then 49 | states[i] = err 50 | end 51 | 52 | max_delay = max(max_delay, delay) 53 | end 54 | return max_delay 55 | end 56 | 57 | 58 | return _M 59 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: required 2 | dist: focal 3 | 4 | branches: 5 | only: 6 | - "master" 7 | 8 | os: linux 9 | 10 | language: c 11 | 12 | compiler: 13 | - gcc 14 | 15 | env: 16 | global: 17 | - JOBS=3 18 | - NGX_BUILD_JOBS=$JOBS 19 | - LUAJIT_PREFIX=/opt/luajit21 20 | - LUAJIT_LIB=$LUAJIT_PREFIX/lib 21 | - LUAJIT_INC=$LUAJIT_PREFIX/include/luajit-2.1 22 | - LUA_INCLUDE_DIR=$LUAJIT_INC 23 | - LUA_CMODULE_DIR=/lib 24 | - OPENSSL_PREFIX=/usr/local/openresty/openssl3 25 | - OPENSSL_LIB=$OPENSSL_PREFIX/lib 26 | - OPENSSL_INC=$OPENSSL_PREFIX/include 27 | - TEST_NGINX_MYSQL_PATH=/var/run/mysqld/mysqld.sock 28 | - 
LD_LIBRARY_PATH=$LUAJIT_LIB:$LD_LIBRARY_PATH 29 | - TEST_NGINX_SLEEP=0.006 30 | matrix: 31 | - NGINX_VERSION=1.29.4 OPENSSL_VER=1.1.1w OPENSSL_PATCH_VER=1.1.1f 32 | 33 | before_install: 34 | - sudo apt-get install -qq -y axel cpanminus > build.log 2>&1 || (cat build.log && exit 1) 35 | - wget -O - https://openresty.org/package/pubkey.gpg | sudo apt-key add - 36 | - echo "deb http://openresty.org/package/ubuntu $(lsb_release -sc) main" | sudo tee /etc/apt/sources.list.d/openresty.list 37 | - sudo apt-get update 38 | - sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends openresty-pcre2 openresty-openssl3 openresty-pcre2-dev openresty-openssl3-dev 39 | 40 | install: 41 | - git clone https://github.com/openresty/test-nginx.git 42 | - git clone https://github.com/openresty/openresty.git ../openresty 43 | - git clone https://github.com/openresty/nginx-devel-utils.git 44 | - git clone https://github.com/openresty/lua-cjson.git 45 | - git clone https://github.com/openresty/lua-nginx-module.git ../lua-nginx-module 46 | - git clone https://github.com/openresty/no-pool-nginx.git ../no-pool-nginx 47 | - git clone https://github.com/openresty/lua-resty-lrucache.git ../lua-resty-lrucache 48 | - git clone https://github.com/openresty/lua-resty-core.git ../lua-resty-core 49 | - git clone -b v2.1-agentzh https://github.com/openresty/luajit2.git 50 | 51 | script: 52 | - cd test-nginx && (sudo cpanm . > build.log 2>&1 || (cat build.log && exit 1)) && cd .. 53 | - cd luajit2/ 54 | - make -j$JOBS CCDEBUG=-g Q= PREFIX=$LUAJIT_PREFIX CC=$CC XCFLAGS='-DLUA_USE_APICHECK -DLUA_USE_ASSERT -msse4.2' > build.log 2>&1 || (cat build.log && exit 1) 55 | - sudo make install PREFIX=$LUAJIT_PREFIX > build.log 2>&1 || (cat build.log && exit 1) 56 | - cd ../lua-cjson && make && sudo PATH=$PATH make install && cd .. 
57 | - export PATH=$PWD/work/nginx/sbin:$PWD/nginx-devel-utils:$PATH 58 | - ngx-build $NGINX_VERSION --without-pcre2 --with-ipv6 --with-http_realip_module --with-http_ssl_module --add-module=../lua-nginx-module --with-debug --with-cc-opt="-I$OPENSSL_INC" --with-ld-opt="-L$OPENSSL_LIB -Wl,-rpath,$OPENSSL_LIB" > build.log 2>&1 || (cat build.log && exit 1) 59 | - prove -I. -r t 60 | 61 | -------------------------------------------------------------------------------- /lib/resty/limit/conn.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Yichun Zhang (agentzh) 2 | -- 3 | -- This library is an enhanced Lua port of the standard ngx_limit_conn 4 | -- module. 5 | 6 | 7 | local math = require "math" 8 | 9 | 10 | local setmetatable = setmetatable 11 | local floor = math.floor 12 | local ngx_shared = ngx.shared 13 | local assert = assert 14 | 15 | 16 | local _M = { 17 | _VERSION = '0.09' 18 | } 19 | 20 | 21 | local mt = { 22 | __index = _M 23 | } 24 | 25 | 26 | function _M.new(dict_name, max, burst, default_conn_delay) 27 | local dict = ngx_shared[dict_name] 28 | if not dict then 29 | return nil, "shared dict not found" 30 | end 31 | 32 | assert(max > 0 and burst >= 0 and default_conn_delay > 0) 33 | 34 | local self = { 35 | dict = dict, 36 | max = max + 0, -- just to ensure the param is good 37 | burst = burst, 38 | unit_delay = default_conn_delay, 39 | } 40 | 41 | return setmetatable(self, mt) 42 | end 43 | 44 | 45 | function _M.incoming(self, key, commit) 46 | local dict = self.dict 47 | local max = self.max 48 | 49 | self.committed = false 50 | 51 | local conn, err 52 | if commit then 53 | conn, err = dict:incr(key, 1, 0) 54 | if not conn then 55 | return nil, err 56 | end 57 | 58 | if conn > max + self.burst then 59 | conn, err = dict:incr(key, -1) 60 | if not conn then 61 | return nil, err 62 | end 63 | return nil, "rejected" 64 | end 65 | self.committed = true 66 | 67 | else 68 | conn = (dict:get(key) or 0) + 1 
69 | if conn > max + self.burst then 70 | return nil, "rejected" 71 | end 72 | end 73 | 74 | if conn > max then 75 | -- make the excessive connections wait 76 | return self.unit_delay * floor((conn - 1) / max), conn 77 | end 78 | 79 | -- we return a 0 delay by default 80 | return 0, conn 81 | end 82 | 83 | 84 | function _M.is_committed(self) 85 | return self.committed 86 | end 87 | 88 | 89 | function _M.leaving(self, key, req_latency) 90 | assert(key) 91 | local dict = self.dict 92 | 93 | local conn, err = dict:incr(key, -1) 94 | if not conn then 95 | return nil, err 96 | end 97 | 98 | if req_latency then 99 | local unit_delay = self.unit_delay 100 | self.unit_delay = (req_latency + unit_delay) / 2 101 | end 102 | 103 | return conn 104 | end 105 | 106 | 107 | function _M.uncommit(self, key) 108 | assert(key) 109 | local dict = self.dict 110 | 111 | return dict:incr(key, -1) 112 | end 113 | 114 | 115 | function _M.set_conn(self, conn) 116 | self.max = conn 117 | end 118 | 119 | 120 | function _M.set_burst(self, burst) 121 | self.burst = burst 122 | end 123 | 124 | 125 | return _M 126 | -------------------------------------------------------------------------------- /lib/resty/limit/count.lua: -------------------------------------------------------------------------------- 1 | -- implement GitHub request rate limiting: 2 | -- https://developer.github.com/v3/#rate-limiting 3 | 4 | local ngx_shared = ngx.shared 5 | local setmetatable = setmetatable 6 | local assert = assert 7 | 8 | 9 | local _M = { 10 | _VERSION = '0.09' 11 | } 12 | 13 | 14 | local mt = { 15 | __index = _M 16 | } 17 | 18 | local incr_support_init_ttl 19 | local ngx_config = ngx.config 20 | local ngx_lua_v = ngx.config.ngx_lua_version 21 | if not ngx_config or not ngx_lua_v or (ngx_lua_v < 10012) then 22 | incr_support_init_ttl = false 23 | else 24 | incr_support_init_ttl = true 25 | end 26 | 27 | 28 | -- the "limit" argument controls number of request allowed in a time window. 
29 | -- time "window" argument controls the time window in seconds. 30 | function _M.new(dict_name, limit, window) 31 | local dict = ngx_shared[dict_name] 32 | if not dict then 33 | return nil, "shared dict not found" 34 | end 35 | 36 | assert(limit > 0 and window > 0) 37 | 38 | local self = { 39 | dict = dict, 40 | limit = limit, 41 | window = window, 42 | } 43 | 44 | return setmetatable(self, mt) 45 | end 46 | 47 | -- incoming function using incr with init_ttl 48 | -- need OpenResty version > v0.10.12rc2 49 | local function incoming_new(self, key, commit) 50 | local dict = self.dict 51 | local limit = self.limit 52 | local window = self.window 53 | 54 | local remaining, err 55 | 56 | if commit then 57 | remaining, err = dict:incr(key, -1, limit, window) 58 | if not remaining then 59 | return nil, err 60 | end 61 | else 62 | remaining = (dict:get(key) or limit) - 1 63 | end 64 | 65 | if remaining < 0 then 66 | return nil, "rejected" 67 | end 68 | 69 | return 0, remaining 70 | end 71 | 72 | -- incoming function using incr and expire 73 | local function incoming_old(self, key, commit) 74 | local dict = self.dict 75 | local limit = self.limit 76 | local window = self.window 77 | 78 | local remaining, ok, err 79 | 80 | if commit then 81 | remaining, err = dict:incr(key, -1, limit) 82 | if not remaining then 83 | return nil, err 84 | end 85 | 86 | if remaining == limit - 1 then 87 | ok, err = dict:expire(key, window) 88 | if not ok then 89 | if err == "not found" then 90 | remaining, err = dict:incr(key, -1, limit) 91 | if not remaining then 92 | return nil, err 93 | end 94 | 95 | ok, err = dict:expire(key, window) 96 | if not ok then 97 | return nil, err 98 | end 99 | 100 | else 101 | return nil, err 102 | end 103 | end 104 | end 105 | 106 | else 107 | remaining = (dict:get(key) or limit) - 1 108 | end 109 | 110 | if remaining < 0 then 111 | return nil, "rejected" 112 | end 113 | 114 | return 0, remaining 115 | end 116 | 117 | _M.incoming = incr_support_init_ttl and 
incoming_new or incoming_old 118 | 119 | -- uncommit remaining and return remaining value 120 | function _M.uncommit(self, key) 121 | assert(key) 122 | local dict = self.dict 123 | local limit = self.limit 124 | 125 | local remaining, err = dict:incr(key, 1) 126 | if not remaining then 127 | if err == "not found" then 128 | remaining = limit 129 | else 130 | return nil, err 131 | end 132 | end 133 | 134 | return remaining 135 | end 136 | 137 | 138 | return _M 139 | -------------------------------------------------------------------------------- /lib/resty/limit/req.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) Yichun Zhang (agentzh) 2 | -- 3 | -- This library is an approximate Lua port of the standard ngx_limit_req 4 | -- module. 5 | 6 | 7 | local ffi = require "ffi" 8 | local math = require "math" 9 | 10 | 11 | local ngx_shared = ngx.shared 12 | local ngx_now = ngx.now 13 | local setmetatable = setmetatable 14 | local ffi_cast = ffi.cast 15 | local ffi_str = ffi.string 16 | local abs = math.abs 17 | local tonumber = tonumber 18 | local type = type 19 | local assert = assert 20 | local max = math.max 21 | 22 | 23 | -- TODO: we could avoid the tricky FFI cdata when lua_shared_dict supports 24 | -- hash-typed values as in redis. 
25 | ffi.cdef[[ 26 | struct lua_resty_limit_req_rec { 27 | unsigned long excess; 28 | uint64_t last; /* time in milliseconds */ 29 | /* integer value, 1 corresponds to 0.001 r/s */ 30 | }; 31 | ]] 32 | local const_rec_ptr_type = ffi.typeof("const struct lua_resty_limit_req_rec*") 33 | local rec_size = ffi.sizeof("struct lua_resty_limit_req_rec") 34 | 35 | -- we can share the cdata here since we only need it temporarily for 36 | -- serialization inside the shared dict: 37 | local rec_cdata = ffi.new("struct lua_resty_limit_req_rec") 38 | 39 | 40 | local _M = { 41 | _VERSION = '0.09' 42 | } 43 | 44 | 45 | local mt = { 46 | __index = _M 47 | } 48 | 49 | 50 | function _M.new(dict_name, rate, burst) 51 | local dict = ngx_shared[dict_name] 52 | if not dict then 53 | return nil, "shared dict not found" 54 | end 55 | 56 | assert(rate > 0 and burst >= 0) 57 | 58 | local self = { 59 | dict = dict, 60 | rate = rate * 1000, 61 | burst = burst * 1000, 62 | } 63 | 64 | return setmetatable(self, mt) 65 | end 66 | 67 | 68 | -- sees an new incoming event 69 | -- the "commit" argument controls whether should we record the event in shm. 70 | -- FIXME we have a (small) race-condition window between dict:get() and 71 | -- dict:set() across multiple nginx worker processes. The size of the 72 | -- window is proportional to the number of workers. 73 | function _M.incoming(self, key, commit) 74 | local dict = self.dict 75 | local rate = self.rate 76 | local now = ngx_now() * 1000 77 | 78 | local excess 79 | 80 | -- it's important to anchor the string value for the read-only pointer 81 | -- cdata: 82 | local v = dict:get(key) 83 | if v then 84 | if type(v) ~= "string" or #v ~= rec_size then 85 | return nil, "shdict abused by other users" 86 | end 87 | local rec = ffi_cast(const_rec_ptr_type, v) 88 | local elapsed = now - tonumber(rec.last) 89 | 90 | -- print("elapsed: ", elapsed, "ms") 91 | 92 | -- we do not handle changing rate values specifically. 
the excess value 93 | -- can get automatically adjusted by the following formula with new rate 94 | -- values rather quickly anyway. 95 | excess = max(tonumber(rec.excess) - rate * abs(elapsed) / 1000 + 1000, 96 | 0) 97 | 98 | -- print("excess: ", excess) 99 | 100 | if excess > self.burst then 101 | return nil, "rejected" 102 | end 103 | 104 | else 105 | excess = 0 106 | end 107 | 108 | if commit then 109 | rec_cdata.excess = excess 110 | rec_cdata.last = now 111 | dict:set(key, ffi_str(rec_cdata, rec_size)) 112 | end 113 | 114 | -- return the delay in seconds, as well as excess 115 | return excess / rate, excess / 1000 116 | end 117 | 118 | 119 | function _M.uncommit(self, key) 120 | assert(key) 121 | local dict = self.dict 122 | 123 | local v = dict:get(key) 124 | if not v then 125 | return nil, "not found" 126 | end 127 | 128 | if type(v) ~= "string" or #v ~= rec_size then 129 | return nil, "shdict abused by other users" 130 | end 131 | 132 | local rec = ffi_cast(const_rec_ptr_type, v) 133 | 134 | local excess = max(tonumber(rec.excess) - 1000, 0) 135 | 136 | rec_cdata.excess = excess 137 | rec_cdata.last = rec.last 138 | dict:set(key, ffi_str(rec_cdata, rec_size)) 139 | return true 140 | end 141 | 142 | 143 | function _M.set_rate(self, rate) 144 | self.rate = rate * 1000 145 | end 146 | 147 | 148 | function _M.set_burst(self, burst) 149 | self.burst = burst * 1000 150 | end 151 | 152 | 153 | return _M 154 | -------------------------------------------------------------------------------- /t/count.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (blocks() * 4); 9 | 10 | #no_diff(); 11 | #no_long_string(); 12 | 13 | my $pwd = cwd(); 14 | 15 | our $HttpConfig = <<_EOC_; 16 | lua_package_path 
"$pwd/../lua-resty-core/lib/?.lua;../lua-resty-lrucache/lib/?.lua;$pwd/lib/?.lua;;"; 17 | init_by_lua_block { 18 | local v = require "jit.v" 19 | -- v.on("/tmp/a.dump") 20 | require "resty.core" 21 | } 22 | lua_shared_dict store 1m; 23 | _EOC_ 24 | 25 | no_long_string(); 26 | run_tests(); 27 | 28 | __DATA__ 29 | 30 | === TEST 1: a single key (always commit) 31 | --- http_config eval: $::HttpConfig 32 | --- config 33 | location = /t { 34 | content_by_lua_block { 35 | local limit_count = require "resty.limit.count" 36 | ngx.shared.store:flush_all() 37 | local lim = limit_count.new("store", 10, 100) 38 | local uri = ngx.var.uri 39 | for i = 1, 12 do 40 | local delay, err = lim:incoming(uri, true) 41 | if not delay then 42 | ngx.say(err) 43 | else 44 | local remaining = err 45 | ngx.say("remaining: ", remaining) 46 | end 47 | end 48 | } 49 | } 50 | --- request 51 | GET /t 52 | --- response_body 53 | remaining: 9 54 | remaining: 8 55 | remaining: 7 56 | remaining: 6 57 | remaining: 5 58 | remaining: 4 59 | remaining: 3 60 | remaining: 2 61 | remaining: 1 62 | remaining: 0 63 | rejected 64 | rejected 65 | --- no_error_log 66 | [error] 67 | [lua] 68 | 69 | 70 | 71 | === TEST 2: multiple keys 72 | --- http_config eval: $::HttpConfig 73 | --- config 74 | location = /t { 75 | content_by_lua_block { 76 | local limit_count = require "resty.limit.count" 77 | ngx.shared.store:flush_all() 78 | local lim = limit_count.new("store", 1, 10) 79 | local delay1, err1 = lim:incoming("foo", true) 80 | local delay2, err2 = lim:incoming("foo", true) 81 | local delay3, err3 = lim:incoming("bar", true) 82 | local delay4, err4 = lim:incoming("bar", true) 83 | if not delay1 then 84 | ngx.say(err1) 85 | else 86 | local remaining1 = err1 87 | ngx.say("remaining1: ", remaining1) 88 | end 89 | 90 | if not delay2 then 91 | ngx.say(err2) 92 | else 93 | local remaining2 = err2 94 | ngx.say("remaining2: ", remaining2) 95 | end 96 | 97 | if not delay3 then 98 | ngx.say(err3) 99 | else 100 | local 
remaining3 = err3 101 | ngx.say("remaining3: ", remaining3) 102 | end 103 | 104 | if not delay4 then 105 | ngx.say(err4) 106 | else 107 | local remaining4 = err4 108 | ngx.say("remaining4: ", remaining4) 109 | end 110 | } 111 | } 112 | --- request 113 | GET /t 114 | --- response_body 115 | remaining1: 0 116 | rejected 117 | remaining3: 0 118 | rejected 119 | --- no_error_log 120 | [error] 121 | [lua] 122 | 123 | 124 | 125 | === TEST 3: reset limit window 126 | --- http_config eval: $::HttpConfig 127 | --- config 128 | location = /t { 129 | content_by_lua_block { 130 | local limit_count = require "resty.limit.count" 131 | ngx.shared.store:flush_all() 132 | local lim = limit_count.new("store", 1, 1) 133 | 134 | local uri = ngx.var.uri 135 | for i = 1, 2 do 136 | local delay, err = lim:incoming(uri, true) 137 | if not delay then 138 | ngx.say(err) 139 | else 140 | local remaining = err 141 | ngx.say("remaining: ", remaining) 142 | end 143 | 144 | local delay, err = lim:incoming(uri, true) 145 | if not delay then 146 | ngx.say(err) 147 | else 148 | local remaining = err 149 | ngx.say("remaining: ", remaining) 150 | end 151 | ngx.sleep(1) 152 | end 153 | } 154 | } 155 | --- request 156 | GET /t 157 | --- response_body 158 | remaining: 0 159 | rejected 160 | remaining: 0 161 | rejected 162 | --- no_error_log 163 | [error] 164 | [lua] 165 | 166 | 167 | 168 | === TEST 4: a single key (do not commit since the 3rd time) 169 | --- http_config eval: $::HttpConfig 170 | --- config 171 | location = /t { 172 | content_by_lua_block { 173 | local limit_count = require "resty.limit.count" 174 | ngx.shared.store:flush_all() 175 | local lim = limit_count.new("store", 5, 10) 176 | local begin = ngx.time() 177 | 178 | for i = 1, 4 do 179 | local delay, err = lim:incoming("foo", i < 3) 180 | if not delay then 181 | ngx.say(err) 182 | else 183 | local remaining = err 184 | ngx.say("remaining: ", remaining) 185 | end 186 | end 187 | } 188 | } 189 | --- request 190 | GET /t 191 | --- 
response_body 192 | remaining: 4 193 | remaining: 3 194 | remaining: 2 195 | remaining: 2 196 | --- no_error_log 197 | [error] 198 | [lua] 199 | 200 | 201 | 202 | === TEST 5: a single key (commit & uncommit) 203 | --- http_config eval: $::HttpConfig 204 | --- config 205 | location = /t { 206 | content_by_lua_block { 207 | local limit_count = require "resty.limit.count" 208 | local lim = limit_count.new("store", 2, 10) 209 | ngx.shared.store:flush_all() 210 | local key = "foo" 211 | for i = 1, 3 do 212 | local delay, err = lim:incoming(key, true) 213 | if not delay then 214 | ngx.say("failed to limit count: ", err) 215 | else 216 | local remaining = err 217 | ngx.say("remaining: ", remaining) 218 | end 219 | local ok, err = lim:uncommit(key) 220 | if not ok then 221 | ngx.say("failed to uncommit: ", err) 222 | end 223 | end 224 | } 225 | } 226 | --- request 227 | GET /t 228 | --- response_body 229 | remaining: 1 230 | remaining: 1 231 | remaining: 1 232 | --- no_error_log 233 | [error] 234 | [lua] 235 | -------------------------------------------------------------------------------- /t/conn.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (blocks() * 4); 9 | 10 | #no_diff(); 11 | #no_long_string(); 12 | 13 | my $pwd = cwd(); 14 | 15 | our $HttpConfig = <<_EOC_; 16 | lua_package_path "$pwd/lib/?.lua;;"; 17 | _EOC_ 18 | 19 | no_long_string(); 20 | run_tests(); 21 | 22 | __DATA__ 23 | 24 | === TEST 1: a single key (always commit, and no leaving) 25 | --- http_config eval 26 | " 27 | $::HttpConfig 28 | 29 | lua_shared_dict store 1m; 30 | " 31 | --- config 32 | location = /t { 33 | content_by_lua ' 34 | local limit_conn = require "resty.limit.conn" 35 | local lim = limit_conn.new("store", 2, 8, 1) 36 | ngx.shared.store:flush_all() 37 | local key = "foo" 38 | for i = 1, 12 do 
39 | local delay, err = lim:incoming(key, true) 40 | if not delay then 41 | ngx.say("failed to limit conn: ", err) 42 | else 43 | local conn = err 44 | ngx.say(i, ": ", delay, ", conn: ", conn) 45 | ngx.say("committed: ", lim:is_committed()) 46 | end 47 | end 48 | '; 49 | } 50 | --- request 51 | GET /t 52 | --- response_body 53 | 1: 0, conn: 1 54 | committed: true 55 | 2: 0, conn: 2 56 | committed: true 57 | 3: 1, conn: 3 58 | committed: true 59 | 4: 1, conn: 4 60 | committed: true 61 | 5: 2, conn: 5 62 | committed: true 63 | 6: 2, conn: 6 64 | committed: true 65 | 7: 3, conn: 7 66 | committed: true 67 | 8: 3, conn: 8 68 | committed: true 69 | 9: 4, conn: 9 70 | committed: true 71 | 10: 4, conn: 10 72 | committed: true 73 | failed to limit conn: rejected 74 | failed to limit conn: rejected 75 | --- no_error_log 76 | [error] 77 | [lua] 78 | 79 | 80 | 81 | === TEST 2: a single key (sometimes not commit, and no leaving) 82 | --- http_config eval 83 | " 84 | $::HttpConfig 85 | 86 | lua_shared_dict store 1m; 87 | " 88 | --- config 89 | location = /t { 90 | content_by_lua ' 91 | local limit_conn = require "resty.limit.conn" 92 | local lim = limit_conn.new("store", 2, 8, 1) 93 | ngx.shared.store:flush_all() 94 | local key = "foo" 95 | for i = 1, 12 do 96 | local delay, err = lim:incoming(key, i == 3 or i == 5) 97 | if not delay then 98 | ngx.say("failed to limit conn: ", err) 99 | else 100 | local conn = err 101 | ngx.say(i, ": ", delay, ", conn: ", conn) 102 | ngx.say("committed: ", lim:is_committed()) 103 | end 104 | end 105 | '; 106 | } 107 | --- request 108 | GET /t 109 | --- response_body 110 | 1: 0, conn: 1 111 | committed: false 112 | 2: 0, conn: 1 113 | committed: false 114 | 3: 0, conn: 1 115 | committed: true 116 | 4: 0, conn: 2 117 | committed: false 118 | 5: 0, conn: 2 119 | committed: true 120 | 6: 1, conn: 3 121 | committed: false 122 | 7: 1, conn: 3 123 | committed: false 124 | 8: 1, conn: 3 125 | committed: false 126 | 9: 1, conn: 3 127 | committed: false 
128 | 10: 1, conn: 3 129 | committed: false 130 | 11: 1, conn: 3 131 | committed: false 132 | 12: 1, conn: 3 133 | committed: false 134 | --- no_error_log 135 | [error] 136 | [lua] 137 | 138 | 139 | 140 | === TEST 3: a single key (always commit, and with random leaving) 141 | --- http_config eval 142 | " 143 | $::HttpConfig 144 | 145 | lua_shared_dict store 1m; 146 | " 147 | --- config 148 | location = /t { 149 | content_by_lua ' 150 | local limit_conn = require "resty.limit.conn" 151 | local lim = limit_conn.new("store", 2, 8, 1) 152 | ngx.shared.store:flush_all() 153 | local key = "foo" 154 | for i = 1, 12 do 155 | local delay, err = lim:incoming(key, true) 156 | if not delay then 157 | ngx.say("failed to limit conn: ", err) 158 | else 159 | local conn = err 160 | ngx.say(i, ": ", delay, ", conn: ", conn) 161 | if i == 4 or i == 7 then 162 | local conn, err = lim:leaving(key) 163 | if not conn then 164 | ngx.say("leaving failed: ", err) 165 | else 166 | ngx.say("leaving. conn: ", conn) 167 | end 168 | end 169 | end 170 | end 171 | '; 172 | } 173 | --- request 174 | GET /t 175 | --- response_body 176 | 1: 0, conn: 1 177 | 2: 0, conn: 2 178 | 3: 1, conn: 3 179 | 4: 1, conn: 4 180 | leaving. conn: 3 181 | 5: 1, conn: 4 182 | 6: 2, conn: 5 183 | 7: 2, conn: 6 184 | leaving. 
conn: 5 185 | 8: 2, conn: 6 186 | 9: 3, conn: 7 187 | 10: 3, conn: 8 188 | 11: 4, conn: 9 189 | 12: 4, conn: 10 190 | --- no_error_log 191 | [error] 192 | [lua] 193 | 194 | 195 | 196 | === TEST 4: a single key (commit & uncommit) 197 | --- http_config eval 198 | " 199 | $::HttpConfig 200 | 201 | lua_shared_dict store 1m; 202 | " 203 | --- config 204 | location = /t { 205 | content_by_lua ' 206 | local limit_conn = require "resty.limit.conn" 207 | local lim = limit_conn.new("store", 2, 8, 1) 208 | ngx.shared.store:flush_all() 209 | local key = "foo" 210 | for i = 1, 3 do 211 | local delay, err = lim:incoming(key, true) 212 | if not delay then 213 | ngx.say("failed to limit conn: ", err) 214 | else 215 | local conn = err 216 | ngx.say(i, ": ", delay, ", conn: ", conn) 217 | ngx.say("committed: ", lim:is_committed()) 218 | end 219 | local ok, err = lim:uncommit(key) 220 | if not ok then 221 | ngx.say("failed to uncommit: ", err) 222 | end 223 | end 224 | '; 225 | } 226 | --- request 227 | GET /t 228 | --- response_body 229 | 1: 0, conn: 1 230 | committed: true 231 | 2: 0, conn: 1 232 | committed: true 233 | 3: 0, conn: 1 234 | committed: true 235 | --- no_error_log 236 | [error] 237 | [lua] 238 | 239 | 240 | 241 | === TEST 5: a single key (set_conn && set_burst) 242 | --- http_config eval 243 | " 244 | $::HttpConfig 245 | 246 | lua_shared_dict store 1m; 247 | " 248 | --- config 249 | location = /t { 250 | content_by_lua_block { 251 | local limit_conn = require "resty.limit.conn" 252 | local lim = limit_conn.new("store", 2, 1, 1) 253 | ngx.shared.store:flush_all() 254 | local key = "foo" 255 | for i = 1, 10 do 256 | local delay, err = lim:incoming(key, true) 257 | if not delay then 258 | ngx.say("failed to limit conn: ", err) 259 | else 260 | local conn = err 261 | ngx.say(i, ": ", delay, ", conn: ", conn) 262 | end 263 | if i == 4 then 264 | ngx.say("set_conn() && set_burst()") 265 | lim:set_conn(5) 266 | lim:set_burst(2) 267 | end 268 | end 269 | } 270 | } 271 | --- 
request 272 | GET /t 273 | --- response_body 274 | 1: 0, conn: 1 275 | 2: 0, conn: 2 276 | 3: 1, conn: 3 277 | failed to limit conn: rejected 278 | set_conn() && set_burst() 279 | 5: 0, conn: 4 280 | 6: 0, conn: 5 281 | 7: 1, conn: 6 282 | 8: 1, conn: 7 283 | failed to limit conn: rejected 284 | failed to limit conn: rejected 285 | --- no_error_log 286 | [error] 287 | [lua] 288 | -------------------------------------------------------------------------------- /t/req.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (blocks() * 4); 9 | 10 | #no_diff(); 11 | #no_long_string(); 12 | 13 | my $pwd = cwd(); 14 | 15 | our $HttpConfig = <<_EOC_; 16 | lua_package_path "$pwd/lib/?.lua;;"; 17 | _EOC_ 18 | 19 | no_long_string(); 20 | run_tests(); 21 | 22 | __DATA__ 23 | 24 | === TEST 1: a single key (always commit) 25 | --- http_config eval 26 | qq{ 27 | $::HttpConfig 28 | 29 | init_by_lua_block { 30 | local v = require "jit.v" 31 | -- v.on("/tmp/a.dump") 32 | } 33 | lua_shared_dict store 1m; 34 | } 35 | --- config 36 | location = /t { 37 | content_by_lua ' 38 | local limit_req = require "resty.limit.req" 39 | ngx.shared.store:flush_all() 40 | local lim = limit_req.new("store", 40, 40) 41 | local begin = ngx.now() 42 | local uri = ngx.var.uri 43 | for i = 1, 80 do 44 | local delay, err = lim:incoming(uri, true) 45 | if not delay then 46 | ngx.say("failed to limit request: ", err) 47 | return 48 | end 49 | ngx.sleep(delay) 50 | end 51 | ngx.say("elapsed: ", ngx.now() - begin, " sec.") 52 | '; 53 | } 54 | --- request 55 | GET /t 56 | --- response_body_like eval 57 | qr/^elapsed: 1\.9[6-9]\d* sec\.$/ 58 | --- no_error_log 59 | [error] 60 | [lua] 61 | --- timeout: 10 62 | 63 | 64 | 65 | === TEST 2: multiple keys 66 | --- http_config eval 67 | " 68 | $::HttpConfig 69 | 70 | 
lua_shared_dict store 1m; 71 | " 72 | --- config 73 | location = /t { 74 | content_by_lua ' 75 | local limit_req = require "resty.limit.req" 76 | ngx.shared.store:flush_all() 77 | local lim = limit_req.new("store", 2, 10) 78 | local delay1, excess1 = lim:incoming("foo", true) 79 | local delay2, excess2 = lim:incoming("foo", true) 80 | local delay3 = lim:incoming("bar", true) 81 | local delay4 = lim:incoming("bar", true) 82 | ngx.say("delay1: ", delay1) 83 | ngx.say("excess1: ", excess1) 84 | ngx.say("delay2: ", delay2) 85 | ngx.say("excess2: ", excess2) 86 | ngx.say("delay3: ", delay3) 87 | ngx.say("delay4: ", delay4) 88 | '; 89 | } 90 | --- request 91 | GET /t 92 | --- response_body 93 | delay1: 0 94 | excess1: 0 95 | delay2: 0.5 96 | excess2: 1 97 | delay3: 0 98 | delay4: 0.5 99 | --- no_error_log 100 | [error] 101 | [lua] 102 | 103 | 104 | 105 | === TEST 3: burst 106 | --- http_config eval 107 | " 108 | $::HttpConfig 109 | 110 | lua_shared_dict store 1m; 111 | " 112 | --- config 113 | location = /t { 114 | content_by_lua ' 115 | local limit_req = require "resty.limit.req" 116 | local lim = limit_req.new("store", 2, 0) 117 | 118 | for burst = 0, 2 do 119 | ngx.shared.store:flush_all() 120 | if burst > 0 then 121 | lim:set_burst(burst) 122 | end 123 | 124 | for i = 1, 10 do 125 | local delay, err = lim:incoming("foo", true) 126 | if not delay then 127 | ngx.say(i, ": error: ", err) 128 | break 129 | end 130 | end 131 | end 132 | '; 133 | } 134 | --- request 135 | GET /t 136 | --- response_body 137 | 2: error: rejected 138 | 3: error: rejected 139 | 4: error: rejected 140 | --- no_error_log 141 | [error] 142 | [lua] 143 | 144 | 145 | 146 | === TEST 4: a single key (do not commit since the 3rd time) 147 | --- http_config eval 148 | " 149 | $::HttpConfig 150 | 151 | lua_shared_dict store 1m; 152 | " 153 | --- config 154 | location = /t { 155 | content_by_lua ' 156 | local limit_req = require "resty.limit.req" 157 | ngx.shared.store:flush_all() 158 | local lim = 
limit_req.new("store", 2, 10) 159 | local key = "bar" 160 | for i = 1, 4 do 161 | local delay, err = lim:incoming(key, i < 3 and true or false) 162 | if not delay then 163 | ngx.say("failed to limit request: ", err) 164 | else 165 | ngx.say("delay: ", delay) 166 | end 167 | end 168 | '; 169 | } 170 | --- request 171 | GET /t 172 | --- response_body 173 | delay: 0 174 | delay: 0.5 175 | delay: 1 176 | delay: 1 177 | --- no_error_log 178 | [error] 179 | [lua] 180 | 181 | 182 | 183 | === TEST 5: bad value in shdict (integer type) 184 | --- http_config eval 185 | " 186 | $::HttpConfig 187 | 188 | lua_shared_dict store 1m; 189 | " 190 | --- config 191 | location = /t { 192 | content_by_lua ' 193 | local limit_req = require "resty.limit.req" 194 | ngx.shared.store:flush_all() 195 | local key = "bar" 196 | ngx.shared.store:set("bar", 32) 197 | local lim = limit_req.new("store", 2, 10) 198 | local delay, err = lim:incoming(key, true) 199 | if not delay then 200 | ngx.say("failed to limit request: ", err) 201 | else 202 | ngx.say("delay: ", delay) 203 | end 204 | '; 205 | } 206 | --- request 207 | GET /t 208 | --- response_body 209 | failed to limit request: shdict abused by other users 210 | --- no_error_log 211 | [error] 212 | [lua] 213 | 214 | 215 | 216 | === TEST 6: bad value in shdict (string type, and wrong size) 217 | --- http_config eval 218 | " 219 | $::HttpConfig 220 | 221 | lua_shared_dict store 1m; 222 | " 223 | --- config 224 | location = /t { 225 | content_by_lua ' 226 | local limit_req = require "resty.limit.req" 227 | ngx.shared.store:flush_all() 228 | local key = "bar" 229 | ngx.shared.store:set("bar", "a") 230 | local lim = limit_req.new("store", 2, 10) 231 | local delay, err = lim:incoming(key, true) 232 | if not delay then 233 | ngx.say("failed to limit request: ", err) 234 | else 235 | ngx.say("delay: ", delay) 236 | end 237 | '; 238 | } 239 | --- request 240 | GET /t 241 | --- response_body 242 | failed to limit request: shdict abused by other users 
243 | --- no_error_log 244 | [error] 245 | [lua] 246 | 247 | 248 | 249 | === TEST 7: a single key (commit & uncommit) 250 | --- http_config eval 251 | " 252 | $::HttpConfig 253 | 254 | lua_shared_dict store 1m; 255 | " 256 | --- config 257 | location = /t { 258 | content_by_lua ' 259 | local limit_req = require "resty.limit.req" 260 | ngx.shared.store:flush_all() 261 | local lim = limit_req.new("store", 40, 40) 262 | local begin = ngx.now() 263 | local uri = ngx.var.uri 264 | for i = 1, 5 do 265 | local delay, err = lim:incoming(uri, true) 266 | if not delay then 267 | ngx.say("failed to limit request: ", err) 268 | return 269 | end 270 | ngx.say(i, ": delay: ", delay) 271 | -- --[[ 272 | local ok, err = lim:uncommit(uri) 273 | if not ok then 274 | ngx.say("failed to uncommit: ", err) 275 | end 276 | -- ]] 277 | end 278 | '; 279 | } 280 | --- request 281 | GET /t 282 | --- response_body 283 | 1: delay: 0 284 | 2: delay: 0.025 285 | 3: delay: 0.025 286 | 4: delay: 0.025 287 | 5: delay: 0.025 288 | --- no_error_log 289 | [error] 290 | [lua] 291 | -------------------------------------------------------------------------------- /lib/resty/limit/count.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | resty.limit.count - Lua module for limiting request counts for OpenResty/ngx_lua. 
5 | 6 | Table of Contents 7 | ================= 8 | 9 | * [Name](#name) 10 | * [Synopsis](#synopsis) 11 | * [Description](#description) 12 | * [Methods](#methods) 13 | * [new](#new) 14 | * [incoming](#incoming) 15 | * [uncommit](#uncommit) 16 | * [Limiting Granularity](#limiting-granularity) 17 | * [Installation](#installation) 18 | * [Bugs and Patches](#bugs-and-patches) 19 | * [Authors](#authors) 20 | * [Copyright and License](#copyright-and-license) 21 | * [See Also](#see-also) 22 | 23 | Synopsis 24 | ======== 25 | 26 | ```nginx 27 | http { 28 | lua_shared_dict my_limit_count_store 100m; 29 | 30 | init_by_lua_block { 31 | require "resty.core" 32 | } 33 | 34 | server { 35 | location / { 36 | access_by_lua_block { 37 | local limit_count = require "resty.limit.count" 38 | 39 | -- rate: 5000 requests per 3600s 40 | local lim, err = limit_count.new("my_limit_count_store", 5000, 3600) 41 | if not lim then 42 | ngx.log(ngx.ERR, "failed to instantiate a resty.limit.count object: ", err) 43 | return ngx.exit(500) 44 | end 45 | 46 | -- use the Authorization header as the limiting key 47 | local key = ngx.req.get_headers()["Authorization"] or "public" 48 | local delay, err = lim:incoming(key, true) 49 | 50 | if not delay then 51 | if err == "rejected" then 52 | ngx.header["X-RateLimit-Limit"] = "5000" 53 | ngx.header["X-RateLimit-Remaining"] = 0 54 | return ngx.exit(503) 55 | end 56 | ngx.log(ngx.ERR, "failed to limit count: ", err) 57 | return ngx.exit(500) 58 | end 59 | 60 | -- the 2nd return value holds the current remaining number 61 | -- of requests for the specified key. 62 | local remaining = err 63 | 64 | ngx.header["X-RateLimit-Limit"] = "5000" 65 | ngx.header["X-RateLimit-Remaining"] = remaining 66 | } 67 | } 68 | } 69 | } 70 | ``` 71 | 72 | Description 73 | =========== 74 | 75 | This module provides APIs to help the OpenResty/ngx_lua user programmers limit request 76 | rate by a fixed number of requests in given time window. 
77 | 78 | It is included by default in [OpenResty](https://openresty.org/) 1.13.6.1+. 79 | 80 | This Lua module's implementation is similar to [GitHub API Rate Limiting](https://developer.github.com/v3/#rate-limiting). But this Lua 81 | module is flexible in that it can be configured with different rates and window sizes. 82 | 83 | This module depends on [lua-resty-core](https://github.com/openresty/lua-resty-core); you should enable it like so: 84 | 85 | ```nginx 86 | init_by_lua_block { 87 | require "resty.core" 88 | } 89 | ``` 90 | 91 | Methods 92 | ======= 93 | 94 | [Back to TOC](#table-of-contents) 95 | 96 | new 97 | --- 98 | **syntax:** `obj, err = class.new(shdict_name, count, time_window)` 99 | 100 | Instantiates an object of this class. The `class` value is returned by the call `require "resty.limit.count"`. 101 | 102 | This method takes the following arguments: 103 | 104 | * `shdict_name` is the name of the [lua_shared_dict](https://github.com/openresty/lua-nginx-module#lua_shared_dict) shm zone. 105 | 106 | It is best practice to use separate shm zones for different kinds of limiters. 107 | 108 | * `count` is the specified number of requests threshold. 109 | 110 | * `time_window` is the time window in seconds before the request count is reset. 111 | 112 | [Back to TOC](#table-of-contents) 113 | 114 | incoming 115 | -------- 116 | **syntax:** `delay, err = obj:incoming(key, commit)` 117 | 118 | Fires a new request incoming event and calculates the delay needed (if any) for the current request 119 | upon the specified key or whether the user should reject it immediately. 120 | 121 | This method accepts the following arguments: 122 | 123 | * `key` is the user specified key to limit the rate. 124 | 125 | For example, one can use the host name (or server zone) 126 | as the key so that we limit rate per host name. Otherwise, we can also use the authorization header value as the 127 | key so that we can set a rate for an individual user. 
128 | 129 | Please note that this module does not prefix nor suffix the user key so it is the user's responsibility to ensure the key is unique in the `lua_shared_dict` shm zone. 130 | * `commit` is a boolean value. If set to `true`, the object will actually record the event 131 | in the shm zone backing the current object; otherwise it would just be a "dry run" (which is the default). 132 | 133 | The return values depend on the following cases: 134 | 135 | 1. If the request does not exceed the `count` value specified in the [new](#new) method, then 136 | this method returns `0` as the delay and the remaining count of allowed requests at the current time (as the 2nd return value). 137 | 138 | 2. If the request exceeds the `count` limit specified in the [new](#new) method then 139 | this method returns `nil` and the error string `"rejected"`. 140 | 141 | 3. If an error occurred (like failures when accessing the `lua_shared_dict` shm zone backing 142 | the current object), then this method returns `nil` and a string describing the error. 143 | 144 | [Back to TOC](#table-of-contents) 145 | 146 | uncommit 147 | -------- 148 | **syntax:** `remaining, err = obj:uncommit(key)` 149 | 150 | Undoes the count commit performed by a previous `incoming` call. This method is mainly for excluding specified requests from counting 151 | against the limit, such as conditional requests. 152 | 153 | [Back to TOC](#table-of-contents) 154 | 155 | Limiting Granularity 156 | ==================== 157 | 158 | The limiting works on the granularity of an individual NGINX server instance (including all 159 | its worker processes). Thanks to the shm mechanism, we can share state cheaply across 160 | all the workers in a single NGINX server instance. 161 | 162 | [Back to TOC](#table-of-contents) 163 | 164 | Installation 165 | ============ 166 | 167 | Please see [library installation instructions](../../../README.md#installation). 
168 | 169 | [Back to TOC](#table-of-contents) 170 | 171 | Bugs and Patches 172 | ================ 173 | 174 | Please report bugs or submit patches by 175 | 176 | 1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-limit-traffic/issues), 177 | 178 | [Back to TOC](#table-of-contents) 179 | 180 | Authors 181 | ======= 182 | 183 | * Ke Zhu 184 | * Ming Wen 185 | 186 | [Back to TOC](#table-of-contents) 187 | 188 | Copyright and License 189 | ===================== 190 | 191 | This module is licensed under the BSD license. 192 | 193 | Copyright (C) 2016-2017, by Yichun "agentzh" Zhang, OpenResty Inc. 194 | 195 | All rights reserved. 196 | 197 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 198 | 199 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 200 | 201 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 202 | 203 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
204 | 205 | [Back to TOC](#table-of-contents) 206 | 207 | See Also 208 | ======== 209 | * module [resty.limit.conn](./conn.md) 210 | * module [resty.limit.traffic](./traffic.md) 211 | * library [lua-resty-limit-traffic](../../../README.md) 212 | * the ngx_lua module: https://github.com/openresty/lua-nginx-module 213 | * OpenResty: https://openresty.org/ 214 | 215 | [Back to TOC](#table-of-contents) 216 | -------------------------------------------------------------------------------- /lib/resty/limit/traffic.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | resty.limit.traffic - Lua module for aggregating multiple instances of limiter classes 5 | 6 | Table of Contents 7 | ================= 8 | 9 | * [Name](#name) 10 | * [Synopsis](#synopsis) 11 | * [Description](#description) 12 | * [Methods](#methods) 13 | * [combine](#combine) 14 | * [Instance Sharing](#instance-sharing) 15 | * [Limiting Granularity](#limiting-granularity) 16 | * [Installation](#installation) 17 | * [Community](#community) 18 | * [English Mailing List](#english-mailing-list) 19 | * [Chinese Mailing List](#chinese-mailing-list) 20 | * [Bugs and Patches](#bugs-and-patches) 21 | * [Author](#author) 22 | * [Copyright and License](#copyright-and-license) 23 | * [See Also](#see-also) 24 | 25 | Synopsis 26 | ======== 27 | 28 | ```nginx 29 | http { 30 | lua_shared_dict my_req_store 100m; 31 | lua_shared_dict my_conn_store 100m; 32 | 33 | server { 34 | location / { 35 | access_by_lua_block { 36 | local limit_conn = require "resty.limit.conn" 37 | local limit_req = require "resty.limit.req" 38 | local limit_traffic = require "resty.limit.traffic" 39 | 40 | local lim1, err = limit_req.new("my_req_store", 300, 200) 41 | assert(lim1, err) 42 | local lim2, err = limit_req.new("my_req_store", 200, 100) 43 | assert(lim2, err) 44 | local lim3, err = limit_conn.new("my_conn_store", 1000, 1000, 0.5) 45 | assert(lim3, err) 46 | 47 | local limiters = {lim1, 
lim2, lim3} 48 | 49 | local host = ngx.var.host 50 | local client = ngx.var.binary_remote_addr 51 | local keys = {host, client, client} 52 | 53 | local states = {} 54 | 55 | local delay, err = limit_traffic.combine(limiters, keys, states) 56 | if not delay then 57 | if err == "rejected" then 58 | return ngx.exit(503) 59 | end 60 | ngx.log(ngx.ERR, "failed to limit traffic: ", err) 61 | return ngx.exit(500) 62 | end 63 | 64 | if lim3:is_committed() then 65 | local ctx = ngx.ctx 66 | ctx.limit_conn = lim3 67 | ctx.limit_conn_key = keys[3] 68 | end 69 | 70 | print("sleeping ", delay, " sec, states: ", table.concat(states, ", ")) 71 | 72 | if delay >= 0.001 then 73 | ngx.sleep(delay) 74 | end 75 | } 76 | 77 | # content handler goes here. if it is content_by_lua, then you can 78 | # merge the Lua code above in access_by_lua into your 79 | # content_by_lua's Lua handler to save a little bit of CPU time. 80 | 81 | log_by_lua_block { 82 | local ctx = ngx.ctx 83 | local lim = ctx.limit_conn 84 | if lim then 85 | -- if you are using an upstream module in the content phase, 86 | -- then you probably want to use $upstream_response_time 87 | -- instead of $request_time below. 88 | local latency = tonumber(ngx.var.request_time) 89 | local key = ctx.limit_conn_key 90 | assert(key) 91 | local conn, err = lim:leaving(key, latency) 92 | if not conn then 93 | ngx.log(ngx.ERR, 94 | "failed to record the connection leaving ", 95 | "request: ", err) 96 | return 97 | end 98 | end 99 | } 100 | } 101 | } 102 | } 103 | ``` 104 | 105 | Description 106 | =========== 107 | 108 | This module can combine multiple limiters at once. For example, you may want to use 109 | two request rate limiters for different keys (one for host names and one for the remote 110 | client''s IP address), as well as one limiter for concurrency level at a key of the remote 111 | client address. 
This module can take into account all the limiters involved without 112 | introducing any extra delays for the current request. 113 | 114 | The concrete limiters supplied can be an instance of the [resty.limit.req](./req.md) class 115 | or an instance of the [resty.limit.conn](./conn.md) class, or an instance of the [resty.limit.count](./count.md) class, or an instance of any user class 116 | which has a compatible API (see the [combine](#combine) class method for more details). 117 | 118 | Methods 119 | ======= 120 | 121 | [Back to TOC](#table-of-contents) 122 | 123 | combine 124 | ------- 125 | **syntax:** `delay, err = class.combine(limiters, keys)` 126 | 127 | **syntax:** `delay, err = class.combine(limiters, keys, states)` 128 | 129 | Combines all the concrete limiter objects and the limiting keys specified, calculates 130 | the over-all delay across all the limiters, and (optionally) records any current 131 | state information returned by each concrete limiter object (if any). 132 | 133 | This method takes the following parameters: 134 | 135 | * `limiters` is an array-shaped Lua table that holds all the concrete limiter objects 136 | (for example, instances of the [resty.limit.req](lib/resty/limit/req.md) and/or 137 | [resty.limit.conn](lib/resty/limit/conn.md) and/or 138 | [resty.limit.count](lib/resty/limit/count.md) classes or other compatible objects). 139 | 140 | The limiter object must have a method named `incoming` which takes two parameters, 141 | `key` and `commit`, just like the [resty.limit.req](lib/resty/limit/req.md) objects. 142 | In addition, this `incoming` method must return a delay and another opaque value representing 143 | the current state (or a string describing the error when the first return value is `nil`). 144 | 145 | In addition, the limiter object should also take a method named `uncommit` which can be 146 | used to undo whatever is committed in the `incoming` method call (approximately if not possible to do precisely). 
147 | * `keys` is an array-shaped Lua table that holds all the user keys corresponding to each of the 148 | concrete limiter objects specified in the (previous) `limiters` parameter. The number of elements 149 | in this table must equal that of the `limiters` table. 150 | * `states` is an optional user-supplied Lua table that can be used to output all the 151 | state information returned by each of the concrete limiter objects. 152 | 153 | For example, instances 154 | of the [resty.limit.req](lib/resty/limit/req.md) class return the current number of excessive 155 | requests per second (if exceeding the rate threshold) while instances of the [resty.limit.conn](lib/resty/limit/conn.md) class return the current concurrency level. 156 | 157 | When missing or set to `nil`, this method does not bother outputting any state information. 158 | 159 | This method returns the delay in seconds (the caller should sleep before processing 160 | the current request) across all the concrete limiter objects specified upon each 161 | of the corresponding limiting keys (under the hood, the delay is just the maximum of all the delays dictated by the limiters). 162 | 163 | If any of the limiters rejects the current request immediately, then this method ensures 164 | the current request incoming event is not committed in any of these concrete limiters. 165 | In this case, this method returns `nil` and the error string `"rejected"`. 166 | 167 | In case of other errors, it returns `nil` and a string describing the error. 168 | 169 | Like each of the concrete limiter objects, this method never sleeps itself. It simply returns a delay if necessary and requires the caller 170 | to later invoke the [ngx.sleep](https://github.com/openresty/lua-nginx-module#ngxsleep) 171 | method to sleep. 172 | 173 | [Back to TOC](#table-of-contents) 174 | 175 | Instance Sharing 176 | ================ 177 | 178 | This class itself carries no state information at all. 
179 | The states are stored in each of the concrete limiter objects. Thus, as long as 180 | all those user-supplied concrete limiters support [worker-level sharing](https://github.com/openresty/lua-nginx-module#data-sharing-within-an-nginx-worker), 181 | this class does. 182 | 183 | [Back to TOC](#table-of-contents) 184 | 185 | Limiting Granularity 186 | ==================== 187 | 188 | All the concrete limiter objects must follow the same granularity (usually being the 189 | NGINX server instance level, across all its worker processes). 190 | 191 | Unmatched limiting granularity can cause unexpected results (which cannot happen if you 192 | limit yourself to the concrete limiter classes provided by this library, which is always 193 | on the NGINX server instance level). 194 | 195 | [Back to TOC](#table-of-contents) 196 | 197 | Installation 198 | ============ 199 | 200 | Please see [library installation instructions](../../../README.md#installation). 201 | 202 | [Back to TOC](#table-of-contents) 203 | 204 | Community 205 | ========= 206 | 207 | [Back to TOC](#table-of-contents) 208 | 209 | English Mailing List 210 | -------------------- 211 | 212 | The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers. 213 | 214 | [Back to TOC](#table-of-contents) 215 | 216 | Chinese Mailing List 217 | -------------------- 218 | 219 | The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers. 220 | 221 | [Back to TOC](#table-of-contents) 222 | 223 | Bugs and Patches 224 | ================ 225 | 226 | Please report bugs or submit patches by 227 | 228 | 1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-limit-traffic/issues), 229 | 1. or posting to the [OpenResty community](#community). 230 | 231 | [Back to TOC](#table-of-contents) 232 | 233 | Author 234 | ====== 235 | 236 | Yichun "agentzh" Zhang (章亦春) , CloudFlare Inc. 
237 | 238 | [Back to TOC](#table-of-contents) 239 | 240 | Copyright and License 241 | ===================== 242 | 243 | This module is licensed under the BSD license. 244 | 245 | Copyright (C) 2015-2016, by Yichun "agentzh" Zhang, CloudFlare Inc. 246 | 247 | All rights reserved. 248 | 249 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 250 | 251 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 252 | 253 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 254 | 255 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
256 | 257 | [Back to TOC](#table-of-contents) 258 | 259 | See Also 260 | ======== 261 | * module [resty.limit.req](./req.md) 262 | * module [resty.limit.conn](./conn.md) 263 | * module [resty.limit.count](./count.md) 264 | * library [lua-resty-limit-traffic](../../../README.md) 265 | * the ngx_lua module: https://github.com/openresty/lua-nginx-module 266 | * OpenResty: https://openresty.org/ 267 | 268 | [Back to TOC](#table-of-contents) 269 | 270 | -------------------------------------------------------------------------------- /t/traffic.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | 3 | use Test::Nginx::Socket::Lua; 4 | use Cwd qw(cwd); 5 | 6 | repeat_each(2); 7 | 8 | plan tests => repeat_each() * (blocks() * 4); 9 | 10 | #no_diff(); 11 | #no_long_string(); 12 | 13 | my $pwd = cwd(); 14 | 15 | our $HttpConfig = <<_EOC_; 16 | lua_package_path "$pwd/../lua-resty-core/lib/?.lua;../lua-resty-lrucache/lib/?.lua;$pwd/lib/?.lua;;"; 17 | init_by_lua_block { 18 | local v = require "jit.v" 19 | -- v.on("/tmp/a.dump") 20 | require "resty.core" 21 | } 22 | _EOC_ 23 | 24 | no_long_string(); 25 | run_tests(); 26 | 27 | __DATA__ 28 | 29 | === TEST 1: sanity (output states) 30 | --- http_config eval 31 | " 32 | $::HttpConfig 33 | 34 | lua_shared_dict req 1m; 35 | lua_shared_dict conn 1m; 36 | lua_shared_dict count 1m; 37 | " 38 | --- config 39 | location = /t { 40 | content_by_lua_block { 41 | local limit_conn = require "resty.limit.conn" 42 | local limit_req = require "resty.limit.req" 43 | local limit_count = require "resty.limit.count" 44 | local limit_traffic = require "resty.limit.traffic" 45 | 46 | local lim1 = limit_req.new("req", 3, 2) 47 | local lim2 = limit_req.new("req", 2, 3) 48 | local lim3 = limit_conn.new("conn", 4, 1, 2) 49 | local lim4 = limit_count.new("count", 10, 100) 50 | 51 | local limiters = {lim1, lim2, lim3, lim4} 52 | 53 | ngx.shared.req:flush_all() 54 | 
ngx.shared.conn:flush_all() 55 | ngx.shared.count:flush_all() 56 | 57 | local keys = {"foo", "bar", "foo", "bar"} 58 | local states = {} 59 | for i = 1, 6 do 60 | local delay, err = limit_traffic.combine(limiters, keys, states) 61 | if not delay then 62 | ngx.say("failed to limit traffic: ", err) 63 | else 64 | ngx.say(i, ": ", delay, 65 | ", conn committed: ", lim3:is_committed(), 66 | ", states: ", table.concat(states, ", ")) 67 | end 68 | if i == 4 then 69 | ngx.sleep(1) 70 | end 71 | end 72 | } 73 | } 74 | --- request 75 | GET /t 76 | --- response_body_like eval 77 | qr/^1: 0, conn committed: true, states: 0, 0, 1, 9 78 | 2: 0\.5, conn committed: true, states: 1, 1, 2, 8 79 | 3: 1, conn committed: true, states: 2, 2, 3, 7 80 | failed to limit traffic: rejected 81 | 5: 0\.(?:4[6-9]|5|5[0-4])\d*, conn committed: true, states: 0(?:\.0\d+)?, (?:1|1\.0[0-4]\d*|0\.9[6-9]\d*), 4, 6 82 | 6: 2, conn committed: true, states: 1(?:\.0\d+)?, (?:2|2\.0[0-4]\d*|1\.9[6-9]\d*), 5, 5 83 | $/s 84 | --- no_error_log 85 | [error] 86 | [lua] 87 | 88 | 89 | 90 | === TEST 2: sanity (no output states) 91 | --- http_config eval 92 | " 93 | $::HttpConfig 94 | 95 | lua_shared_dict req 1m; 96 | lua_shared_dict conn 1m; 97 | lua_shared_dict count 1m; 98 | " 99 | --- config 100 | location = /t { 101 | content_by_lua_block { 102 | local limit_conn = require "resty.limit.conn" 103 | local limit_req = require "resty.limit.req" 104 | local limit_count = require "resty.limit.count" 105 | local limit_traffic = require "resty.limit.traffic" 106 | 107 | local lim1 = limit_req.new("req", 3, 2) 108 | local lim2 = limit_req.new("req", 2, 3) 109 | local lim3 = limit_conn.new("conn", 4, 1, 2) 110 | local lim4 = limit_count.new("count", 10, 100) 111 | 112 | local limiters = {lim1, lim2, lim3, lim4} 113 | 114 | ngx.shared.req:flush_all() 115 | ngx.shared.conn:flush_all() 116 | ngx.shared.count:flush_all() 117 | 118 | local keys = {"foo", "bar", "foo", "bar"} 119 | for i = 1, 6 do 120 | local delay, err = 
limit_traffic.combine(limiters, keys) 121 | if not delay then 122 | ngx.say("failed to limit traffic: ", err) 123 | else 124 | ngx.say(i, ": ", delay, 125 | ", conn committed: ", lim3:is_committed()) 126 | end 127 | if i == 4 then 128 | ngx.sleep(1) 129 | end 130 | end 131 | } 132 | } 133 | --- request 134 | GET /t 135 | --- response_body_like eval 136 | qr/^1: 0, conn committed: true 137 | 2: 0\.5, conn committed: true 138 | 3: 1, conn committed: true 139 | failed to limit traffic: rejected 140 | 5: 0\.(?:4[6-9]|5|5[0-4])\d*, conn committed: true 141 | 6: 2, conn committed: true 142 | $/s 143 | --- no_error_log 144 | [error] 145 | [lua] 146 | 147 | 148 | 149 | === TEST 3: block by limit-count (output states) 150 | --- http_config eval 151 | " 152 | $::HttpConfig 153 | 154 | lua_shared_dict req 1m; 155 | lua_shared_dict conn 1m; 156 | lua_shared_dict count 1m; 157 | " 158 | --- config 159 | location = /t { 160 | content_by_lua_block { 161 | local limit_conn = require "resty.limit.conn" 162 | local limit_req = require "resty.limit.req" 163 | local limit_count = require "resty.limit.count" 164 | local limit_traffic = require "resty.limit.traffic" 165 | 166 | local lim1 = limit_req.new("req", 3, 2) 167 | local lim2 = limit_req.new("req", 2, 3) 168 | local lim3 = limit_conn.new("conn", 4, 1, 2) 169 | local lim4 = limit_count.new("count", 2, 100) 170 | 171 | local limiters = {lim1, lim2, lim3, lim4} 172 | 173 | ngx.shared.req:flush_all() 174 | ngx.shared.conn:flush_all() 175 | ngx.shared.count:flush_all() 176 | 177 | local keys = {"foo", "bar", "foo", "bar"} 178 | local states = {} 179 | for i = 1, 6 do 180 | local delay, err = limit_traffic.combine(limiters, keys, states) 181 | if not delay then 182 | ngx.say("failed to limit traffic: ", err) 183 | ngx.say("states: ", table.concat(states, ", ")) 184 | else 185 | ngx.say(i, ": ", delay, 186 | ", conn committed: ", lim3:is_committed(), 187 | ", states: ", table.concat(states, ", ")) 188 | end 189 | end 190 | } 191 | } 
192 | --- request 193 | GET /t 194 | --- response_body_like eval 195 | qr/^1: 0, conn committed: true, states: 0, 0, 1, 1 196 | 2: 0\.5, conn committed: true, states: 1, 1, 2, 0 197 | failed to limit traffic: rejected 198 | states: 1, 1, 2, 0 199 | failed to limit traffic: rejected 200 | states: 1, 1, 2, 0 201 | failed to limit traffic: rejected 202 | states: 1, 1, 2, 0 203 | failed to limit traffic: rejected 204 | states: 1, 1, 2, 0 205 | $/s 206 | --- no_error_log 207 | [error] 208 | [lua] 209 | 210 | 211 | 212 | === TEST 4: sanity (uncommit() previous limiters if a limiter rejects while committing a state) 213 | --- http_config eval: $::HttpConfig 214 | --- config 215 | location = /t { 216 | content_by_lua_block { 217 | local limit_traffic = require "resty.limit.traffic" 218 | 219 | local limit_mock = {} 220 | limit_mock.__index = limit_mock 221 | 222 | function limit_mock.new(_, _, reject_on_commit) 223 | return setmetatable({ 224 | counters = {}, 225 | reject_on_commit = reject_on_commit, 226 | }, limit_mock) 227 | end 228 | 229 | function limit_mock:incoming(key, commit) 230 | local count = self.counters[key] or 0 231 | 232 | count = count + 1 233 | 234 | if commit then 235 | self.counters[key] = count 236 | 237 | if self.reject_on_commit then 238 | return nil, "rejected by mock limiter" 239 | end 240 | end 241 | 242 | return count 243 | end 244 | 245 | function limit_mock:uncommit(key) 246 | local count = self.counters[key] or 0 247 | if count > 0 then 248 | count = count - 1 249 | end 250 | 251 | self.counters[key] = count 252 | end 253 | 254 | local lim1 = limit_mock.new(nil, 2) 255 | local lim2 = limit_mock.new(nil, 2) 256 | local lim3 = limit_mock.new(nil, 2, true) 257 | local lim4 = limit_mock.new(nil, 2) 258 | 259 | local limiters = {lim1, lim2, lim3, lim4} 260 | 261 | local keys = {"foo", "bar", "baz", "bat"} 262 | 263 | local delay, err = limit_traffic.combine(limiters, keys) 264 | if not delay then 265 | ngx.say(err) 266 | end 267 | 268 | 
ngx.say("state lim1: ", lim1:incoming(keys[1])) -- should be 1 because previous combine() call was uncommitted 269 | ngx.say("state lim2: ", lim2:incoming(keys[2])) -- should be 1 because previous combine() call was uncommitted 270 | ngx.say("state lim3: ", lim3:incoming(keys[3])) 271 | } 272 | } 273 | --- request 274 | GET /t 275 | --- response_body 276 | rejected by mock limiter 277 | state lim1: 1 278 | state lim2: 1 279 | state lim3: 2 280 | --- no_error_log 281 | [error] 282 | [lua] 283 | 284 | 285 | 286 | === TEST 5: sanity (uncommit() the previous limiters and the last limiter if a limiter rejects while committing a state) 287 | --- http_config eval: $::HttpConfig 288 | --- config 289 | location = /t { 290 | content_by_lua_block { 291 | local limit_traffic = require "resty.limit.traffic" 292 | 293 | local limit_mock = {} 294 | limit_mock.__index = limit_mock 295 | 296 | function limit_mock.new(_, _, reject_on_commit) 297 | return setmetatable({ 298 | counters = {}, 299 | reject_on_commit = reject_on_commit, 300 | }, limit_mock) 301 | end 302 | 303 | function limit_mock:incoming(key, commit) 304 | local count = self.counters[key] or 0 305 | 306 | count = count + 1 307 | 308 | if commit then 309 | self.counters[key] = count 310 | 311 | if self.reject_on_commit then 312 | return nil, "rejected by mock limiter" 313 | end 314 | end 315 | 316 | return count 317 | end 318 | 319 | function limit_mock:uncommit(key) 320 | local count = self.counters[key] or 0 321 | if count > 0 then 322 | count = count - 1 323 | end 324 | 325 | self.counters[key] = count 326 | end 327 | 328 | local lim1 = limit_mock.new(nil, 2) 329 | local lim2 = limit_mock.new(nil, 2, true) 330 | local lim3 = limit_mock.new(nil, 2) 331 | local lim4 = limit_mock.new(nil, 2) 332 | 333 | local limiters = {lim1, lim2, lim3, lim4} 334 | 335 | local keys = {"foo", "bar", "baz", "bat"} 336 | 337 | local delay, err = limit_traffic.combine(limiters, keys) 338 | if not delay then 339 | ngx.say(err) 340 | end 
341 | 342 | ngx.say("state lim1: ", lim1:incoming(keys[1])) -- should be 1 because previous combine() call was uncommitted 343 | ngx.say("state lim2: ", lim2:incoming(keys[2])) 344 | ngx.say("state lim3: ", lim3:incoming(keys[3])) -- should be 1 because previous combine() call was uncommitted 345 | ngx.say("state lim4: ", lim4:incoming(keys[4])) -- should be 1 because previous combine() call was uncommitted 346 | } 347 | } 348 | --- request 349 | GET /t 350 | --- response_body 351 | rejected by mock limiter 352 | state lim1: 1 353 | state lim2: 2 354 | state lim3: 1 355 | state lim4: 1 356 | --- no_error_log 357 | [error] 358 | [lua] 359 | -------------------------------------------------------------------------------- /lib/resty/limit/req.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | resty.limit.req - Lua module for limiting request rate for OpenResty/ngx_lua. 5 | 6 | Table of Contents 7 | ================= 8 | 9 | * [Name](#name) 10 | * [Synopsis](#synopsis) 11 | * [Description](#description) 12 | * [Methods](#methods) 13 | * [new](#new) 14 | * [incoming](#incoming) 15 | * [set_rate](#set_rate) 16 | * [set_burst](#set_burst) 17 | * [uncommit](#uncommit) 18 | * [Instance Sharing](#instance-sharing) 19 | * [Limiting Granularity](#limiting-granularity) 20 | * [Installation](#installation) 21 | * [Community](#community) 22 | * [English Mailing List](#english-mailing-list) 23 | * [Chinese Mailing List](#chinese-mailing-list) 24 | * [Bugs and Patches](#bugs-and-patches) 25 | * [Author](#author) 26 | * [Copyright and License](#copyright-and-license) 27 | * [See Also](#see-also) 28 | 29 | Synopsis 30 | ======== 31 | 32 | ```nginx 33 | # demonstrate the usage of the resty.limit.req module (alone!) 
34 | http { 35 | lua_shared_dict my_limit_req_store 100m; 36 | 37 | server { 38 | location / { 39 | access_by_lua_block { 40 | -- well, we could put the require() and new() calls in our own Lua 41 | -- modules to save overhead. here we put them below just for 42 | -- convenience. 43 | 44 | local limit_req = require "resty.limit.req" 45 | 46 | -- limit the requests under 200 req/sec with a burst of 100 req/sec, 47 | -- that is, we delay requests under 300 req/sec and above 200 48 | -- req/sec, and reject any requests exceeding 300 req/sec. 49 | local lim, err = limit_req.new("my_limit_req_store", 200, 100) 50 | if not lim then 51 | ngx.log(ngx.ERR, 52 | "failed to instantiate a resty.limit.req object: ", err) 53 | return ngx.exit(500) 54 | end 55 | 56 | -- the following call must be per-request. 57 | -- here we use the remote (IP) address as the limiting key 58 | local key = ngx.var.binary_remote_addr 59 | local delay, err = lim:incoming(key, true) 60 | if not delay then 61 | if err == "rejected" then 62 | return ngx.exit(503) 63 | end 64 | ngx.log(ngx.ERR, "failed to limit req: ", err) 65 | return ngx.exit(500) 66 | end 67 | 68 | if delay >= 0.001 then 69 | -- the 2nd return value holds the number of excess requests 70 | -- per second for the specified key. for example, number 31 71 | -- means the current request rate is at 231 req/sec for the 72 | -- specified key. 73 | local excess = err 74 | 75 | -- the request exceeding the 200 req/sec but below 300 req/sec, 76 | -- so we intentionally delay it here a bit to conform to the 77 | -- 200 req/sec rate. 78 | ngx.sleep(delay) 79 | end 80 | } 81 | 82 | # content handler goes here. if it is content_by_lua, then you can 83 | # merge the Lua code above in access_by_lua into your content_by_lua's 84 | # Lua handler to save a little bit of CPU time. 
85 | } 86 | } 87 | } 88 | ``` 89 | 90 | Description 91 | =========== 92 | 93 | This module provides APIs to help the OpenResty/ngx_lua user programmers limit request 94 | rate using the "leaky bucket" method. 95 | 96 | If you want to use multiple different instances of this class at once or use one instance 97 | of this class with instances of other classes (like [resty.limit.conn](./conn.md)), 98 | then you *must* use the [resty.limit.traffic](./traffic.md) module to combine them. 99 | 100 | This Lua module's implementation is similar to NGINX's standard module 101 | [ngx_limit_req](http://nginx.org/en/docs/http/ngx_http_limit_req_module.html). But this Lua 102 | module is more flexible in that it can be used in almost arbitrary contexts. 103 | 104 | Methods 105 | ======= 106 | 107 | [Back to TOC](#table-of-contents) 108 | 109 | new 110 | --- 111 | **syntax:** `obj, err = class.new(shdict_name, rate, burst)` 112 | 113 | Instantiates an object of this class. The `class` value is returned by the call `require "resty.limit.req"`. 114 | 115 | This method takes the following arguments: 116 | 117 | * `shdict_name` is the name of the [lua_shared_dict](https://github.com/openresty/lua-nginx-module#lua_shared_dict) shm zone. 118 | 119 | It is best practice to use separate shm zones for different kinds of limiters. 120 | * `rate` is the specified request rate (number per second) threshold. 121 | 122 | Requests exceeding this rate (and below `burst`) will get delayed to conform to the rate. 123 | * `burst` is the number of excessive requests per second allowed to be delayed. 124 | 125 | Requests exceeding this hard limit 126 | will get rejected immediately. 127 | 128 | On failure, this method returns `nil` and a string describing the error (like a bad `lua_shared_dict` name). 
129 | 130 | [Back to TOC](#table-of-contents) 131 | 132 | incoming 133 | -------- 134 | **syntax:** `delay, err = obj:incoming(key, commit)` 135 | 136 | Fires a new request incoming event and calculates the delay needed (if any) for the current request 137 | upon the specified key or whether the user should reject it immediately. 138 | 139 | This method accepts the following arguments: 140 | 141 | * `key` is the user specified key to limit the rate. 142 | 143 | For example, one can use the host name (or server zone) 144 | as the key so that we limit rate per host name. Otherwise, we can also use the client address as the 145 | key so that we can avoid a single client from flooding our service. 146 | 147 | Please note that this module 148 | does not prefix nor suffix the user key so it is the user's responsibility to ensure the key 149 | is unique in the `lua_shared_dict` shm zone. 150 | * `commit` is a boolean value. If set to `true`, the object will actually record the event 151 | in the shm zone backing the current object; otherwise it would just be a "dry run" (which is the default). 152 | 153 | The return values depend on the following cases: 154 | 155 | 1. If the request does not exceed the `rate` value specified in the [new](#new) method, then 156 | this method returns `0` as the delay and the (zero) number of excessive requests per second at 157 | the current time. 158 | 2. If the request exceeds the `rate` limit specified in the [new](#new) method but not 159 | the `rate` + `burst` value, then 160 | this method returns a proper delay (in seconds) for the current request so that it still conforms to 161 | the `rate` threshold as if it came a bit later rather than now. 162 | 163 | In addition, this method 164 | also returns a second return value indicating the number of excessive requests per second 165 | at this point (including the current request). This 2nd return value can be used to monitor the 166 | unadjusted incoming request rate. 167 | 3.
If the request exceeds the `rate` + `burst` limit, then this method returns `nil` and 168 | the error string `"rejected"`. 169 | 4. If an error occurred (like failures when accessing the `lua_shared_dict` shm zone backing 170 | the current object), then this method returns `nil` and a string describing the error. 171 | 172 | This method never sleeps itself. It simply returns a delay if necessary and requires the caller 173 | to later invoke the [ngx.sleep](https://github.com/openresty/lua-nginx-module#ngxsleep) 174 | method to sleep. 175 | 176 | [Back to TOC](#table-of-contents) 177 | 178 | set_rate 179 | -------- 180 | **syntax:** `obj:set_rate(rate)` 181 | 182 | Overwrites the `rate` threshold as specified in the [new](#new) method. 183 | 184 | [Back to TOC](#table-of-contents) 185 | 186 | set_burst 187 | --------- 188 | **syntax:** `obj:set_burst(burst)` 189 | 190 | Overwrites the `burst` threshold as specified in the [new](#new) method. 191 | 192 | [Back to TOC](#table-of-contents) 193 | 194 | uncommit 195 | -------- 196 | **syntax:** `ok, err = obj:uncommit(key)` 197 | 198 | This tries to undo the commit of the `incoming` call. This is simply an approximation 199 | and should be used with care. This method is mainly for being used in the [resty.limit.traffic](./traffic.md) 200 | Lua module when combining multiple limiters at the same time. 201 | 202 | [Back to TOC](#table-of-contents) 203 | 204 | Instance Sharing 205 | ================ 206 | 207 | Each instance of this class carries no state information but the `rate` and `burst` 208 | threshold values. The real limiting states based on keys are stored in the `lua_shared_dict` 209 | shm zone specified in the [new](#new) method. So it is safe to share instances of 210 | this class [on the nginx worker process level](https://github.com/openresty/lua-nginx-module#data-sharing-within-an-nginx-worker) 211 | as long as the combination of `rate` and `burst` do not change. 
212 | 213 | Even if the `rate` and `burst` 214 | combination *does* change, one can still share a single instance as long as one always 215 | calls the [set_rate](#set_rate) and/or [set_burst](#set_burst) methods *right before* 216 | the [incoming](#incoming) call. 217 | 218 | [Back to TOC](#table-of-contents) 219 | 220 | Limiting Granularity 221 | ==================== 222 | 223 | The limiting works on the granularity of an individual NGINX server instance (including all 224 | its worker processes). Thanks to the shm mechanism, we can share state cheaply across 225 | all the workers in a single NGINX server instance. 226 | 227 | If you are running multiple NGINX server instances (like running multiple boxes), then 228 | you need to ensure that the incoming traffic is (more or less) evenly distributed across 229 | all the different NGINX server instances (or boxes). So if you want a limit rate of N req/sec 230 | across all the servers, then you just need to specify a limit of `N/n` req/sec in each server's configuration. This simple strategy can save all the (big) overhead of sharing a global state across 231 | machine boundaries. 232 | 233 | [Back to TOC](#table-of-contents) 234 | 235 | Installation 236 | ============ 237 | 238 | Please see [library installation instructions](../../../README.md#installation). 239 | 240 | [Back to TOC](#table-of-contents) 241 | 242 | Community 243 | ========= 244 | 245 | [Back to TOC](#table-of-contents) 246 | 247 | English Mailing List 248 | -------------------- 249 | 250 | The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers. 251 | 252 | [Back to TOC](#table-of-contents) 253 | 254 | Chinese Mailing List 255 | -------------------- 256 | 257 | The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers.
258 | 259 | [Back to TOC](#table-of-contents) 260 | 261 | Bugs and Patches 262 | ================ 263 | 264 | Please report bugs or submit patches by 265 | 266 | 1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-limit-traffic/issues), 267 | 1. or posting to the [OpenResty community](#community). 268 | 269 | [Back to TOC](#table-of-contents) 270 | 271 | Author 272 | ====== 273 | 274 | Yichun "agentzh" Zhang (章亦春) , CloudFlare Inc. 275 | 276 | [Back to TOC](#table-of-contents) 277 | 278 | Copyright and License 279 | ===================== 280 | 281 | This module is licensed under the BSD license. 282 | 283 | Copyright (C) 2015-2016, by Yichun "agentzh" Zhang, CloudFlare Inc. 284 | 285 | All rights reserved. 286 | 287 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 288 | 289 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 290 | 291 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 292 | 293 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 294 | 295 | [Back to TOC](#table-of-contents) 296 | 297 | See Also 298 | ======== 299 | * module [resty.limit.conn](./conn.md) 300 | * module [resty.limit.count](./count.md) 301 | * module [resty.limit.traffic](./traffic.md) 302 | * library [lua-resty-limit-traffic](../../../README.md) 303 | * the ngx_lua module: https://github.com/openresty/lua-nginx-module 304 | * OpenResty: https://openresty.org/ 305 | 306 | [Back to TOC](#table-of-contents) 307 | 308 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | lua-resty-limit-traffic - Lua library for limiting and controlling traffic in OpenResty/ngx_lua 5 | 6 | Table of Contents 7 | ================= 8 | 9 | * [Name](#name) 10 | * [Status](#status) 11 | * [Synopsis](#synopsis) 12 | * [Description](#description) 13 | * [Installation](#installation) 14 | * [Community](#community) 15 | * [English Mailing List](#english-mailing-list) 16 | * [Chinese Mailing List](#chinese-mailing-list) 17 | * [Bugs and Patches](#bugs-and-patches) 18 | * [Author](#author) 19 | * [Copyright and License](#copyright-and-license) 20 | * [See Also](#see-also) 21 | 22 | Status 23 | ====== 24 | 25 | This library is already usable though still highly experimental. 26 | 27 | The Lua API is still in flux and may change in the near future without notice. 
28 | 29 | Synopsis 30 | ======== 31 | 32 | ```nginx 33 | # demonstrate the usage of the resty.limit.req module (alone!) 34 | http { 35 | lua_shared_dict my_limit_req_store 100m; 36 | 37 | server { 38 | location / { 39 | access_by_lua_block { 40 | -- well, we could put the require() and new() calls in our own Lua 41 | -- modules to save overhead. here we put them below just for 42 | -- convenience. 43 | 44 | local limit_req = require "resty.limit.req" 45 | 46 | -- limit the requests under 200 req/sec with a burst of 100 req/sec, 47 | -- that is, we delay requests under 300 req/sec and above 200 48 | -- req/sec, and reject any requests exceeding 300 req/sec. 49 | local lim, err = limit_req.new("my_limit_req_store", 200, 100) 50 | if not lim then 51 | ngx.log(ngx.ERR, 52 | "failed to instantiate a resty.limit.req object: ", err) 53 | return ngx.exit(500) 54 | end 55 | 56 | -- the following call must be per-request. 57 | -- here we use the remote (IP) address as the limiting key 58 | local key = ngx.var.binary_remote_addr 59 | local delay, err = lim:incoming(key, true) 60 | if not delay then 61 | if err == "rejected" then 62 | return ngx.exit(503) 63 | end 64 | ngx.log(ngx.ERR, "failed to limit req: ", err) 65 | return ngx.exit(500) 66 | end 67 | 68 | if delay >= 0.001 then 69 | -- the 2nd return value holds the number of excess requests 70 | -- per second for the specified key. for example, number 31 71 | -- means the current request rate is at 231 req/sec for the 72 | -- specified key. 73 | local excess = err 74 | 75 | -- the request exceeding the 200 req/sec but below 300 req/sec, 76 | -- so we intentionally delay it here a bit to conform to the 77 | -- 200 req/sec rate. 78 | ngx.sleep(delay) 79 | end 80 | } 81 | 82 | # content handler goes here. if it is content_by_lua, then you can 83 | # merge the Lua code above in access_by_lua into your content_by_lua's 84 | # Lua handler to save a little bit of CPU time. 
85 | } 86 | } 87 | } 88 | ``` 89 | 90 | ```nginx 91 | # demonstrate the usage of the resty.limit.conn module (alone!) 92 | http { 93 | lua_shared_dict my_limit_conn_store 100m; 94 | 95 | server { 96 | location / { 97 | access_by_lua_block { 98 | -- well, we could put the require() and new() calls in our own Lua 99 | -- modules to save overhead. here we put them below just for 100 | -- convenience. 101 | 102 | local limit_conn = require "resty.limit.conn" 103 | 104 | -- limit the requests under 200 concurrent requests (normally just 105 | -- incoming connections unless protocols like SPDY is used) with 106 | -- a burst of 100 extra concurrent requests, that is, we delay 107 | -- requests under 300 concurrent connections and above 200 108 | -- connections, and reject any new requests exceeding 300 109 | -- connections. 110 | -- also, we assume a default request time of 0.5 sec, which can be 111 | -- dynamically adjusted by the leaving() call in log_by_lua below. 112 | local lim, err = limit_conn.new("my_limit_conn_store", 200, 100, 0.5) 113 | if not lim then 114 | ngx.log(ngx.ERR, 115 | "failed to instantiate a resty.limit.conn object: ", err) 116 | return ngx.exit(500) 117 | end 118 | 119 | -- the following call must be per-request. 120 | -- here we use the remote (IP) address as the limiting key 121 | local key = ngx.var.binary_remote_addr 122 | local delay, err = lim:incoming(key, true) 123 | if not delay then 124 | if err == "rejected" then 125 | return ngx.exit(503) 126 | end 127 | ngx.log(ngx.ERR, "failed to limit req: ", err) 128 | return ngx.exit(500) 129 | end 130 | 131 | if lim:is_committed() then 132 | local ctx = ngx.ctx 133 | ctx.limit_conn = lim 134 | ctx.limit_conn_key = key 135 | ctx.limit_conn_delay = delay 136 | end 137 | 138 | -- the 2nd return value holds the current concurrency level 139 | -- for the specified key. 
140 | local conn = err 141 | 142 | if delay >= 0.001 then 143 | -- the request exceeding the 200 connections ratio but below 144 | -- 300 connections, so 145 | -- we intentionally delay it here a bit to conform to the 146 | -- 200 connection limit. 147 | -- ngx.log(ngx.WARN, "delaying") 148 | ngx.sleep(delay) 149 | end 150 | } 151 | 152 | # content handler goes here. if it is content_by_lua, then you can 153 | # merge the Lua code above in access_by_lua into your 154 | # content_by_lua's Lua handler to save a little bit of CPU time. 155 | 156 | log_by_lua_block { 157 | local ctx = ngx.ctx 158 | local lim = ctx.limit_conn 159 | if lim then 160 | -- if you are using an upstream module in the content phase, 161 | -- then you probably want to use $upstream_response_time 162 | -- instead of ($request_time - ctx.limit_conn_delay) below. 163 | local latency = tonumber(ngx.var.request_time) - ctx.limit_conn_delay 164 | local key = ctx.limit_conn_key 165 | assert(key) 166 | local conn, err = lim:leaving(key, latency) 167 | if not conn then 168 | ngx.log(ngx.ERR, 169 | "failed to record the connection leaving ", 170 | "request: ", err) 171 | return 172 | end 173 | end 174 | } 175 | } 176 | } 177 | } 178 | ``` 179 | 180 | ```nginx 181 | # demonstrate the usage of the resty.limit.traffic module 182 | http { 183 | lua_shared_dict my_req_store 100m; 184 | lua_shared_dict my_conn_store 100m; 185 | 186 | server { 187 | location / { 188 | access_by_lua_block { 189 | local limit_conn = require "resty.limit.conn" 190 | local limit_req = require "resty.limit.req" 191 | local limit_traffic = require "resty.limit.traffic" 192 | 193 | local lim1, err = limit_req.new("my_req_store", 300, 200) 194 | assert(lim1, err) 195 | local lim2, err = limit_req.new("my_req_store", 200, 100) 196 | assert(lim2, err) 197 | local lim3, err = limit_conn.new("my_conn_store", 1000, 1000, 0.5) 198 | assert(lim3, err) 199 | 200 | local limiters = {lim1, lim2, lim3} 201 | 202 | local host = ngx.var.host 203 | 
local client = ngx.var.binary_remote_addr 204 | local keys = {host, client, client} 205 | 206 | local states = {} 207 | 208 | local delay, err = limit_traffic.combine(limiters, keys, states) 209 | if not delay then 210 | if err == "rejected" then 211 | return ngx.exit(503) 212 | end 213 | ngx.log(ngx.ERR, "failed to limit traffic: ", err) 214 | return ngx.exit(500) 215 | end 216 | 217 | if lim3:is_committed() then 218 | local ctx = ngx.ctx 219 | ctx.limit_conn = lim3 220 | ctx.limit_conn_key = keys[3] 221 | end 222 | 223 | print("sleeping ", delay, " sec, states: ", 224 | table.concat(states, ", ")) 225 | 226 | if delay >= 0.001 then 227 | ngx.sleep(delay) 228 | end 229 | } 230 | 231 | # content handler goes here. if it is content_by_lua, then you can 232 | # merge the Lua code above in access_by_lua into your 233 | # content_by_lua's Lua handler to save a little bit of CPU time. 234 | 235 | log_by_lua_block { 236 | local ctx = ngx.ctx 237 | local lim = ctx.limit_conn 238 | if lim then 239 | -- if you are using an upstream module in the content phase, 240 | -- then you probably want to use $upstream_response_time 241 | -- instead of $request_time below. 242 | local latency = tonumber(ngx.var.request_time) 243 | local key = ctx.limit_conn_key 244 | assert(key) 245 | local conn, err = lim:leaving(key, latency) 246 | if not conn then 247 | ngx.log(ngx.ERR, 248 | "failed to record the connection leaving ", 249 | "request: ", err) 250 | return 251 | end 252 | end 253 | } 254 | } 255 | } 256 | } 257 | ``` 258 | 259 | Description 260 | =========== 261 | 262 | This library provides several Lua modules to help OpenResty/ngx_lua users to control and 263 | limit 264 | the traffic, either request rate or request concurrency (or both). 265 | 266 | * [resty.limit.req](lib/resty/limit/req.md) provides request rate limiting and adjustment based on the "leaky bucket" method. 
267 | * [resty.limit.count](lib/resty/limit/count.md) provides rate limiting based on a "fixed window" implementation since OpenResty 1.13.6.1+. 268 | * [resty.limit.conn](lib/resty/limit/conn.md) provides request concurrency level limiting and adjustment based on extra delays. 269 | * [resty.limit.traffic](lib/resty/limit/traffic.md) provides an aggregator to combine multiple instances of the [resty.limit.req](lib/resty/limit/req.md), [resty.limit.count](lib/resty/limit/count.md), or [resty.limit.conn](lib/resty/limit/conn.md) classes (or all). 270 | 271 | Please check out these Lua modules' own documentation for more details. 272 | 273 | This library provides more flexible alternatives to NGINX's standard modules 274 | [ngx_limit_req](http://nginx.org/en/docs/http/ngx_http_limit_req_module.html) 275 | and [ngx_limit_conn](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html). 276 | For example, the Lua-based limiters provided by this library can be used in any contexts 277 | like right before the downstream SSL handshaking procedure (as with `ssl_certificate_by_lua`) 278 | or right before issuing backend requests. 279 | 280 | [Back to TOC](#table-of-contents) 281 | 282 | Installation 283 | ============ 284 | 285 | This library is enabled by default in [OpenResty](https://openresty.org/) 1.11.2.2+. 286 | 287 | If you have to install this library manually, 288 | then ensure you are using at least OpenResty 1.11.2.1 or a custom nginx build including ngx_lua 0.10.6+. Also, you need to configure 289 | the [lua_package_path](https://github.com/openresty/lua-nginx-module#lua_package_path) directive to 290 | add the path of your `lua-resty-limit-traffic` source tree to ngx_lua's Lua module search path, as in 291 | 292 | ```nginx 293 | # nginx.conf 294 | http { 295 | lua_package_path "/path/to/lua-resty-limit-traffic/lib/?.lua;;"; 296 | ... 297 | } 298 | ``` 299 | 300 | and then load one of the modules provided by this library in Lua.
For example, 301 | 302 | ```lua 303 | local limit_req = require "resty.limit.req" 304 | ``` 305 | 306 | [Back to TOC](#table-of-contents) 307 | 308 | Community 309 | ========= 310 | 311 | [Back to TOC](#table-of-contents) 312 | 313 | English Mailing List 314 | -------------------- 315 | 316 | The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers. 317 | 318 | [Back to TOC](#table-of-contents) 319 | 320 | Chinese Mailing List 321 | -------------------- 322 | 323 | The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers. 324 | 325 | [Back to TOC](#table-of-contents) 326 | 327 | Bugs and Patches 328 | ================ 329 | 330 | Please report bugs or submit patches by 331 | 332 | 1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-limit-traffic/issues), 333 | 1. or posting to the [OpenResty community](#community). 334 | 335 | [Back to TOC](#table-of-contents) 336 | 337 | Author 338 | ====== 339 | 340 | Yichun "agentzh" Zhang (章亦春) , OpenResty Inc. 341 | 342 | [Back to TOC](#table-of-contents) 343 | 344 | Copyright and License 345 | ===================== 346 | 347 | This module is licensed under the BSD license. 348 | 349 | Copyright (C) 2015-2019, by Yichun "agentzh" Zhang, OpenResty Inc. 350 | 351 | All rights reserved. 352 | 353 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 354 | 355 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 356 | 357 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
358 | 359 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 360 | 361 | [Back to TOC](#table-of-contents) 362 | 363 | See Also 364 | ======== 365 | * module [resty.limit.req](lib/resty/limit/req.md) 366 | * module [resty.limit.count](lib/resty/limit/count.md) 367 | * module [resty.limit.conn](lib/resty/limit/conn.md) 368 | * module [resty.limit.traffic](lib/resty/limit/traffic.md) 369 | * the ngx_lua module: https://github.com/openresty/lua-nginx-module 370 | * OpenResty: https://openresty.org/ 371 | 372 | [Back to TOC](#table-of-contents) 373 | 374 | -------------------------------------------------------------------------------- /lib/resty/limit/conn.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | resty.limit.conn - Lua module for limiting request concurrency (or concurrent connections) for OpenResty/ngx_lua. 
5 | 6 | Table of Contents 7 | ================= 8 | 9 | * [Name](#name) 10 | * [Synopsis](#synopsis) 11 | * [Description](#description) 12 | * [Methods](#methods) 13 | * [new](#new) 14 | * [incoming](#incoming) 15 | * [is_committed](#is_committed) 16 | * [leaving](#leaving) 17 | * [set_conn](#set_conn) 18 | * [set_burst](#set_burst) 19 | * [uncommit](#uncommit) 20 | * [Caveats](#caveats) 21 | * [Out-of-Sync Counter Prevention](#out-of-sync-counter-prevention) 22 | * [Instance Sharing](#instance-sharing) 23 | * [Limiting Granularity](#limiting-granularity) 24 | * [Installation](#installation) 25 | * [Community](#community) 26 | * [English Mailing List](#english-mailing-list) 27 | * [Chinese Mailing List](#chinese-mailing-list) 28 | * [Bugs and Patches](#bugs-and-patches) 29 | * [Author](#author) 30 | * [Copyright and License](#copyright-and-license) 31 | * [See Also](#see-also) 32 | 33 | Synopsis 34 | ======== 35 | 36 | ```nginx 37 | # demonstrate the usage of the resty.limit.conn module (alone!) 38 | http { 39 | lua_shared_dict my_limit_conn_store 100m; 40 | 41 | server { 42 | location / { 43 | access_by_lua_block { 44 | -- well, we could put the require() and new() calls in our own Lua 45 | -- modules to save overhead. here we put them below just for 46 | -- convenience. 47 | 48 | local limit_conn = require "resty.limit.conn" 49 | 50 | -- limit the requests under 200 concurrent requests (normally just 51 | -- incoming connections unless protocols like SPDY is used) with 52 | -- a burst of 100 extra concurrent requests, that is, we delay 53 | -- requests under 300 concurrent connections and above 200 54 | -- connections, and reject any new requests exceeding 300 55 | -- connections. 56 | -- also, we assume a default request time of 0.5 sec, which can be 57 | -- dynamically adjusted by the leaving() call in log_by_lua below. 
58 | local lim, err = limit_conn.new("my_limit_conn_store", 200, 100, 0.5) 59 | if not lim then 60 | ngx.log(ngx.ERR, 61 | "failed to instantiate a resty.limit.conn object: ", err) 62 | return ngx.exit(500) 63 | end 64 | 65 | -- the following call must be per-request. 66 | -- here we use the remote (IP) address as the limiting key 67 | local key = ngx.var.binary_remote_addr 68 | local delay, err = lim:incoming(key, true) 69 | if not delay then 70 | if err == "rejected" then 71 | return ngx.exit(503) 72 | end 73 | ngx.log(ngx.ERR, "failed to limit req: ", err) 74 | return ngx.exit(500) 75 | end 76 | 77 | if lim:is_committed() then 78 | local ctx = ngx.ctx 79 | ctx.limit_conn = lim 80 | ctx.limit_conn_key = key 81 | ctx.limit_conn_delay = delay 82 | end 83 | 84 | -- the 2nd return value holds the current concurrency level 85 | -- for the specified key. 86 | local conn = err 87 | 88 | if delay >= 0.001 then 89 | -- the request exceeding the 200 connections ratio but below 90 | -- 300 connections, so 91 | -- we intentionally delay it here a bit to conform to the 92 | -- 200 connection limit. 93 | -- ngx.log(ngx.WARN, "delaying") 94 | ngx.sleep(delay) 95 | end 96 | } 97 | 98 | # content handler goes here. if it is content_by_lua, then you can 99 | # merge the Lua code above in access_by_lua into your 100 | # content_by_lua's Lua handler to save a little bit of CPU time. 101 | 102 | log_by_lua_block { 103 | local ctx = ngx.ctx 104 | local lim = ctx.limit_conn 105 | if lim then 106 | -- if you are using an upstream module in the content phase, 107 | -- then you probably want to use $upstream_response_time 108 | -- instead of ($request_time - ctx.limit_conn_delay) below. 
109 | local latency = tonumber(ngx.var.request_time) - ctx.limit_conn_delay 110 | local key = ctx.limit_conn_key 111 | assert(key) 112 | local conn, err = lim:leaving(key, latency) 113 | if not conn then 114 | ngx.log(ngx.ERR, 115 | "failed to record the connection leaving ", 116 | "request: ", err) 117 | return 118 | end 119 | end 120 | } 121 | } 122 | } 123 | } 124 | ``` 125 | 126 | Description 127 | =========== 128 | 129 | This module provides APIs to help the OpenResty/ngx_lua user programmers limit request 130 | concurrency levels. 131 | 132 | If you want to use multiple different instances of this class at once or use one instance 133 | of this class with instances of other classes (like [resty.limit.req](./req.md)), 134 | then you *must* use the [resty.limit.traffic](./traffic.md) module to combine them. 135 | 136 | In contrast with NGINX's standard 137 | [ngx_limit_conn](http://nginx.org/en/docs/http/ngx_http_limit_conn_module.html) module, 138 | this Lua module supports connection delaying in addition to immediate rejection when the 139 | concurrency level threshold is exceeded. 140 | 141 | Methods 142 | ======= 143 | 144 | [Back to TOC](#table-of-contents) 145 | 146 | new 147 | --- 148 | **syntax:** `obj, err = class.new(shdict_name, conn, burst, default_conn_delay)` 149 | 150 | Instantiates an object of this class. The `class` value is returned by the call `require "resty.limit.conn"`. 151 | 152 | This method takes the following arguments: 153 | 154 | * `shdict_name` is the name of the 155 | [lua_shared_dict](https://github.com/openresty/lua-nginx-module#lua_shared_dict) shm zone. 156 | 157 | It is best to use separate shm zones for different kinds of limiters. 158 | * `conn` is the maximum number of concurrent requests allowed. Requests exceeding this ratio (and below `conn` + `burst`) 159 | will get delayed to conform to this threshold. 160 | * `burst` is the number of excessive concurrent requests (or connections) allowed to be 161 | delayed. 
162 | 163 | Requests exceeding this hard limit should get rejected immediately. 164 | * `default_conn_delay` is the default processing latency of a typical connection (or request). 165 | 166 | This delay is used as a basic unit for the extra delay introduced for excessive concurrent requests (or connections), 167 | which can later get adjusted dynamically by the subsequent [leaving](#leaving) method 168 | calls in [log_by_lua*](https://github.com/openresty/lua-nginx-module#log_by_lua). 169 | 170 | On failure, this method returns `nil` and a string describing the error (like a bad `lua_shared_dict` name). 171 | 172 | [Back to TOC](#table-of-contents) 173 | 174 | incoming 175 | -------- 176 | **syntax:** `delay, err = obj:incoming(key, commit)` 177 | 178 | [Back to TOC](#table-of-contents) 179 | 180 | Fires a new concurrent request (or new connection) incoming event and 181 | calculates the delay needed (if any) for the current request 182 | upon the specified key or whether the user should reject it immediately. 183 | 184 | This method accepts the following arguments: 185 | 186 | * `key` is the user specified key to limit the concurrency level. 187 | 188 | For example, one can use the host name (or server zone) 189 | as the key so that we limit concurrency per host name. Otherwise, we can also use the client address as the 190 | key so that we can prevent a single client from flooding our service with too many parallel connections or requests. 191 | 192 | Please note that this module 193 | does not prefix nor suffix the user key so it is the user's responsibility to ensure the key 194 | is unique in the `lua_shared_dict` shm zone. 195 | * `commit` is a boolean value. If set to `true`, the object will actually record the event 196 | in the shm zone backing the current object; otherwise it would just be a "dry run" (which is the default). 197 | 198 | The return values depend on the following cases: 199 | 200 | 1. 
If the request does not exceed the `conn` value specified in the [new](#new) method, then 201 | this method returns `0` as the delay as well as the number of concurrent 202 | requests (or connections) at the current time (as the 2nd return value). 203 | 2. If the request (or connection) exceeds the `conn` limit specified in the [new](#new) method but not 204 | the `conn` + `burst` value, then 205 | this method returns a proper delay (in seconds) for the current request so that it still conforms to 206 | the `conn` threshold as if it came a bit later rather than now. 207 | 208 | In addition, like the previous case, this method 209 | also returns a second return value indicating the number of concurrent requests (or connections) 210 | at this point (including the current request). This 2nd return value can be used to monitor the 211 | unadjusted incoming concurrency level. 212 | 3. If the request exceeds the `conn` + `burst` limit, then this method returns `nil` and 213 | the error string `"rejected"`. 214 | 4. If an error occurred (like failures when accessing the `lua_shared_dict` shm zone backing 215 | the current object), then this method returns `nil` and a string describing the error. 216 | 217 | This method does not sleep itself. It simply returns a delay if necessary and requires the caller 218 | to later invoke the [ngx.sleep](https://github.com/openresty/lua-nginx-module#ngxsleep) 219 | method to sleep. 220 | 221 | This method must be paired with a [leaving](#leaving) method call typically in the 222 | [log_by_lua*](https://github.com/openresty/lua-nginx-module#log_by_lua) context if 223 | and only if this method actually records the event in the shm zone (designated by 224 | a subsequent [is_committed](#is_committed) method call). 
225 | 226 | is_committed 227 | ------------ 228 | **syntax:** `bool = obj:is_committed()` 229 | 230 | Returns `true` if the previous [incoming](#incoming) call actually commits the event 231 | into the `lua_shared_dict` shm store; returns `false` otherwise. 232 | 233 | This result is important in that one should only pair the [leaving](#leaving) method call 234 | with an [incoming](#incoming) call 235 | if and only if this `is_committed` method call returns `true`. 236 | 237 | [Back to TOC](#table-of-contents) 238 | 239 | leaving 240 | -------- 241 | **syntax:** `conn, err = obj:leaving(key, req_latency?)` 242 | 243 | Fires an event that the current request (or connection) is being finalized. Such events 244 | essentially reduce the current concurrency level. 245 | 246 | This method call usually pairs with an earlier [incoming](#incoming) call unless 247 | the [is_committed](#is_committed) call returns `false` after that [incoming](#incoming) call. 248 | 249 | This method takes the following parameters: 250 | 251 | * `key` is the same key string used in the paired [incoming](#incoming) method call. 252 | * `req_latency` is the actual latency of the current request (or connection), which is optional. 253 | 254 | Often we use the value of either the `$request_time` or `$upstream_response_time` nginx builtin variables here. One can, of course, record the latency himself. 255 | 256 | The method returns the new concurrency level (or number of active connections). Unlike 257 | [incoming](#incoming), this method always commits the changes to the shm zone. On failure, it returns `nil` and a string describing the error. 258 | 259 | [Back to TOC](#table-of-contents) 260 | 261 | set_conn 262 | -------- 263 | **syntax:** `obj:set_conn(conn)` 264 | 265 | Overwrites the `conn` threshold value as specified in the [new](#new) method. 266 | 267 | [Back to TOC](#table-of-contents) 268 | 269 | set_burst 270 | --------- 271 | **syntax:** `obj:set_burst(burst)` 272 | 273 | Overwrites the `burst` threshold value as specified in the [new](#new) method. 
274 | 275 | [Back to TOC](#table-of-contents) 276 | 277 | uncommit 278 | -------- 279 | **syntax:** `ok, err = obj:uncommit(key)` 280 | 281 | This tries to undo the commit of the `incoming` call. This method is mainly for use in the [resty.limit.traffic](./traffic.md) 282 | Lua module when combining multiple limiters at the same time. 283 | 284 | This method should not be used in place of the [leaving](#leaving) method though they are 285 | similar in effect and implementation. 286 | 287 | [Back to TOC](#table-of-contents) 288 | 289 | Caveats 290 | ======== 291 | 292 | [Back to TOC](#table-of-contents) 293 | 294 | Out-of-Sync Counter Prevention 295 | ------------------------------ 296 | 297 | Under extreme conditions, like nginx worker processes crashing in the middle of request processing, 298 | the counters stored in the shm zones can go out of sync. This can lead to catastrophic 299 | consequences like blindly rejecting *all* the incoming connections forever. (Note that 300 | the standard `ngx_limit_conn` module also suffers from this issue.) We may 301 | add automatic protection for such cases to this Lua module in the near future. 302 | 303 | Also, it is very important to ensure that the `leaving` call appears first in your 304 | `log_by_lua*` handler code to minimize the chance that other `log_by_lua*` Lua code 305 | throws an exception and prevents the `leaving` call from running. 306 | 307 | [Back to TOC](#table-of-contents) 308 | 309 | Instance Sharing 310 | ================ 311 | 312 | Each instance of this class carries no state information but the `conn` and `burst` 313 | threshold values. The real limiting states based on keys are stored in the `lua_shared_dict` 314 | shm zone specified in the [new](#new) method. 
So it is safe to share instances of 315 | this class [on the nginx worker process level](https://github.com/openresty/lua-nginx-module#data-sharing-within-an-nginx-worker) 316 | as long as the combination of `conn` and `burst` does not change. 317 | 318 | Even if the `conn` and `burst` 319 | combination *does* change, one can still share a single instance as long as one always 320 | calls the [set_conn](#set_conn) and/or [set_burst](#set_burst) methods *right before* 321 | the [incoming](#incoming) call. 322 | 323 | [Back to TOC](#table-of-contents) 324 | 325 | Limiting Granularity 326 | ==================== 327 | 328 | The limiting works on the granularity of an individual NGINX server instance (including all 329 | its worker processes). Thanks to the shm mechanism, we can share state cheaply across 330 | all the workers in a single NGINX server instance. 331 | 332 | If you are running multiple NGINX server instances (like running multiple boxes), then 333 | you need to ensure that the incoming traffic is (more or less) evenly distributed across 334 | all the different NGINX server instances (or boxes). So if you want a limit of N connections 335 | across all the servers, then you just need to specify a limit of `N/n` (where `n` is the number of servers) in each server's configuration. This simple strategy can save all the (big) overhead of sharing a global state across 336 | machine boundaries. 337 | 338 | [Back to TOC](#table-of-contents) 339 | 340 | Installation 341 | ============ 342 | 343 | Please see [library installation instructions](../../../README.md#installation). 344 | 345 | [Back to TOC](#table-of-contents) 346 | 347 | Community 348 | ========= 349 | 350 | [Back to TOC](#table-of-contents) 351 | 352 | English Mailing List 353 | -------------------- 354 | 355 | The [openresty-en](https://groups.google.com/group/openresty-en) mailing list is for English speakers. 
356 | 357 | [Back to TOC](#table-of-contents) 358 | 359 | Chinese Mailing List 360 | -------------------- 361 | 362 | The [openresty](https://groups.google.com/group/openresty) mailing list is for Chinese speakers. 363 | 364 | [Back to TOC](#table-of-contents) 365 | 366 | Bugs and Patches 367 | ================ 368 | 369 | Please report bugs or submit patches by 370 | 371 | 1. creating a ticket on the [GitHub Issue Tracker](https://github.com/openresty/lua-resty-limit-traffic/issues), 372 | 1. or posting to the [OpenResty community](#community). 373 | 374 | [Back to TOC](#table-of-contents) 375 | 376 | Author 377 | ====== 378 | 379 | Yichun "agentzh" Zhang (章亦春) , CloudFlare Inc. 380 | 381 | [Back to TOC](#table-of-contents) 382 | 383 | Copyright and License 384 | ===================== 385 | 386 | This module is licensed under the BSD license. 387 | 388 | Copyright (C) 2015-2016, by Yichun "agentzh" Zhang, CloudFlare Inc. 389 | 390 | All rights reserved. 391 | 392 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 393 | 394 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 395 | 396 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 397 | 398 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 399 | 400 | [Back to TOC](#table-of-contents) 401 | 402 | See Also 403 | ======== 404 | * module [resty.limit.req](./req.md) 405 | * module [resty.limit.count](./count.md) 406 | * module [resty.limit.traffic](./traffic.md) 407 | * library [lua-resty-limit-traffic](../../../README.md) 408 | * the ngx_lua module: https://github.com/openresty/lua-nginx-module 409 | * OpenResty: https://openresty.org/ 410 | 411 | [Back to TOC](#table-of-contents) 412 | 413 | --------------------------------------------------------------------------------