├── .gitattributes
├── .gitignore
├── .travis.yml
├── Makefile
├── README.md
├── dist.ini
├── lib
│   └── resty
│       ├── checkups.lua
│       ├── checkups
│       │   ├── api.lua
│       │   ├── base.lua
│       │   ├── consistent_hash.lua
│       │   ├── dyconfig.lua
│       │   ├── heartbeat.lua
│       │   ├── round_robin.lua
│       │   └── try.lua
│       └── subsystem.lua
├── lua-resty-checkups+API.png
├── lua-resty-checkups-0.1-0.rockspec
├── t
│   ├── cluster_key.t
│   ├── consistent.t
│   ├── dyconfig.t
│   ├── dyconfig_opts.t
│   ├── feedback.t
│   ├── get_redis_info.t
│   ├── get_status.t
│   ├── heartbeat_cb.t
│   ├── http.t
│   ├── lib
│   │   ├── config_api.lua
│   │   ├── config_down.lua
│   │   ├── config_dyconfig.lua
│   │   ├── config_dyconfig_opts.lua
│   │   ├── config_fails.lua
│   │   ├── config_feedback.lua
│   │   ├── config_hash.lua
│   │   ├── config_http.lua
│   │   ├── config_key.lua
│   │   ├── config_redis.lua
│   │   ├── config_round_robin.lua
│   │   ├── config_timeout.lua
│   │   ├── config_unprotected_api.lua
│   │   └── config_ups.lua
│   ├── max_fails.t
│   ├── passive.t
│   ├── positive.t
│   ├── round_robin.t
│   ├── sensibility.t
│   ├── try_timeout.t
│   ├── unprotected.t
│   ├── ups_down.t
│   ├── upstream.t
│   └── worker_crash.t
└── util
    └── lua-releng
/.gitattributes: -------------------------------------------------------------------------------- 1 | *.t linguist-language=Text 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | t/servroot 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: perl 2 | perl: 3 | - 5.10 4 | cache: 5 | - apt 6 | - ccache 7 | services: 8 | - redis-server 9 | env: 10 | - V_OPENRESTY=1.9.15.1 11 | install: 12 | - cpanm -v --notest Test::Nginx 13 | before_script: 14 | - sudo apt-get update -q 15 | - sudo apt-get install libreadline-dev libncurses5-dev libpcre3-dev libssl-dev -y 16 | - sudo apt-get install make build-essential lua5.1 -y 17 | - wget https://openresty.org/download/openresty-$V_OPENRESTY.tar.gz 18 | - tar xzf openresty-$V_OPENRESTY.tar.gz 19 | - cd openresty-$V_OPENRESTY && ./configure && make && sudo make install && cd .. 
20 | script: 21 | - make test 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OPENRESTY_PREFIX=/usr/local/openresty 2 | 3 | PREFIX ?= /usr/local 4 | LUA_INCLUDE_DIR ?= $(PREFIX)/include 5 | LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) 6 | INSTALL ?= install 7 | 8 | .PHONY: all test install 9 | 10 | all: ; 11 | 12 | install: all 13 | $(INSTALL) -d $(DESTDIR)$(LUA_LIB_DIR)/resty/checkups/ 14 | $(INSTALL) lib/resty/*.lua $(DESTDIR)$(LUA_LIB_DIR)/resty/ 15 | $(INSTALL) lib/resty/checkups/*.lua $(DESTDIR)$(LUA_LIB_DIR)/resty/checkups/ 16 | 17 | test: all 18 | util/lua-releng 19 | PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH prove -I../test-nginx/lib -r t/ 20 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Name 2 | ==== 3 | 4 | lua-resty-checkups - Manage Nginx upstreams in pure ngx_lua 5 | 6 | [![Build Status](https://travis-ci.org/upyun/lua-resty-checkups.svg)](https://travis-ci.org/upyun/lua-resty-checkups) 7 | 8 | Table of Contents 9 | ================= 10 | 11 | * [Name](#name) 12 | * [Status](#status) 13 | * [Features](#features) 14 | * [Installation](#installation) 15 | * [Compatibility](#compatibility) 16 | * [Synopsis](#synopsis) 17 | * [Configuration](#configuration) 18 | * [Lua configuration](#lua-configuration) 19 | * [global configurations](#global-configurations) 20 | * [cluster configurations](#cluster-configurations) 21 | * [Nginx configuration](#nginx-configuration) 22 | * [API](#api) 23 | * [init](#init) 24 | * [prepare_checker](#prepare_checker) 25 | * [create_checker](#create_checker) 26 | * [ready_ok](#ready_ok) 27 | * [select_peer](#select_peer) 28 | * [get_status](#get_status) 29 | * [get_ups_timeout](#get_ups_timeout) 30 | * [feedback_status](#feedback_status) 31 | * [update_upstream](#update_upstream) 32 | * [delete_upstream](#delete_upstream) 33 | * [Copyright and License](#copyright-and-license) 34 | 35 | Status 36 | ====== 37 | 38 | Probably production ready in most cases, though not yet proven in the wild. Please check the issues list and let me know if you have any problems / questions. 39 | 40 | Features 41 | ======== 42 | 43 | * Periodically heartbeat to upstream servers 44 | * Proactive and passive health check 45 | * Dynamic upstream update 46 | * Balance by weighted round-robin or consistent-hash 47 | * Synchronize with Nginx upstream blocks 48 | * Try clusters by levels or by keys 49 | 50 | Installation 51 | ============ 52 | 53 | * [LuaRocks](https://luarocks.org/): 54 | 55 | ```bash 56 | $ luarocks install lua-resty-checkups 57 | ``` 58 | 59 | * [OPM](https://github.com/openresty/opm): 60 | 61 | ```bash 62 | $ opm get upyun/lua-resty-checkups 63 | ``` 64 | 65 | * Manually: 66 | 67 | Just tweak the `lua_package_path` or the `LUA_PATH` environment variable to add the installation path of this Lua module: 68 | 69 | ``` 70 | /path/to/lua-resty-checkups/lib/?.lua; 71 | ``` 72 | 73 | 74 | Compatibility 75 | ============= 76 | 77 | * [ngx_http_lua_module](https://github.com/openresty/lua-nginx-module): v0.9.20 or higher. 
78 | 79 | 80 | Synopsis 81 | ======== 82 | 83 | ```lua 84 | -- config.lua 85 | 86 | _M = {} 87 | 88 | _M.global = { 89 | checkup_timer_interval = 15, 90 | checkup_shd_sync_enable = true, 91 | shd_config_timer_interval = 1, 92 | } 93 | 94 | _M.ups1 = { 95 | cluster = { 96 | { 97 | servers = { 98 | { host = "127.0.0.1", port = 4444, weight=10, max_fails=3, fail_timeout=10 }, 99 | } 100 | }, 101 | }, 102 | } 103 | 104 | return _M 105 | ``` 106 | 107 | ```nginx 108 | # nginx.conf 109 | 110 | lua_package_path "/path/to/lua-resty-checkups/lib/?.lua;/path/to/config.lua;;"; 111 | 112 | lua_shared_dict state 10m; 113 | lua_shared_dict mutex 1m; 114 | lua_shared_dict locks 1m; 115 | lua_shared_dict config 10m; 116 | 117 | server { 118 | listen 12350; 119 | return 200 12350; 120 | } 121 | 122 | server { 123 | listen 12351; 124 | return 200 12351; 125 | } 126 | 127 | init_by_lua_block { 128 | local config = require "config" 129 | local checkups = require "resty.checkups.api" 130 | checkups.init(config) 131 | } 132 | 133 | init_worker_by_lua_block { 134 | local config = require "config" 135 | local checkups = require "resty.checkups.api" 136 | 137 | checkups.prepare_checker(config) 138 | checkups.create_checker() 139 | } 140 | 141 | server { 142 | location = /12350 { 143 | proxy_pass http://127.0.0.1:12350/; 144 | } 145 | location = /12351 { 146 | proxy_pass http://127.0.0.1:12351/; 147 | } 148 | 149 | location = /t { 150 | content_by_lua_block { 151 | local checkups = require "resty.checkups.api" 152 | 153 | local callback = function(host, port) 154 | local res = ngx.location.capture("/" .. port) 155 | ngx.say(res.body) 156 | return 1 157 | end 158 | 159 | local ok, err 160 | 161 | -- connect to a dead server, no upstream available 162 | ok, err = checkups.ready_ok("ups1", callback) 163 | if err then ngx.say(err) end 164 | 165 | -- add server to ups1 166 | ok, err = checkups.update_upstream("ups1", { 167 | { 168 | servers = { 169 | { host = "127.0.0.1", port = 12350, weight=10, max_fails=3, fail_timeout=10 }, 170 | } 171 | }, 172 | }) 173 | 174 | if err then ngx.say(err) end 175 | ngx.sleep(1) 176 | ok, err = checkups.ready_ok("ups1", callback) 177 | if err then ngx.say(err) end 178 | ok, err = checkups.ready_ok("ups1", callback) 179 | if err then ngx.say(err) end 180 | 181 | -- add server to new upstream 182 | ok, err = checkups.update_upstream("ups2", { 183 | { 184 | servers = { 185 | { host="127.0.0.1", port=12351 }, 186 | } 187 | }, 188 | }) 189 | if err then ngx.say(err) end 190 | ngx.sleep(1) 191 | ok, err = checkups.ready_ok("ups2", callback) 192 | if err then ngx.say(err) end 193 | 194 | -- add server to ups2, reset rr state 195 | ok, err = checkups.update_upstream("ups2", { 196 | { 197 | servers = { 198 | { host = "127.0.0.1", port = 12350, weight=10, max_fails=3, fail_timeout=10 }, 199 | { host = "127.0.0.1", port = 12351, weight=10, max_fails=3, fail_timeout=10 }, 200 | } 201 | }, 202 | }) 203 | if err then ngx.say(err) end 204 | ngx.sleep(1) 205 | ok, err = checkups.ready_ok("ups2", callback) 206 | if err then ngx.say(err) end 207 | ok, err = checkups.ready_ok("ups2", callback) 208 | if err then ngx.say(err) end 209 | } 210 | } 211 | } 212 | ``` 213 | 214 | A typical output of the `/t` location defined above is: 215 | 216 | no servers available 217 | 12350 218 | 12350 219 | 12351 220 | 12350 221 | 12351 222 | 223 | Configuration 224 | ============= 225 | 226 | Lua configuration 227 | ----------------- 228 | 229 | The configuration file of checkups is a Lua module consisting of two parts: the 
global part and the cluster part. 230 | 231 | 232 | An example configuration file of checkups is shown below, 233 | 234 | 235 | ```lua 236 | -- config.lua 237 | 238 | -- Here is the global part 239 | 240 | _M = {} 241 | 242 | _M.global = { 243 | checkup_timer_interval = 15, 244 | checkup_timer_overtime = 60, 245 | default_heartbeat_enable = true, 246 | checkup_shd_sync_enable = true, 247 | shd_config_timer_interval = 1, 248 | } 249 | 250 | 251 | -- The remaining parts are cluster configurations 252 | 253 | _M.redis = { 254 | enable = true, 255 | typ = "redis", 256 | timeout = 2, 257 | read_timeout = 15, 258 | send_timeout = 15, 259 | 260 | protected = true, 261 | 262 | cluster = { 263 | { -- level 1 264 | try = 2, 265 | servers = { 266 | { host = "192.168.0.1", port = 6379, weight=10, max_fails=3, fail_timeout=10 }, 267 | { host = "192.168.0.2", port = 6379, weight=10, max_fails=3, fail_timeout=10 }, 268 | } 269 | }, 270 | { -- level 2 271 | servers = { 272 | { host = "192.168.0.3", port = 6379, weight=10, max_fails=3, fail_timeout=10 }, 273 | } 274 | }, 275 | }, 276 | } 277 | 278 | _M.api = { 279 | enable = false, 280 | typ = "http", 281 | http_opts = { 282 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 283 | statuses = { 284 | ["500"] = false, 285 | ["502"] = false, 286 | ["503"] = false, 287 | ["504"] = false, 288 | }, 289 | }, 290 | 291 | mode = "hash", 292 | 293 | cluster = { 294 | dc1 = { 295 | servers = { 296 | { host = "192.168.1.1", port = 1234, weight=10, max_fails=3, fail_timeout=10 }, 297 | } 298 | }, 299 | dc2 = { 300 | servers = { 301 | { host = "192.168.1.2", port = 1234, weight=10, max_fails=3, fail_timeout=10 }, 302 | } 303 | } 304 | } 305 | } 306 | 307 | _M.ups_from_nginx = { 308 | timeout = 2, 309 | 310 | cluster = { 311 | { -- level 1 312 | upstream = "api.com", 313 | }, 314 | { -- level 2 315 | upstream = "api.com", 316 | upstream_only_backup = true, 317 | }, 318 | }, 319 | } 320 | 321 | return _M 322 | ``` 323 | 324 | global configurations 325 | --------------------- 326 | 327 | * `checkup_timer_interval`: Interval of sending heartbeats to backend servers. Default is `5`. 328 | * `checkup_timer_overtime`: Expiration time of the checkup timer key. In most cases, you don't need to change this value. Default is `60`. 329 | * `default_heartbeat_enable`: Whether checkups sends heartbeats to servers by default. Default is `true`. 330 | * `checkup_shd_sync_enable`: Create an upstream syncer for each worker. If set to `false`, dynamic upstreams will not work properly. Default is `true`. 331 | * `shd_config_timer_interval`: Interval of syncing the upstream list from shared memory. Default is equal to `checkup_timer_interval`. 332 | * `ups_status_sync_enable`: If set to `true`, checkups will sync upstream status from checkups to Nginx upstream blocks. Default is `false`. 333 | * `ups_status_timer_interval`: Interval of syncing upstream status from checkups to Nginx upstream blocks. 334 | 335 | Cluster configurations 336 | ---------------------- 337 | 338 | * `skey`: `_M.xxxxx`. `xxxxx` is the `skey`(service key) of this cluster. 339 | * `enable`: Enable or disable heartbeats to servers. Default is `true`. 340 | * `typ`: Cluster type, must be one of `general`, `redis`, `mysql`, `http`. Default is `general`. 341 | * `general`: Heartbeat by TCP `sock:connect`. 342 | * `redis`: Heartbeat by redis `PING`. [lua-resty-redis](https://github.com/openresty/lua-resty-redis) module is required. 343 | * `mysql`: Heartbeat by mysql `db:connect`. 
[lua-resty-mysql](https://github.com/openresty/lua-resty-mysql) module is required. 344 | * `http`: Heartbeat by HTTP request. You can set up a customized HTTP request and response codes in `http_opts`. 345 | * `timeout`: Connect timeout to upstream servers. Default is `5`. 346 | * `read_timeout`: Read timeout to upstream servers (not used during heartbeating). Default is equal to `timeout`. 347 | * `send_timeout`: Write timeout to upstream servers (not used during heartbeating). Default is equal to `timeout`. 348 | * `http_opts`: HTTP heartbeat configurations. Only works for `typ="http"`. 349 | * `query`: HTTP request to heartbeat. 350 | * `statuses`: If the status code returned by the server is mapped to `false`, then the server is considered to be failing. 351 | 352 | * `mode`: Balance mode. Can be set to `hash`, `url_hash` or `ip_hash`. Checkups will balance servers by `hash_key`, `ngx.var.uri` or `ngx.var.remote_addr` respectively. Default is `wrr`. 353 | * `protected`: If set to `true` and all the servers in the cluster are failing, checkups will not mark the last failing server as unavailable(`err`); instead, it will be marked as `unstable`(still available in the next try). Default is `true`. 354 | * `cluster`: You can configure multiple levels according to the cluster priority, and at each level you can configure a cluster of `servers`. Checkups will try the next level only when all the servers in the prior level are considered unavailable. 355 | 356 | Instead of trying clusters by levels, you can configure checkups to try clusters by key(see the `api` cluster above). Remember that you should also pass an extra argument like `opts.cluster_key={"dc1", "dc2"}` or `opts.cluster_key={3, 1, 2}` to [checkups.ready_ok](#ready_ok) to make checkups try in the order of `dc1`, `dc2` or `level 3`, `level 1`, `level 2`; see the sketch after this list. If you haven't passed `opts.cluster_key` to [checkups.ready_ok](#ready_ok), checkups will still try clusters by levels. As for the above `api` cluster, checkups will eventually return `no servers available`. 357 | * `try`: Retry count. Default is the number of servers. 358 | * `try_timeout`: Limits the time during which a request can be responded to, similar to nginx's `proxy_next_upstream_timeout`. 359 | * `servers`: Configuration options for `servers` are listed as follows, 360 | * `weight`: Sets the weight of the server. Default is `1`. 361 | * `max_fails`: Sets the number of unsuccessful attempts to communicate with the server that should happen in the duration set by the `fail_timeout` parameter. By default, the number of unsuccessful attempts is set to `0`, which disables the accounting of attempts. What is considered an unsuccessful attempt is defined by `http_opts.statuses` if `typ="http"`, or by a `nil`/`false` returned by [checkups.ready_ok](#ready_ok). This option is only available in round-robin mode. 362 | * `fail_timeout`: Sets the time during which the specified number of unsuccessful attempts to communicate with the server should happen to consider the server unavailable, and also the period of time the server will be considered unavailable. By default, the parameter is set to `10` seconds. This option is only available in round-robin mode. 363 | 364 | * `upstream`: Name of the Nginx upstream block. Checkups will extract servers from the Nginx conf's upstream blocks in [prepare_checker](#prepare_checker). [lua-upstream-nginx-module](https://github.com/openresty/lua-upstream-nginx-module) module is required. 365 | * `upstream_only_backup`: If set to `true`, checkups will only extract backup servers from Nginx upstream blocks. 
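Here is a minimal sketch of trying clusters by key, assuming the `api` cluster (`mode = "hash"`, keys `dc1`/`dc2`) from the example config above; the plain TCP callback is only an illustrative stand-in for real upstream I/O:

```lua
-- content_by_lua_block: try the "api" cluster, dc1 first, then dc2
local checkups = require "resty.checkups.api"

local res, err = checkups.ready_ok("api", function(host, port)
    local sock = ngx.socket.tcp()
    local ok, cerr = sock:connect(host, port)
    if not ok then
        -- returning nil/false counts as a failed try,
        -- so checkups moves on to the next peer/cluster
        return nil, cerr
    end
    sock:setkeepalive()
    return true
end, {
    cluster_key = { "dc1", "dc2" },  -- try clusters in this order
    hash_key = ngx.var.uri,          -- key used in "hash" balance mode
})

if not res then
    ngx.log(ngx.ERR, "no upstream available: ", err)
end
```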
366 | 367 | 368 | Nginx configuration 369 | ------------------- 370 | 371 | Add the paths of the Lua config file and checkups to `lua_package_path`, and create the Lua shared dicts used by checkups. You should put these lines into the `http` block of your Nginx config file. 372 | 373 | lua_package_path "/path/to/lua-resty-checkups/lib/?.lua;/path/to/config.lua;;"; 374 | 375 | lua_shared_dict state 10m; 376 | lua_shared_dict mutex 1m; 377 | lua_shared_dict locks 1m; 378 | lua_shared_dict config 10m; 379 | 380 | If you use the stream subsystem, you should put these lines into the `stream` block of your Nginx config file. 381 | 382 | lua_package_path "/path/to/lua-resty-checkups/lib/?.lua;/path/to/config.lua;;"; 383 | 384 | lua_shared_dict stream_state 10m; 385 | lua_shared_dict stream_mutex 1m; 386 | lua_shared_dict stream_locks 1m; 387 | lua_shared_dict stream_config 10m; 388 | 389 | API 390 | === 391 | 392 | ![](lua-resty-checkups+API.png) 393 | 394 | init 395 | --------------- 396 | **syntax:** *init(config)* 397 | 398 | **phase:** *init_by_lua* 399 | 400 | Copy upstreams from `config.lua` to the shdict, extract servers from Nginx upstream blocks and do some basic initialization. 401 | 402 | 403 | prepare_checker 404 | --------------- 405 | 406 | **syntax:** *prepare_checker(config)* 407 | 408 | **phase:** *init_worker_by_lua* 409 | 410 | Copy configurations from `config.lua` to worker checkups, extract servers from Nginx upstream blocks and do some basic initialization. 411 | 412 | 413 | create_checker 414 | -------------- 415 | 416 | **syntax:** *create_checker()* 417 | 418 | **phase:** *init_worker_by_lua* 419 | 420 | Create the heartbeat timer and the upstream sync timer. Only one heartbeat timer will be created among all the workers. It's highly recommended to call this method in the `init_worker` phase. 421 | 422 | ready_ok 423 | -------- 424 | 425 | **syntax:** *res, err = ready_ok(skey, callback, opts?)* 426 | 427 | **phase:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.** 428 | 429 | Select an available `peer` from cluster `skey` and call `callback(peer.host, peer.port, opts)`. 430 | 431 | The `opts` table accepts the following fields, 432 | 433 | * `cluster_key`: Try clusters by `cluster_key`. Checkups will try clusters in the order of `cluster_key`. `cluster_key` can contain the names of the clusters or the levels of the clusters. Names e.g.: `{"cluster_name_A", "name_B", "name_C"}`. Levels e.g.: `{3, 2, 1}`. 434 | * `hash_key`: Key used in `hash` balance mode. If not set, `ngx.var.uri` will be used. 435 | * `try`: Retries will be no more than `try` times. 436 | * `try_timeout`: Limits the time during which a request can be responded to, similar to nginx's `proxy_next_upstream_timeout`. 437 | 438 | Returns what `callback` returns on success, or returns `nil` and a string describing the error otherwise. 439 | 440 | If `callback` returns `nil` or `false`, checkups will consider it to be a failed try and will retry `callback` with another peer. So, **always remember not to return `nil` or `false` after a successful callback.** 441 | 442 | select_peer 443 | ----------- 444 | 445 | **syntax:** *peer, err = select_peer(skey)* 446 | 447 | **context:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, balancer_by_lua* 448 | 449 | Select an available peer from cluster `skey`. 450 | 451 | Returns a table containing the `host` and `port` of an available peer. 452 | 453 | In case of errors, returns `nil` and a string describing the error. 
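As a usage illustration (a hedged sketch, not part of the original docs), `select_peer` pairs naturally with the `ngx.balancer` API from lua-resty-core in a `balancer_by_lua_block`, assuming a cluster named `ups1` is configured:

```lua
-- balancer_by_lua_block: pick a peer with checkups, hand it to nginx
local checkups = require "resty.checkups.api"
local balancer = require "ngx.balancer"

local peer, err = checkups.select_peer("ups1")
if not peer then
    ngx.log(ngx.ERR, "select_peer failed: ", err)
    return ngx.exit(500)
end

local ok, err = balancer.set_current_peer(peer.host, peer.port)
if not ok then
    ngx.log(ngx.ERR, "failed to set the current peer: ", err)
    return ngx.exit(500)
end
```

After the request completes, the result can be reported back with `feedback_status` (documented below) so that passive health checking stays accurate.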
454 | 455 | get_status 456 | ---------- 457 | 458 | **syntax:** *status = get_status()* 459 | 460 | **phase:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.** 461 | 462 | Return checkups status in `json` format. 463 | 464 | get_ups_timeout 465 | --------------- 466 | 467 | **syntax:** *connect_timeout, send_timeout, read_timeout = get_ups_timeout(skey)* 468 | 469 | **phase:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.** 470 | 471 | Return timeout of cluster `skey`. 472 | 473 | feedback_status 474 | --------------- 475 | 476 | **syntax:** *ok, err = feedback_status(skey, host, port, failed)* 477 | 478 | **context:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.*, balancer_by_lua.** 479 | 480 | Mark server `host:port` in cluster `skey` as failed(`true`) or available(`false`). 481 | 482 | Returns `1` on success, or returns `nil` and a string describing the error otherwise. 483 | 484 | update_upstream 485 | --------------- 486 | 487 | **syntax:** *ok, err = update_upstream(skey, upstream)* 488 | 489 | **phase:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.** 490 | 491 | Update cluster `skey`. `upstream` is in the same format as `cluster` in `config.lua`. 492 | 493 | Returns `true` on success, or returns `false` and a string describing the error otherwise. 494 | 495 | delete_upstream 496 | --------------- 497 | 498 | **syntax:** *ok, err = delete_upstream(skey)* 499 | 500 | **phase:** *rewrite_by_lua*, access_by_lua*, content_by_lua*, ngx.timer.** 501 | 502 | Delete cluster `skey` from upstream list. 503 | 504 | Returns `true` on success, or returns `false` and a string describing the error otherwise. 505 | 506 | Copyright and License 507 | ===================== 508 | 509 | The bundle itself is licensed under the 2-clause BSD license. 510 | 511 | Copyright (c) 2016, UPYUN(又拍云) Inc. 512 | 513 | This module is licensed under the terms of the BSD license. 514 | 515 | Redistribution and use in source and binary forms, with or without 516 | modification, are permitted provided that the following conditions are 517 | met: 518 | 519 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 520 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 521 | 522 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 523 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 524 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 525 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 526 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 527 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 528 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 529 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 530 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 531 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 532 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
533 | 534 | See Also 535 | ======== 536 | * [lua-nginx-module](https://github.com/openresty/lua-nginx-module) 537 | -------------------------------------------------------------------------------- /dist.ini: -------------------------------------------------------------------------------- 1 | name=lua-resty-checkups 2 | abstract=Manage Nginx upstreams in pure Lua 3 | author=huangnauh huanglibo2010@gmail.com 4 | is_original=yes 5 | license=2bsd 6 | lib_dir=lib 7 | repo_link=https://github.com/upyun/lua-resty-checkups 8 | version=0.1 9 | 10 | 11 | -------------------------------------------------------------------------------- /lib/resty/checkups.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016 UPYUN, Inc. 2 | 3 | local api = require "resty.checkups.api" 4 | 5 | return api 6 | -------------------------------------------------------------------------------- /lib/resty/checkups/api.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016 UPYUN, Inc. 2 | 3 | local cjson = require "cjson.safe" 4 | 5 | local heartbeat = require "resty.checkups.heartbeat" 6 | local dyconfig = require "resty.checkups.dyconfig" 7 | local base = require "resty.checkups.base" 8 | local try = require "resty.checkups.try" 9 | local subsystem = require "resty.subsystem" 10 | 11 | local str_format = string.format 12 | 13 | local localtime = ngx.localtime 14 | local log = ngx.log 15 | local now = ngx.now 16 | local ERR = ngx.ERR 17 | local WARN = ngx.WARN 18 | local INFO = ngx.INFO 19 | local worker_id = ngx.worker.id 20 | local get_phase = ngx.get_phase 21 | local type = type 22 | local next = next 23 | local pairs = pairs 24 | local ipairs = ipairs 25 | 26 | local get_shm = subsystem.get_shm 27 | local mutex = get_shm("mutex") 28 | local state = get_shm("state") 29 | local shd_config = get_shm("config") 30 | 31 | 32 | local _M = { 33 | _VERSION = "0.20", 34 | STATUS_OK = base.STATUS_OK, STATUS_UNSTABLE = base.STATUS_UNSTABLE, STATUS_ERR = base.STATUS_ERR 35 | } 36 | 37 | 38 | function _M.feedback_status(skey, host, port, failed) 39 | local ups = base.upstream.checkups[skey] 40 | if not ups then 41 | return nil, "unknown skey " .. skey 42 | end 43 | 44 | local srv 45 | for level, cls in pairs(ups.cluster) do 46 | for _, s in ipairs(cls.servers) do 47 | if s.host == host and s.port == port then 48 | srv = s 49 | break 50 | end 51 | end 52 | end 53 | 54 | if not srv then 55 | return nil, "unknown host:port" .. host .. ":" .. port 56 | end 57 | 58 | base.set_srv_status(skey, srv, failed) 59 | return 1 60 | end 61 | 62 | 63 | function _M.ready_ok(skey, callback, opts) 64 | opts = opts or {} 65 | local ups = base.upstream.checkups[skey] 66 | if not ups then 67 | return nil, "unknown skey " .. 
skey 68 | end 69 | 70 | return try.try_cluster(skey, callback, opts) 71 | end 72 | 73 | 74 | function _M.init(config) 75 | if not config.global.checkup_shd_sync_enable then 76 | return true 77 | end 78 | 79 | local skeys = {} 80 | for skey, ups in pairs(config) do 81 | if type(ups) == "table" and type(ups.cluster) == "table" then 82 | for level, cls in pairs(ups.cluster) do 83 | base.extract_servers_from_upstream(skey, cls) 84 | end 85 | 86 | local key = dyconfig._gen_shd_key(skey) 87 | local ok, err = shd_config:set(key, cjson.encode(base.table_dup(ups))) 88 | if not ok then 89 | return nil, err 90 | end 91 | end 92 | skeys[skey] = 1 93 | end 94 | 95 | local ok, err = shd_config:set(base.SHD_CONFIG_VERSION_KEY, 0) 96 | if not ok then 97 | return nil, err 98 | end 99 | 100 | local ok, err = shd_config:set(base.SKEYS_KEY, cjson.encode(skeys)) 101 | if not ok then 102 | return nil, err 103 | end 104 | 105 | return true 106 | end 107 | 108 | 109 | function _M.prepare_checker(config) 110 | base.upstream.start_time = localtime() 111 | base.upstream.conf_hash = config.global.conf_hash 112 | base.upstream.checkup_timer_interval = config.global.checkup_timer_interval or 5 113 | base.upstream.checkup_timer_overtime = config.global.checkup_timer_overtime or 60 114 | base.upstream.checkups = {} 115 | base.upstream.ups_status_sync_enable = config.global.ups_status_sync_enable 116 | base.upstream.ups_status_timer_interval = config.global.ups_status_timer_interval or 5 117 | base.upstream.checkup_shd_sync_enable = config.global.checkup_shd_sync_enable 118 | base.upstream.shd_config_timer_interval = config.global.shd_config_timer_interval 119 | or base.upstream.checkup_timer_interval 120 | base.upstream.default_heartbeat_enable = config.global.default_heartbeat_enable 121 | 122 | for skey, ups in pairs(config) do 123 | if type(ups) == "table" and type(ups.cluster) == "table" then 124 | base.upstream.checkups[skey] = base.table_dup(ups) 125 | 126 | for level, cls in pairs(base.upstream.checkups[skey].cluster) do 127 | base.extract_servers_from_upstream(skey, cls) 128 | end 129 | end 130 | end 131 | 132 | if base.upstream.checkup_shd_sync_enable then 133 | base.upstream.shd_config_version = 0 134 | end 135 | 136 | base.upstream.initialized = true 137 | end 138 | 139 | 140 | function _M.get_status() 141 | local all_status = {} 142 | for skey in pairs(base.upstream.checkups) do 143 | all_status["cls:" .. 
skey] = base.get_upstream_status(skey) 144 | end 145 | local last_check_time = state:get(base.CHECKUP_LAST_CHECK_TIME_KEY) or cjson.null 146 | all_status.last_check_time = last_check_time 147 | all_status.checkup_timer_alive = state:get(base.CHECKUP_TIMER_ALIVE_KEY) or false 148 | all_status.start_time = base.upstream.start_time 149 | all_status.conf_hash = base.upstream.conf_hash or cjson.null 150 | all_status.shd_config_version = base.upstream.shd_config_version or cjson.null 151 | 152 | all_status.config_timer = dyconfig.get_timer_key_status() 153 | 154 | return all_status 155 | end 156 | 157 | 158 | function _M.get_ups_timeout(skey) 159 | if not skey then 160 | return 161 | end 162 | 163 | local ups = base.upstream.checkups[skey] 164 | if not ups then 165 | return 166 | end 167 | 168 | local timeout = ups.timeout or 5 169 | return timeout, ups.send_timeout or timeout, ups.read_timeout or timeout 170 | end 171 | 172 | 173 | function _M.create_checker() 174 | local phase = get_phase() 175 | if phase ~= "init_worker" then 176 | error("create_checker must be called in init_worker phase") 177 | end 178 | 179 | if not base.upstream.initialized then 180 | log(ERR, "create checker failed, call prepare_checker in init_by_lua") 181 | return 182 | end 183 | 184 | -- shd config syncer enabled 185 | if base.upstream.shd_config_version then 186 | dyconfig.create_shd_config_syncer() 187 | end 188 | 189 | if base.upstream.ups_status_sync_enable and not base.ups_status_timer_created then 190 | local ok, err = ngx.timer.at(0, base.ups_status_checker) 191 | if not ok then 192 | log(WARN, "failed to create ups_status_checker: ", err) 193 | return 194 | end 195 | base.ups_status_timer_created = true 196 | end 197 | 198 | if not worker_id then 199 | log(ERR, "ngx_http_lua_module version too low, no heartbeat timer will be created") 200 | return 201 | elseif worker_id() ~= 0 then 202 | return 203 | end 204 | 205 | -- only worker 0 will create heartbeat timer 206 | local ok, err = ngx.timer.at(0, heartbeat.active_checkup) 207 | if not ok then 208 | log(WARN, "failed to create timer: ", err) 209 | return 210 | end 211 | 212 | local ckey = base.CHECKUP_TIMER_KEY 213 | local overtime = base.upstream.checkup_timer_overtime 214 | local ok, err = mutex:set(ckey, 1, overtime) 215 | if not ok then 216 | log(WARN, "failed to update shm: ", err) 217 | end 218 | end 219 | 220 | 221 | function _M.select_peer(skey) 222 | return _M.ready_ok(skey, function(host, port) 223 | return { host=host, port=port } 224 | end) 225 | end 226 | 227 | 228 | local function gen_upstream(skey, upstream) 229 | local ups = upstream 230 | if upstream.cluster then 231 | -- all upstream 232 | if type(upstream.cluster) ~= "table" then 233 | return nil, "cluster invalid" 234 | end 235 | else 236 | -- only servers 237 | local dyupstream, err = dyconfig.do_get_upstream(skey) 238 | if err then 239 | return nil, err 240 | end 241 | 242 | dyupstream = dyupstream or {} 243 | dyupstream.cluster = upstream 244 | ups = dyupstream 245 | end 246 | 247 | -- check servers 248 | for level, cls in pairs(ups.cluster) do 249 | if type(cls) ~= "table" or not next(cls) then 250 | return nil, "can not update empty level" 251 | end 252 | 253 | local servers = cls.servers 254 | if type(servers) ~= "table" or not next(servers) then 255 | return nil, "servers invalid" 256 | end 257 | 258 | for _, srv in ipairs(servers) do 259 | local ok, err = dyconfig.check_update_server_args(skey, level, srv) 260 | if not ok then 261 | return nil, err 262 | end 263 | end 264 | end 265 
| 266 | return ups 267 | end 268 | 269 | 270 | function _M.get_upstream(skey) 271 | local ups, err 272 | if skey then 273 | ups, err = dyconfig.do_get_upstream(skey) 274 | else 275 | ups, err = dyconfig.do_get_upstreams() 276 | end 277 | 278 | if err then 279 | return nil, err 280 | end 281 | return ups 282 | end 283 | 284 | 285 | function _M.update_upstream(skey, upstream) 286 | if type(upstream) ~= "table" or not next(upstream) then 287 | return false, "can not set empty upstream" 288 | end 289 | 290 | local lock, err = base.get_lock(base.SKEYS_KEY) 291 | if not lock then 292 | log(WARN, "failed to acquire the lock: ", err) 293 | return false, err 294 | end 295 | 296 | local ups, err = gen_upstream(skey, upstream) 297 | local ok = false 298 | if not err then 299 | ok, err = dyconfig.do_update_upstream(skey, ups) 300 | end 301 | 302 | base.release_lock(lock) 303 | 304 | return ok, err 305 | end 306 | 307 | 308 | function _M.delete_upstream(skey) 309 | local lock, ok, err 310 | lock, err = base.get_lock(base.SKEYS_KEY) 311 | if not lock then 312 | log(WARN, "failed to acquire the lock: ", err) 313 | return false, err 314 | end 315 | 316 | ok, err = dyconfig.do_delete_upstream(skey) 317 | 318 | base.release_lock(lock) 319 | 320 | return ok, err 321 | end 322 | 323 | 324 | return _M 325 | -------------------------------------------------------------------------------- /lib/resty/checkups/base.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016 UPYUN, Inc. 2 | 3 | local cjson = require "cjson.safe" 4 | 5 | local lock = require "resty.lock" 6 | local subsystem = require "resty.subsystem" 7 | 8 | local str_format = string.format 9 | local str_sub = string.sub 10 | local str_find = string.find 11 | local str_match = string.match 12 | local tab_insert = table.insert 13 | local unpack = unpack 14 | local tostring = tostring 15 | local ipairs = ipairs 16 | local pairs = pairs 17 | local type = type 18 | 19 | local log = ngx.log 20 | local ERR = ngx.ERR 21 | local WARN = ngx.WARN 22 | local now = ngx.now 23 | 24 | local get_shm = subsystem.get_shm 25 | local get_shm_key = subsystem.get_shm_key 26 | local state = get_shm("state") 27 | 28 | 29 | local _M = { 30 | _VERSION = "0.20", 31 | STATUS_OK = 0, STATUS_UNSTABLE = 1, STATUS_ERR = 2 32 | } 33 | 34 | local ngx_upstream 35 | 36 | local CHECKUP_TIMER_KEY = "checkups:timer" 37 | _M.CHECKUP_TIMER_KEY = CHECKUP_TIMER_KEY 38 | local CHECKUP_LAST_CHECK_TIME_KEY = "checkups:last_check_time" 39 | _M.CHECKUP_LAST_CHECK_TIME_KEY = CHECKUP_LAST_CHECK_TIME_KEY 40 | local CHECKUP_TIMER_ALIVE_KEY = "checkups:timer_alive" 41 | _M.CHECKUP_TIMER_ALIVE_KEY = CHECKUP_TIMER_ALIVE_KEY 42 | local PEER_STATUS_PREFIX = "checkups:peer_status:" 43 | _M.PEER_STATUS_PREFIX = PEER_STATUS_PREFIX 44 | local SHD_CONFIG_VERSION_KEY = "config_version" 45 | _M.SHD_CONFIG_VERSION_KEY = SHD_CONFIG_VERSION_KEY 46 | local SKEYS_KEY = "checkups:skeys" 47 | _M.SKEYS_KEY = SKEYS_KEY 48 | local SHD_CONFIG_PREFIX = "shd_config" 49 | _M.SHD_CONFIG_PREFIX = SHD_CONFIG_PREFIX 50 | 51 | 52 | local upstream = {} 53 | _M.upstream = upstream 54 | local peer_id_dict = {} 55 | 56 | local ups_status_timer_created 57 | _M.ups_status_timer_created = ups_status_timer_created 58 | local cluster_status = {} 59 | _M.cluster_status = cluster_status 60 | 61 | 62 | _M.is_tab = function(t) return type(t) == "table" end 63 | _M.is_str = function(t) return type(t) == "string" end 64 | _M.is_num = function(t) return type(t) == "number" end 65 | 
_M.is_nul = function(t) return t == nil or t == ngx.null end 66 | 67 | 68 | local function _gen_key(skey, srv) 69 | return str_format("%s:%s:%d", skey, srv.host, srv.port) 70 | end 71 | _M._gen_key = _gen_key 72 | 73 | 74 | local function extract_srv_host_port(name) 75 | local from, to = str_find(name, ":") 76 | if from then 77 | local host = str_sub(name, 1, from - 1) 78 | local port = str_sub(name, to + 1) 79 | host = str_match(host, "^%d+%.%d+%.%d+%.%d+$") 80 | port = str_match(port, "^%d+$") 81 | if host and port then 82 | return host, port 83 | end 84 | else 85 | local host = str_match(name, "^%d+%.%d+%.%d+%.%d+$") 86 | if host then 87 | return host, 80 88 | end 89 | end 90 | end 91 | 92 | 93 | function _M.get_srv_status(skey, srv) 94 | local server_status = cluster_status[skey] 95 | if not server_status then 96 | return _M.STATUS_OK 97 | end 98 | 99 | local srv_key = str_format("%s:%d", srv.host, srv.port) 100 | local srv_status = server_status[srv_key] 101 | local fail_timeout = srv.fail_timeout or 10 102 | 103 | if srv_status and srv_status.lastmodify + fail_timeout > now() then 104 | return srv_status.status 105 | end 106 | 107 | return _M.STATUS_OK 108 | end 109 | 110 | 111 | function _M.set_srv_status(skey, srv, failed) 112 | local server_status = cluster_status[skey] 113 | if not server_status then 114 | server_status = {} 115 | cluster_status[skey] = server_status 116 | end 117 | 118 | -- The default max_fails is 0, which differs from nginx upstream module(1). 119 | local max_fails = srv.max_fails or 0 120 | local fail_timeout = srv.fail_timeout or 10 121 | if max_fails == 0 then -- disables the accounting of attempts 122 | return 123 | end 124 | 125 | local time_now = now() 126 | local srv_key = str_format("%s:%d", srv.host, srv.port) 127 | local srv_status = server_status[srv_key] 128 | if not srv_status then -- first set 129 | srv_status = { 130 | status = _M.STATUS_OK, 131 | failed_count = 0, 132 | lastmodify = time_now 133 | } 134 | server_status[srv_key] = srv_status 135 | elseif srv_status.lastmodify + fail_timeout < time_now then -- srv_status expired 136 | srv_status.status = _M.STATUS_OK 137 | srv_status.failed_count = 0 138 | srv_status.lastmodify = time_now 139 | end 140 | 141 | if failed then 142 | srv_status.failed_count = srv_status.failed_count + 1 143 | 144 | if srv_status.failed_count >= max_fails then 145 | local ups = upstream.checkups[skey] 146 | for level, cls in pairs(ups.cluster) do 147 | for _, s in ipairs(cls.servers) do 148 | local k = str_format("%s:%d", s.host, s.port) 149 | local st = server_status[k] 150 | -- not the last ok server 151 | if not st or st.status == _M.STATUS_OK and k ~= srv_key then 152 | srv_status.status = _M.STATUS_ERR 153 | return 154 | end 155 | end 156 | end 157 | end 158 | end 159 | end 160 | 161 | 162 | function _M.check_res(res, check_opts) 163 | if res then 164 | local typ = check_opts.typ 165 | 166 | if typ == "http" and type(res) == "table" 167 | and res.status then 168 | local status = tostring(res.status) 169 | local http_opts = check_opts.http_opts 170 | if http_opts and http_opts.statuses and 171 | http_opts.statuses[status] == false then 172 | return false 173 | end 174 | end 175 | return true 176 | end 177 | 178 | return false 179 | end 180 | 181 | 182 | function _M.try_server(skey, ups, srv, callback, args, try) 183 | try = try or 1 184 | local peer_key = _gen_key(skey, srv) 185 | local peer_status = cjson.decode(state:get(PEER_STATUS_PREFIX .. 
peer_key)) 186 | local res, err 187 | 188 | if peer_status == nil or peer_status.status ~= _M.STATUS_ERR then 189 | for i = 1, try, 1 do 190 | res, err = callback(srv.host, srv.port, unpack(args)) 191 | if _M.check_res(res, ups) then 192 | return res 193 | end 194 | end 195 | end 196 | 197 | return nil, err 198 | end 199 | 200 | 201 | function _M.get_lock(key, timeout) 202 | local lock = lock:new(get_shm_key("locks"), {timeout=timeout}) 203 | local elapsed, err = lock:lock(key) 204 | if not elapsed then 205 | log(WARN, "failed to acquire the lock: ", key, ", ", err) 206 | return nil, err 207 | end 208 | 209 | return lock 210 | end 211 | 212 | 213 | function _M.release_lock(lock) 214 | local ok, err = lock:unlock() 215 | if not ok then 216 | log(WARN, "failed to unlock: ", err) 217 | end 218 | end 219 | 220 | 221 | function _M.get_peer_status(skey, srv) 222 | local peer_key = PEER_STATUS_PREFIX .. _gen_key(skey, srv) 223 | local peer_status = state:get(peer_key) 224 | return not _M.is_nul(peer_status) and cjson.decode(peer_status) or nil 225 | end 226 | 227 | 228 | function _M.get_upstream_status(skey) 229 | local ups = upstream.checkups[skey] 230 | if not ups then 231 | return 232 | end 233 | 234 | local ups_status = {} 235 | 236 | for level, cls in pairs(ups.cluster) do 237 | local servers = cls.servers 238 | ups_status[level] = {} 239 | if servers and type(servers) == "table" and #servers > 0 then 240 | for _, srv in ipairs(servers) do 241 | local peer_status = _M.get_peer_status(skey, srv) or {} 242 | peer_status.server = _gen_key(skey, srv) 243 | peer_status["weight"] = srv.weight 244 | peer_status["max_fails"] = srv.max_fails 245 | peer_status["fail_timeout"] = srv.fail_timeout 246 | if ups.enable == false or (ups.enable == nil and 247 | upstream.default_heartbeat_enable == false) then 248 | peer_status.status = "unchecked" 249 | else 250 | if not peer_status.status or 251 | peer_status.status == _M.STATUS_OK then 252 | peer_status.status = "ok" 253 | elseif peer_status.status == _M.STATUS_ERR then 254 | peer_status.status = "err" 255 | else 256 | peer_status.status = "unstable" 257 | end 258 | end 259 | tab_insert(ups_status[level], peer_status) 260 | end 261 | end 262 | end 263 | 264 | return ups_status 265 | end 266 | 267 | 268 | function _M.extract_servers_from_upstream(skey, cls) 269 | local up_key = cls.upstream 270 | if not up_key then 271 | return 272 | end 273 | 274 | cls.servers = cls.servers or {} 275 | 276 | if not ngx_upstream then 277 | local ok 278 | ok, ngx_upstream = pcall(require, "ngx.upstream") 279 | if not ok then 280 | log(ERR, "ngx_upstream_lua module required") 281 | return 282 | end 283 | end 284 | 285 | local ups_backup = cls.upstream_only_backup 286 | local ups_skip_down = cls.upstream_skip_down 287 | local srvs_getter = ngx_upstream.get_primary_peers 288 | if ups_backup then 289 | srvs_getter = ngx_upstream.get_backup_peers 290 | end 291 | local srvs, err = srvs_getter(up_key) 292 | if not srvs and err then 293 | log(ERR, "failed to get servers in upstream ", err) 294 | return 295 | end 296 | 297 | for _, srv in ipairs(srvs) do 298 | if ups_skip_down and srv.down then 299 | goto continue 300 | end 301 | 302 | local host, port = extract_srv_host_port(srv.name) 303 | if not host then 304 | log(ERR, "invalid server name: ", srv.name) 305 | return 306 | end 307 | peer_id_dict[_gen_key(skey, { host = host, port = port })] = { 308 | id = srv.id, backup = ups_backup and true or false} 309 | tab_insert(cls.servers, { 310 | host = host, 311 | port = port, 312 | 
weight = srv.weight, 313 | max_fails = srv.max_fails, 314 | fail_timeout = srv.fail_timeout, 315 | }) 316 | 317 | ::continue:: 318 | end 319 | end 320 | 321 | 322 | function _M.table_dup(ori_tab) 323 | if type(ori_tab) ~= "table" then 324 | return ori_tab 325 | end 326 | local new_tab = {} 327 | for k, v in pairs(ori_tab) do 328 | if type(v) == "table" then 329 | new_tab[k] = _M.table_dup(v) 330 | else 331 | new_tab[k] = v 332 | end 333 | end 334 | return new_tab 335 | end 336 | 337 | 338 | function _M.ups_status_checker(premature) 339 | if premature then 340 | return 341 | end 342 | 343 | if not ngx_upstream then 344 | local ok 345 | ok, ngx_upstream = pcall(require, "ngx.upstream") 346 | if not ok then 347 | log(ERR, "ngx_upstream_lua module required") 348 | return 349 | end 350 | end 351 | 352 | local ups_status = {} 353 | local names = ngx_upstream.get_upstreams() 354 | -- get current upstream down status 355 | for _, name in ipairs(names) do 356 | local srvs = ngx_upstream.get_primary_peers(name) 357 | for _, srv in ipairs(srvs) do 358 | ups_status[srv.name] = srv.down and _M.STATUS_ERR or _M.STATUS_OK 359 | end 360 | 361 | srvs = ngx_upstream.get_backup_peers(name) 362 | for _, srv in ipairs(srvs) do 363 | ups_status[srv.name] = srv.down and _M.STATUS_ERR or _M.STATUS_OK 364 | end 365 | end 366 | 367 | for skey, ups in pairs(upstream.checkups) do 368 | for level, cls in pairs(ups.cluster) do 369 | if not cls.upstream then 370 | break 371 | end 372 | 373 | for _, srv in pairs(cls.servers) do 374 | local peer_key = _gen_key(skey, srv) 375 | local status_key = PEER_STATUS_PREFIX .. peer_key 376 | 377 | local peer_status, err = state:get(status_key) 378 | if peer_status then 379 | local st = cjson.decode(peer_status) 380 | local up_st = ups_status[srv.host .. ':' .. srv.port] 381 | local unstable = st.status == _M.STATUS_UNSTABLE 382 | if (unstable and up_st == _M.STATUS_ERR) or 383 | (not unstable and up_st and st.status ~= up_st) then 384 | local up_id = peer_id_dict[peer_key] 385 | local down = up_st == _M.STATUS_OK 386 | local ok, err = ngx_upstream.set_peer_down( 387 | cls.upstream, up_id.backup, up_id.id, down) 388 | if not ok then 389 | log(ERR, "failed to set peer down", err) 390 | end 391 | end 392 | elseif err then 393 | log(WARN, "get peer status error ", status_key, " ", err) 394 | end 395 | end 396 | end 397 | end 398 | 399 | local interval = upstream.ups_status_timer_interval 400 | local ok, err = ngx.timer.at(interval, _M.ups_status_checker) 401 | if not ok then 402 | ups_status_timer_created = false 403 | log(WARN, "failed to create ups_status_checker: ", err) 404 | end 405 | end 406 | 407 | 408 | return _M 409 | -------------------------------------------------------------------------------- /lib/resty/checkups/consistent_hash.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016, UPYUN Inc. 
2 | 3 | local floor = math.floor 4 | local str_byte = string.byte 5 | local tab_sort = table.sort 6 | local tab_insert = table.insert 7 | 8 | local _M = { _VERSION = "0.11" } 9 | 10 | local MOD = 2 ^ 32 11 | local REPLICAS = 20 12 | local LUCKY_NUM = 13 13 | 14 | 15 | local function hash_string(str) 16 | local key = 0 17 | for i = 1, #str do 18 | key = (key * 31 + str_byte(str, i)) % MOD 19 | end 20 | 21 | return key 22 | end 23 | 24 | 25 | local function init_consistent_hash_state(servers) 26 | local weight_sum = 0 27 | for _, srv in ipairs(servers) do 28 | weight_sum = weight_sum + (srv.weight or 1) 29 | end 30 | 31 | local circle, members = {}, 0 32 | for index, srv in ipairs(servers) do 33 | local key = ("%s:%s"):format(srv.host, srv.port) 34 | local base_hash = hash_string(key) 35 | for c = 1, REPLICAS * weight_sum do 36 | -- TODO: more balance hash 37 | local hash = (base_hash * c * LUCKY_NUM) % MOD 38 | tab_insert(circle, { hash, index }) 39 | end 40 | members = members + 1 41 | end 42 | 43 | tab_sort(circle, function(a, b) return a[1] < b[1] end) 44 | 45 | return { circle = circle, members = members } 46 | end 47 | 48 | 49 | local function binary_search(circle, key) 50 | local size = #circle 51 | local st, ed, mid = 1, size 52 | while st <= ed do 53 | mid = floor((st + ed) / 2) 54 | if circle[mid][1] < key then 55 | st = mid + 1 56 | else 57 | ed = mid - 1 58 | end 59 | end 60 | 61 | return st == size + 1 and 1 or st 62 | end 63 | 64 | 65 | function _M.next_consistent_hash_server(servers, peer_cb, hash_key) 66 | local is_tab = require "resty.checkups.base".is_tab 67 | servers.chash = is_tab(servers.chash) and servers.chash 68 | or init_consistent_hash_state(servers) 69 | 70 | local chash = servers.chash 71 | if chash.members == 1 then 72 | if peer_cb(1, servers[1]) then 73 | return servers[1] 74 | end 75 | 76 | return nil, "consistent hash: no servers available" 77 | end 78 | 79 | local circle = chash.circle 80 | local st = binary_search(circle, hash_string(hash_key)) 81 | local size = #circle 82 | local ed = st + size - 1 83 | for i = st, ed do -- TODO: algorithm O(n) 84 | local idx = circle[(i - 1) % size + 1][2] 85 | if peer_cb(idx, servers[idx]) then 86 | return servers[idx] 87 | end 88 | end 89 | 90 | return nil, "consistent hash: no servers available" 91 | end 92 | 93 | 94 | function _M.free_consitent_hash_server(srv, failed) 95 | return 96 | end 97 | 98 | 99 | return _M 100 | -------------------------------------------------------------------------------- /lib/resty/checkups/dyconfig.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson.safe" 2 | 3 | local base = require "resty.checkups.base" 4 | local subsystem = require "resty.subsystem" 5 | 6 | local worker_id = ngx.worker.id 7 | local worker_count = ngx.worker.count 8 | local update_time = ngx.update_time 9 | 10 | local log = ngx.log 11 | local ERR = ngx.ERR 12 | local WARN = ngx.WARN 13 | local INFO = ngx.INFO 14 | 15 | local str_format = string.format 16 | local type = type 17 | local pairs = pairs 18 | 19 | 20 | local get_shm = subsystem.get_shm 21 | local mutex = get_shm("mutex") 22 | local shd_config = get_shm("config") 23 | 24 | local _M = { 25 | _VERSION = "0.11", 26 | STATUS_OK = base.STATUS_OK, STATUS_UNSTABLE = base.STATUS_UNSTABLE, STATUS_ERR = base.STATUS_ERR 27 | } 28 | 29 | local function _gen_shd_key(skey) 30 | return str_format("%s:%s", base.SHD_CONFIG_PREFIX, skey) 31 | end 32 | _M._gen_shd_key = _gen_shd_key 33 | 34 | 35 | local function 
shd_config_syncer(premature) 36 | local ckey = base.CHECKUP_TIMER_KEY .. ":shd_config:" .. worker_id() 37 | update_time() 38 | 39 | if premature then 40 | local ok, err = mutex:set(ckey, nil) 41 | if not ok then 42 | log(WARN, "failed to update shm: ", err) 43 | end 44 | return 45 | end 46 | 47 | local interval = base.upstream.shd_config_timer_interval 48 | 49 | local overtime = base.upstream.checkup_timer_overtime 50 | 51 | local lock, err = base.get_lock(base.SKEYS_KEY) 52 | if not lock then 53 | log(WARN, "upstream updating, failed to acquire the lock: ", base.SKEYS_KEY, ", ", err) 54 | local ok, err = ngx.timer.at(interval, shd_config_syncer) 55 | if not ok then 56 | log(ERR, "failed to create timer: ", err) 57 | local ok, err = mutex:set(ckey, nil) 58 | if not ok then 59 | log(ERR, "failed to update shm: ", err) 60 | end 61 | else 62 | local ok, err = mutex:set(ckey, 1, overtime) 63 | if not ok then 64 | log(ERR, "failed to update shm: ", err) 65 | end 66 | end 67 | return 68 | end 69 | 70 | local config_version, err = shd_config:get(base.SHD_CONFIG_VERSION_KEY) 71 | 72 | if config_version and config_version ~= base.upstream.shd_config_version then 73 | local skeys = shd_config:get(base.SKEYS_KEY) 74 | if skeys then 75 | skeys = cjson.decode(skeys) 76 | 77 | -- delete skey from upstream 78 | for skey, _ in pairs(base.upstream.checkups) do 79 | if not skeys[skey] then 80 | base.upstream.checkups[skey] = nil 81 | end 82 | end 83 | 84 | local success = true 85 | for skey, _ in pairs(skeys) do 86 | local shd_servers, err = shd_config:get(_gen_shd_key(skey)) 87 | log(INFO, "get ", skey, " from shm: ", shd_servers) 88 | if shd_servers then 89 | shd_servers = cjson.decode(shd_servers) 90 | base.upstream.checkups[skey] = base.table_dup(shd_servers) 91 | elseif err then 92 | success = false 93 | log(WARN, "failed to get from shm: ", err) 94 | end 95 | end 96 | 97 | if success then 98 | base.upstream.shd_config_version = config_version 99 | end 100 | end 101 | elseif err then 102 | log(WARN, "failed to get config version from shm") 103 | end 104 | 105 | base.release_lock(lock) 106 | 107 | 108 | local ok, err = mutex:set(ckey, 1, overtime) 109 | if not ok then 110 | log(WARN, "failed to update shm: ", err) 111 | end 112 | 113 | local ok, err = ngx.timer.at(interval, shd_config_syncer) 114 | if not ok then 115 | log(ERR, "failed to create timer: ", err) 116 | local ok, err = mutex:set(ckey, nil) 117 | if not ok then 118 | log(WARN, "failed to update shm: ", err) 119 | end 120 | return 121 | end 122 | end 123 | 124 | _M.shd_config_syncer = shd_config_syncer 125 | 126 | 127 | function _M.check_update_server_args(skey, level, server) 128 | if type(skey) ~= "string" then 129 | return false, "skey must be a string" 130 | end 131 | if type(level) ~= "number" and type(level) ~= "string" then 132 | return false, "level must be string or number" 133 | end 134 | if type(server) ~= "table" then 135 | return false, "server must be a table" 136 | end 137 | if not server.host or not server.port then 138 | return false, "no server.host nor server.port found" 139 | end 140 | 141 | return true 142 | end 143 | 144 | 145 | function _M.do_get_upstream(skey) 146 | local skeys = shd_config:get(base.SKEYS_KEY) 147 | if not skeys then 148 | return nil, "no skeys found from shm" 149 | end 150 | 151 | local key = _gen_shd_key(skey) 152 | local shd_servers, err = shd_config:get(key) 153 | if shd_servers then 154 | shd_servers = cjson.decode(shd_servers) 155 | if type(shd_servers) ~= "table" then 156 | return nil 157 | end 
158 | 159 | return shd_servers 160 | elseif err then 161 | log(WARN, "failed to get from shm: ", err) 162 | return nil, err 163 | else 164 | log(WARN, "upstream " .. skey .. " not found") 165 | return nil 166 | end 167 | end 168 | 169 | 170 | function _M.do_get_upstreams() 171 | local skeys = shd_config:get(base.SKEYS_KEY) 172 | if not skeys then 173 | return nil, "no skeys found from shm" 174 | end 175 | local upstreams = {} 176 | skeys = cjson.decode(skeys) 177 | for skey, _ in pairs(skeys) do 178 | local shd_servers, err = shd_config:get(_gen_shd_key(skey)) 179 | log(INFO, "get ", skey, " from shm: ", shd_servers) 180 | if shd_servers then 181 | upstreams[skey] = cjson.decode(shd_servers) 182 | elseif err then 183 | log(WARN, "failed to get from shm: ", err) 184 | end 185 | end 186 | return upstreams 187 | end 188 | 189 | 190 | function _M.do_update_upstream(skey, upstream) 191 | local skeys = shd_config:get(base.SKEYS_KEY) 192 | if not skeys then 193 | return false, "no skeys found from shm" 194 | end 195 | 196 | skeys = cjson.decode(skeys) 197 | 198 | local new_ver, ok, err 199 | 200 | new_ver, err = shd_config:incr(base.SHD_CONFIG_VERSION_KEY, 1) 201 | 202 | if err then 203 | log(WARN, "failed to set new version to shm") 204 | return false, err 205 | end 206 | 207 | local key = _gen_shd_key(skey) 208 | ok, err = shd_config:set(key, cjson.encode(upstream)) 209 | 210 | if err then 211 | log(WARN, "failed to set new upstream to shm") 212 | return false, err 213 | end 214 | 215 | -- new skey 216 | if not skeys[skey] then 217 | skeys[skey] = 1 218 | local _, err = shd_config:set(base.SKEYS_KEY, cjson.encode(skeys)) 219 | if err then 220 | log(WARN, "failed to set new skeys to shm") 221 | return false, err 222 | end 223 | log(INFO, "add new skey to upstreams, ", skey) 224 | end 225 | 226 | return true 227 | end 228 | 229 | 230 | function _M.do_delete_upstream(skey) 231 | local skeys = shd_config:get(base.SKEYS_KEY) 232 | if skeys then 233 | skeys = cjson.decode(skeys) 234 | else 235 | return false, "upstream " .. skey .. " not found" 236 | end 237 | 238 | local key = _gen_shd_key(skey) 239 | local shd_servers, err = shd_config:get(key) 240 | if shd_servers then 241 | local new_ver, ok, err 242 | new_ver, err = shd_config:incr(base.SHD_CONFIG_VERSION_KEY, 1) 243 | if err then 244 | log(WARN, "failed to set new version to shm") 245 | return false, err 246 | end 247 | 248 | ok, err = shd_config:delete(key) 249 | if err then 250 | log(WARN, "failed to delete servers in shm") 251 | return false, err 252 | end 253 | 254 | skeys[skey] = nil 255 | 256 | local _, err = shd_config:set(base.SKEYS_KEY, cjson.encode(skeys)) 257 | if err then 258 | log(WARN, "failed to set new skeys to shm") 259 | return false, err 260 | end 261 | 262 | log(INFO, "delete skey from upstreams, ", skey) 263 | 264 | elseif err then 265 | return false, err 266 | else 267 | return false, "upstream " .. skey .. " not found" 268 | end 269 | 270 | return true 271 | end 272 | 273 | 274 | function _M.create_shd_config_syncer() 275 | local ok, err = ngx.timer.at(0, shd_config_syncer) 276 | if not ok then 277 | log(ERR, "failed to create shd_config timer: ", err) 278 | return 279 | end 280 | 281 | local overtime = base.upstream.checkup_timer_overtime 282 | local ckey = base.CHECKUP_TIMER_KEY .. ":shd_config:" .. 
worker_id() 283 | local ok, err = mutex:set(ckey, 1, overtime) 284 | if not ok then 285 | log(WARN, "failed to update shm: ", err) 286 | end 287 | end 288 | 289 | 290 | function _M.get_timer_key_status() 291 | if not worker_count then 292 | log(WARN, "can not get worker count, please upgrade lua-nginx-module to 0.9.20 or higher") 293 | return 294 | end 295 | 296 | local timer_status = {} 297 | local count = worker_count() 298 | for i=0, count-1 do 299 | local key = "worker-" .. i 300 | local ckey = base.CHECKUP_TIMER_KEY .. ":shd_config:" .. i 301 | local val, err = mutex:get(ckey) 302 | if err then 303 | timer_status[key] = err 304 | elseif val then 305 | timer_status[key] = "alive" 306 | else 307 | timer_status[key] = "dead" 308 | end 309 | end 310 | 311 | return timer_status 312 | end 313 | 314 | 315 | return _M 316 | -------------------------------------------------------------------------------- /lib/resty/checkups/heartbeat.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016 UPYUN, Inc. 2 | 3 | local cjson = require "cjson.safe" 4 | 5 | local base = require "resty.checkups.base" 6 | local subsystem = require "resty.subsystem" 7 | 8 | local str_sub = string.sub 9 | local lower = string.lower 10 | local tab_sort = table.sort 11 | local tab_concat = table.concat 12 | local tab_insert = table.insert 13 | 14 | local re_gmatch = ngx.re.gmatch 15 | local re_find = ngx.re.find 16 | local log = ngx.log 17 | local localtime = ngx.localtime 18 | local ERR = ngx.ERR 19 | local WARN = ngx.WARN 20 | local now = ngx.now 21 | local tcp = ngx.socket.tcp 22 | local update_time = ngx.update_time 23 | 24 | local spawn = ngx.thread.spawn 25 | local wait = ngx.thread.wait 26 | 27 | local get_shm = subsystem.get_shm 28 | local mutex = get_shm("mutex") 29 | local state = get_shm("state") 30 | 31 | local _M = { 32 | _VERSION = "0.11", 33 | STATUS_OK = base.STATUS_OK, STATUS_UNSTABLE = base.STATUS_UNSTABLE, STATUS_ERR = base.STATUS_ERR 34 | } 35 | 36 | local resty_redis, resty_mysql 37 | 38 | 39 | local function update_peer_status(srv, sensibility) 40 | local peer_key = srv.peer_key 41 | local status_key = base.PEER_STATUS_PREFIX .. 
peer_key 42 | local status_str, err = state:get(status_key) 43 | 44 | if err then 45 | log(ERR, "get old status ", status_key, " ", err) 46 | return 47 | end 48 | 49 | local old_status, err 50 | if status_str then 51 | old_status, err = cjson.decode(status_str) 52 | if err then 53 | log(WARN, "decode old status error: ", err) 54 | end 55 | end 56 | 57 | if not old_status then 58 | old_status = { 59 | status = _M.STATUS_OK, 60 | fail_num = 0, 61 | lastmodified = localtime(), 62 | } 63 | end 64 | 65 | local status = srv.status 66 | if status == _M.STATUS_OK then 67 | if old_status.status ~= _M.STATUS_OK then 68 | old_status.lastmodified = localtime() 69 | old_status.status = _M.STATUS_OK 70 | end 71 | old_status.fail_num = 0 72 | else -- status == _M.STATUS_ERR or _M.STATUS_UNSTABLE 73 | old_status.fail_num = old_status.fail_num + 1 74 | 75 | if old_status.status ~= status and 76 | old_status.fail_num >= sensibility then 77 | old_status.status = status 78 | old_status.lastmodified = localtime() 79 | end 80 | end 81 | 82 | for k, v in pairs(srv.statuses) do 83 | old_status[k] = v 84 | end 85 | 86 | local ok, err = state:set(status_key, cjson.encode(old_status)) 87 | if not ok then 88 | log(ERR, "failed to set new status ", err) 89 | end 90 | end 91 | 92 | 93 | local function update_upstream_status(ups_status, sensibility) 94 | if not ups_status then 95 | return 96 | end 97 | 98 | for _, srv in ipairs(ups_status) do 99 | update_peer_status(srv, sensibility) 100 | end 101 | end 102 | 103 | 104 | local heartbeat = { 105 | general = function (host, port, ups) 106 | local id = host .. ':' .. port 107 | 108 | local sock = tcp() 109 | sock:settimeout(ups.timeout * 1000) 110 | local ok, err = sock:connect(host, port) 111 | if not ok then 112 | log(ERR, "failed to connect: ", id, ", ", err) 113 | return _M.STATUS_ERR, err 114 | end 115 | 116 | sock:setkeepalive() 117 | 118 | return _M.STATUS_OK 119 | end, 120 | 121 | redis = function (host, port, ups) 122 | local id = host .. ':' .. 
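-- unlike the general TCP checker, the redis checker returns a table
-- { status = ..., replication = ... } built from the redis INFO output,
-- which later shows up in checkups.get_status() (see t/get_redis_info.t)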
port 123 | 124 | if not resty_redis then 125 | local ok 126 | ok, resty_redis = pcall(require, "resty.redis") 127 | if not ok then 128 | log(ERR, "failed to require resty.redis") 129 | return _M.STATUS_ERR, "failed to require resty.redis" 130 | end 131 | end 132 | 133 | local red, err = resty_redis:new() 134 | if not red then 135 | log(WARN, "failed to new redis: ", err) 136 | return _M.STATUS_ERR, err 137 | end 138 | 139 | red:set_timeout(ups.timeout * 1000) 140 | 141 | local redis_err = { status = _M.STATUS_ERR, replication = cjson.null } 142 | local ok, err = red:connect(host, port) 143 | if not ok then 144 | log(ERR, "failed to connect redis: ", id, ", ", err) 145 | return redis_err, err 146 | end 147 | 148 | local res, err = red:ping() 149 | if not res then 150 | log(ERR, "failed to ping redis: ", id, ", ", err) 151 | return redis_err, err 152 | end 153 | 154 | local replication = {} 155 | local statuses = { 156 | status = _M.STATUS_OK , 157 | replication = replication 158 | } 159 | 160 | local res, got_all_info = {}, false 161 | 162 | local info, err = red:info("replication") 163 | if not info then 164 | info, err = red:info() 165 | if not info then 166 | replication.err = err 167 | return statuses 168 | end 169 | 170 | got_all_info = true 171 | end 172 | 173 | tab_insert(res, info) 174 | 175 | if not got_all_info then 176 | local info, err = red:info("server") 177 | if info then 178 | tab_insert(res, info) 179 | end 180 | end 181 | 182 | res = tab_concat(res) 183 | 184 | red:set_keepalive(10000, 100) 185 | 186 | local iter, err = re_gmatch(res, [[([a-zA-Z_]+):(.+?)\r\n]], "jo") 187 | if not iter then 188 | replication.err = err 189 | return statuses 190 | end 191 | 192 | local replication_field = { 193 | role = true, 194 | master_host = true, 195 | master_port = true, 196 | master_link_status = true, 197 | master_link_down_since_seconds = true, 198 | master_last_io_seconds_ago = true, 199 | } 200 | 201 | local other_field = { 202 | redis_version = true, 203 | } 204 | 205 | while true do 206 | local m, err = iter() 207 | if err then 208 | replication.err = err 209 | return statuses 210 | end 211 | 212 | if not m then 213 | break 214 | end 215 | 216 | if replication_field[lower(m[1])] then 217 | replication[m[1]] = m[2] 218 | end 219 | 220 | if other_field[lower(m[1])] then 221 | statuses[m[1]] = m[2] 222 | end 223 | end 224 | 225 | if replication.master_link_status == "down" then 226 | statuses.status = _M.STATUS_UNSTABLE 227 | statuses.msg = "master link status: down" 228 | end 229 | 230 | return statuses 231 | end, 232 | 233 | mysql = function (host, port, ups) 234 | local id = host .. ':' .. 
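-- the mysql checker takes its connect parameters from the cluster
-- config: ups.name (database), ups.user, ups.pass and ups.charset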
port 235 | 236 | if not resty_mysql then 237 | local ok 238 | ok, resty_mysql = pcall(require, "resty.mysql") 239 | if not ok then 240 | log(ERR, "failed to require resty.mysql") 241 | return _M.STATUS_ERR, "failed to require resty.mysql" 242 | end 243 | end 244 | 245 | local db, err = resty_mysql:new() 246 | if not db then 247 | log(WARN, "failed to new mysql: ", err) 248 | return _M.STATUS_ERR, err 249 | end 250 | 251 | db:set_timeout(ups.timeout * 1000) 252 | 253 | local ok, err, errno, sqlstate = db:connect{ 254 | host = host, 255 | port = port, 256 | database = ups.name, 257 | user = ups.user, 258 | password = ups.pass, 259 | charset = ups.charset, 260 | max_packet_size = 1024*1024 261 | } 262 | 263 | if not ok then 264 | log(ERR, "failed to connect: ", id, ", ", err, ": ", errno, " ", sqlstate) 265 | return _M.STATUS_ERR, err 266 | end 267 | 268 | db:set_keepalive(10000, 100) 269 | 270 | return _M.STATUS_OK 271 | end, 272 | 273 | http = function(host, port, ups) 274 | local id = host .. ':' .. port 275 | 276 | local sock, err = tcp() 277 | if not sock then 278 | log(WARN, "failed to create sock: ", err) 279 | return _M.STATUS_ERR, err 280 | end 281 | 282 | sock:settimeout(ups.timeout * 1000) 283 | local ok, err = sock:connect(host, port) 284 | if not ok then 285 | log(ERR, "failed to connect: ", id, ", ", err) 286 | return _M.STATUS_ERR, err 287 | end 288 | 289 | local opts = ups.http_opts or {} 290 | 291 | local req = opts.query 292 | if not req then 293 | sock:setkeepalive() 294 | return _M.STATUS_OK 295 | end 296 | 297 | local bytes, err = sock:send(req) 298 | if not bytes then 299 | log(ERR, "failed to send request to: ", id, ", ", err) 300 | return _M.STATUS_ERR, err 301 | end 302 | 303 | local readline = sock:receiveuntil("\r\n") 304 | local status_line, err = readline() 305 | if not status_line then 306 | log(ERR, "failed to receive status line from: ", id, ", ", err) 307 | return _M.STATUS_ERR, err 308 | end 309 | 310 | local statuses = opts.statuses 311 | if statuses then 312 | local from, to, err = re_find(status_line, 313 | [[^HTTP/\d+\.\d+\s+(\d+)]], "joi", nil, 1) 314 | if not from then 315 | log(ERR, "bad status line from: ", id, ", ", err) 316 | return _M.STATUS_ERR, err 317 | end 318 | 319 | local status = str_sub(status_line, from, to) 320 | if statuses[status] == false then 321 | return _M.STATUS_ERR, "bad status code" 322 | end 323 | end 324 | 325 | sock:setkeepalive() 326 | 327 | return _M.STATUS_OK 328 | end, 329 | } 330 | 331 | 332 | local function cluster_heartbeat(skey) 333 | local ups = base.upstream.checkups[skey] 334 | if ups.enable == false or (ups.enable == nil and 335 | base.upstream.default_heartbeat_enable == false) then 336 | return 337 | end 338 | 339 | local ups_typ = ups.typ or "general" 340 | local ups_heartbeat = ups.heartbeat 341 | local ups_sensi = ups.sensibility or 1 342 | local ups_protected = true 343 | if ups.protected == false then 344 | ups_protected = false 345 | end 346 | 347 | ups.timeout = ups.timeout or 5 348 | 349 | local server_count = 0 350 | for level, cls in pairs(ups.cluster) do 351 | if cls.servers and #cls.servers > 0 then 352 | server_count = server_count + #cls.servers 353 | end 354 | end 355 | 356 | local error_count = 0 357 | local unstable_count = 0 358 | local srv_available = false 359 | local ups_status = {} 360 | for level, cls in pairs(ups.cluster) do 361 | for _, srv in ipairs(cls.servers) do 362 | local peer_key = base._gen_key(skey, srv) 363 | local cb_heartbeat = ups_heartbeat or heartbeat[ups_typ] or 364 | 
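-- checker precedence: a user-supplied ups.heartbeat callback first,
-- then the typ-specific checker (redis/mysql/http), and finally the
-- plain TCP-connect checker as the fallback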
heartbeat["general"] 365 | local statuses, err = cb_heartbeat(srv.host, srv.port, ups) 366 | 367 | local status 368 | if type(statuses) == "table" then 369 | status = statuses.status 370 | statuses.status = nil 371 | else 372 | status = statuses 373 | statuses = {} 374 | end 375 | 376 | if not statuses.msg then 377 | statuses.msg = err or cjson.null 378 | end 379 | 380 | local srv_status = { 381 | peer_key = peer_key , 382 | status = status , 383 | statuses = statuses , 384 | } 385 | 386 | if status == _M.STATUS_OK then 387 | update_peer_status(srv_status, ups_sensi) 388 | srv_status.updated = true 389 | srv_available = true 390 | if next(ups_status) then 391 | for _, v in ipairs(ups_status) do 392 | if v.status == _M.STATUS_UNSTABLE then 393 | v.status = _M.STATUS_ERR 394 | end 395 | update_peer_status(v, ups_sensi) 396 | end 397 | ups_status = {} 398 | end 399 | end 400 | 401 | if status == _M.STATUS_ERR then 402 | error_count = error_count + 1 403 | if srv_available then 404 | update_peer_status(srv_status, ups_sensi) 405 | srv_status.updated = true 406 | end 407 | end 408 | 409 | if status == _M.STATUS_UNSTABLE then 410 | unstable_count = unstable_count + 1 411 | if srv_available then 412 | srv_status.status = _M.STATUS_ERR 413 | update_peer_status(srv_status, ups_sensi) 414 | srv_status.updated = true 415 | end 416 | end 417 | 418 | if srv_status.updated ~= true then 419 | tab_insert(ups_status, srv_status) 420 | end 421 | end 422 | end 423 | 424 | if next(ups_status) then 425 | if error_count == server_count then 426 | if ups_protected then 427 | ups_status[1].status = _M.STATUS_UNSTABLE 428 | end 429 | elseif error_count + unstable_count == server_count then 430 | tab_sort(ups_status, function(a, b) return a.status < b.status end) 431 | end 432 | 433 | update_upstream_status(ups_status, ups_sensi) 434 | end 435 | end 436 | 437 | 438 | function _M.active_checkup(premature) 439 | local ckey = base.CHECKUP_TIMER_KEY 440 | 441 | update_time() -- flush cache time 442 | 443 | if premature then 444 | local ok, err = mutex:set(ckey, nil) 445 | if not ok then 446 | log(WARN, "failed to update shm: ", err) 447 | end 448 | return 449 | end 450 | 451 | local thread = {} 452 | for skey in pairs(base.upstream.checkups) do 453 | thread[#thread + 1] = spawn(cluster_heartbeat, skey) 454 | end 455 | 456 | for _,v in ipairs(thread) do 457 | if v then 458 | wait(v) 459 | end 460 | end 461 | 462 | local interval = base.upstream.checkup_timer_interval 463 | local overtime = base.upstream.checkup_timer_overtime 464 | 465 | state:set(base.CHECKUP_LAST_CHECK_TIME_KEY, localtime()) 466 | state:set(base.CHECKUP_TIMER_ALIVE_KEY, true, overtime) 467 | 468 | local ok, err = mutex:set(ckey, 1, overtime) 469 | if not ok then 470 | log(WARN, "failed to update shm: ", err) 471 | end 472 | 473 | local ok, err = ngx.timer.at(interval, _M.active_checkup) 474 | if not ok then 475 | log(WARN, "failed to create timer: ", err) 476 | local ok, err = mutex:set(ckey, nil) 477 | if not ok then 478 | log(WARN, "failed to update shm: ", err) 479 | end 480 | return 481 | end 482 | end 483 | 484 | 485 | return _M 486 | -------------------------------------------------------------------------------- /lib/resty/checkups/round_robin.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016, UPYUN Inc. 
2 | 3 | local ceil = math.ceil 4 | 5 | local _M = { _VERSION = "0.11" } 6 | 7 | 8 | --[[ 9 | parameters: 10 | - (table) servers 11 | - (function) peer_cb(index, server) 12 | return: 13 | - (table) server 14 | - (string) error 15 | --]] 16 | function _M.next_round_robin_server(servers, peer_cb) 17 | local srvs_cnt = #servers 18 | 19 | if srvs_cnt == 1 then 20 | if peer_cb(1, servers[1]) then 21 | return servers[1], nil 22 | end 23 | 24 | return nil, "round robin: no servers available" 25 | end 26 | 27 | -- select round robin server 28 | local best 29 | local max_weight 30 | local weight_sum = 0 31 | for idx = 1, srvs_cnt do 32 | local srv = servers[idx] 33 | -- init round robin state 34 | srv.weight = srv.weight or 1 35 | srv.effective_weight = srv.effective_weight or srv.weight 36 | srv.current_weight = srv.current_weight or 0 37 | 38 | if peer_cb(idx, srv) then 39 | srv.current_weight = srv.current_weight + srv.effective_weight 40 | weight_sum = weight_sum + srv.effective_weight 41 | 42 | if srv.effective_weight < srv.weight then 43 | srv.effective_weight = srv.effective_weight + 1 44 | end 45 | 46 | if not max_weight or srv.current_weight > max_weight then 47 | max_weight = srv.current_weight 48 | best = srv 49 | end 50 | end 51 | end 52 | 53 | if not best then 54 | return nil, "round robin: no servers available" 55 | end 56 | 57 | best.current_weight = best.current_weight - weight_sum 58 | 59 | return best, nil 60 | end 61 | 62 | 63 | 64 | function _M.free_round_robin_server(srv, failed) 65 | if not failed then 66 | return 67 | end 68 | 69 | srv.effective_weight = ceil((srv.effective_weight or 1) / 2) 70 | end 71 | 72 | 73 | return _M 74 | -------------------------------------------------------------------------------- /lib/resty/checkups/try.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2014-2016, UPYUN Inc. 
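--[[
A caller-side sketch, not part of the module: try_cluster is normally
reached through checkups.ready_ok(skey, request_cb, opts), and the value
request_cb returns feeds retry_cb below. A plain truthy value counts as
success; for typ = "http" upstreams a table such as { status = 502 } is
checked against http_opts.statuses, and a status mapped to false counts
as a failure and triggers a retry on the next server. The upstream name
"ups1" here is hypothetical.

    local checkups = require "resty.checkups"
    local res, err = checkups.ready_ok("ups1", function(host, port)
        -- issue the real request against host:port here; returning
        -- { status = 502 } fails over when statuses["502"] == false
        return { status = 502 }
    end, { try = 2, try_timeout = 0.5 })

opts.try caps the number of attempts (defaulting to the total server
count) and opts.try_timeout caps the request time accumulated across
attempts, with 0 meaning unlimited.
--]]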
2 | 3 | local cjson = require "cjson.safe" 4 | local round_robin = require "resty.checkups.round_robin" 5 | local consistent_hash = require "resty.checkups.consistent_hash" 6 | local base = require "resty.checkups.base" 7 | 8 | local max = math.max 9 | local sqrt = math.sqrt 10 | local floor = math.floor 11 | local tab_insert = table.insert 12 | local tostring = tostring 13 | 14 | local update_time = ngx.update_time 15 | local now = ngx.now 16 | 17 | local _M = { _VERSION = "0.11" } 18 | 19 | local is_tab = base.is_tab 20 | 21 | local NEED_RETRY = 0 22 | local REQUEST_SUCCESS = 1 23 | local EXCESS_TRY_LIMIT = 2 24 | 25 | 26 | local function prepare_callbacks(skey, opts) 27 | local ups = base.upstream.checkups[skey] 28 | 29 | -- calculate count of cluster and server 30 | local cls_keys = {} -- string key or number level 31 | local srvs_cnt = 0 32 | if is_tab(opts.cluster_key) then -- specify try cluster 33 | for _, cls_key in ipairs(opts.cluster_key) do 34 | local cls = ups.cluster[cls_key] 35 | if is_tab(cls) then 36 | tab_insert(cls_keys, cls_key) 37 | srvs_cnt = srvs_cnt + #cls.servers 38 | end 39 | end 40 | else -- default try cluster 41 | for cls_key, cls in pairs(ups.cluster) do 42 | tab_insert(cls_keys, cls_key) 43 | srvs_cnt = srvs_cnt + #cls.servers 44 | end 45 | end 46 | 47 | 48 | -- get next level cluster 49 | local cls_key 50 | local cls_index = 0 51 | local cls_cnt = #cls_keys 52 | local next_cluster_cb = function() 53 | cls_index = cls_index + 1 54 | if cls_index > cls_cnt then 55 | return 56 | end 57 | 58 | cls_key = cls_keys[cls_index] 59 | return ups.cluster[cls_key] 60 | end 61 | 62 | 63 | -- get next select server 64 | local mode = ups.mode 65 | local next_server_func = round_robin.next_round_robin_server 66 | local key 67 | if mode ~= nil then 68 | if mode == "hash" then 69 | key = opts.hash_key or ngx.var.uri 70 | elseif mode == "url_hash" then 71 | key = ngx.var.uri 72 | elseif mode == "ip_hash" then 73 | key = ngx.var.remote_addr 74 | elseif mode == "header_hash" then 75 | key = ngx.var.http_x_hash_key or ngx.var.uri 76 | end 77 | 78 | next_server_func = consistent_hash.next_consistent_hash_server 79 | end 80 | local next_server_cb = function(servers, peer_cb) 81 | return next_server_func(servers, peer_cb, key) 82 | end 83 | 84 | 85 | -- check whether ther server is available 86 | local bad_servers = {} 87 | local peer_cb = function(index, srv) 88 | local key = ("%s:%s:%s"):format(cls_key, srv.host, srv.port) 89 | if bad_servers[key] then 90 | return false 91 | end 92 | 93 | if ups.enable == false or (ups.enable == nil 94 | and base.upstream.default_heartbeat_enable == false) then 95 | return base.get_srv_status(skey, srv) == base.STATUS_OK 96 | end 97 | 98 | local peer_status = base.get_peer_status(skey, srv) 99 | if (not peer_status or peer_status.status ~= base.STATUS_ERR) 100 | and base.get_srv_status(skey, srv) == base.STATUS_OK then 101 | return true 102 | end 103 | end 104 | 105 | 106 | -- check whether need retry 107 | local statuses 108 | if ups.typ == "http" and is_tab(ups.http_opts) then 109 | statuses = ups.http_opts.statuses 110 | end 111 | local try_cnt = 0 112 | local try_limit = opts.try or ups.try or srvs_cnt 113 | local retry_cb = function(res) 114 | if is_tab(res) and res.status and is_tab(statuses) then 115 | if statuses[tostring(res.status)] ~= false then 116 | return REQUEST_SUCCESS 117 | end 118 | elseif res then 119 | return REQUEST_SUCCESS 120 | end 121 | 122 | try_cnt = try_cnt + 1 123 | if try_cnt >= try_limit then 124 | return 
EXCESS_TRY_LIMIT 125 | end 126 | 127 | return NEED_RETRY 128 | end 129 | 130 | 131 | -- check whether try_time has over amount_request_time 132 | local try_time = 0 133 | local try_time_limit = opts.try_timeout or ups.try_timeout or 0 134 | local try_time_cb = function(this_time_try_time) 135 | try_time = try_time + this_time_try_time 136 | if try_time_limit == 0 then 137 | return NEED_RETRY 138 | elseif try_time >= try_time_limit then 139 | return EXCESS_TRY_LIMIT 140 | end 141 | 142 | return NEED_RETRY 143 | end 144 | 145 | 146 | -- set some status 147 | local free_server_func = round_robin.free_round_robin_server 148 | if mode == "hash" then 149 | free_server_func = consistent_hash.free_consitent_hash_server 150 | end 151 | local set_status_cb = function(srv, failed) 152 | local key = ("%s:%s:%s"):format(cls_key, srv.host, srv.port) 153 | bad_servers[key] = failed 154 | base.set_srv_status(skey, srv, failed) 155 | free_server_func(srv, failed) 156 | end 157 | 158 | 159 | return { 160 | next_cluster_cb = next_cluster_cb, 161 | next_server_cb = next_server_cb, 162 | retry_cb = retry_cb, 163 | peer_cb = peer_cb, 164 | set_status_cb = set_status_cb, 165 | try_time_cb = try_time_cb, 166 | } 167 | end 168 | 169 | 170 | 171 | --[[ 172 | parameters: 173 | - (string) skey 174 | - (function) request_cb(host, port) 175 | - (table) opts 176 | - (number) try 177 | - (table) cluster_key 178 | - (string) hash_key 179 | return: 180 | - (string) result 181 | - (string) error 182 | --]] 183 | function _M.try_cluster(skey, request_cb, opts) 184 | local callbacks = prepare_callbacks(skey, opts) 185 | 186 | local next_cluster_cb = callbacks.next_cluster_cb 187 | local next_server_cb = callbacks.next_server_cb 188 | local peer_cb = callbacks.peer_cb 189 | local retry_cb = callbacks.retry_cb 190 | local set_status_cb = callbacks.set_status_cb 191 | local try_time_cb = callbacks.try_time_cb 192 | 193 | -- iter servers function 194 | local itersrvs = function(servers, peer_cb) 195 | return function() return next_server_cb(servers, peer_cb) end 196 | end 197 | 198 | local res, err = nil, "no servers available" 199 | repeat 200 | -- get next level/key cluster 201 | local cls = next_cluster_cb() 202 | if not cls then 203 | break 204 | end 205 | 206 | for srv, err in itersrvs(cls.servers, peer_cb) do 207 | -- exec request callback by server 208 | local start_time = now() 209 | res, err = request_cb(srv.host, srv.port) 210 | 211 | -- check whether need retry 212 | local end_time = now() 213 | local delta_time = end_time - start_time 214 | 215 | local feedback = retry_cb(res) 216 | set_status_cb(srv, feedback ~= REQUEST_SUCCESS) -- set some status 217 | if feedback ~= NEED_RETRY then 218 | return res, err 219 | end 220 | 221 | local feedback_try_time = try_time_cb(delta_time) 222 | if feedback_try_time ~= NEED_RETRY then 223 | return nil, "try_timeout excceed" 224 | end 225 | end 226 | until false 227 | 228 | return res, err 229 | end 230 | 231 | 232 | return _M 233 | -------------------------------------------------------------------------------- /lib/resty/subsystem.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (C) 2017 Libo Huang (huangnauh), UPYUN Inc. 
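--[[
The naming rule implemented below: in the http subsystem
get_shm("mutex") resolves to ngx.shared.mutex, while in the stream
subsystem the key is prefixed and the same call resolves to
ngx.shared.stream_mutex. A configuration serving both subsystems can
therefore declare lua_shared_dict mutex and lua_shared_dict stream_mutex
side by side and let the module pick the right one at runtime.
--]]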
2 | 3 | local ngx_subsystem = ngx.config.subsystem 4 | local str_format = string.format 5 | local _M = {} 6 | 7 | local function get_shm_key(key) 8 | if ngx_subsystem == "http" then 9 | return key 10 | else 11 | return str_format("%s_%s", ngx_subsystem, key) 12 | end 13 | end 14 | 15 | function _M.get_shm(key) 16 | local shm_key = get_shm_key(key) 17 | return ngx.shared[shm_key] 18 | end 19 | 20 | _M.get_shm_key = get_shm_key 21 | 22 | return _M 23 | -------------------------------------------------------------------------------- /lua-resty-checkups+API.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/upyun/lua-resty-checkups/abbbc08558a7b716eba49c1cf3cc350a9f0255d2/lua-resty-checkups+API.png -------------------------------------------------------------------------------- /lua-resty-checkups-0.1-0.rockspec: -------------------------------------------------------------------------------- 1 | package = 'lua-resty-checkups' 2 | version = '0.1-0' 3 | source = { 4 | url = "git://github.com/upyun/lua-resty-checkups", 5 | tag = "v0.1", 6 | } 7 | 8 | description = { 9 | summary = "Manage Nginx upstreams in pure ngx_lua", 10 | detailed = "Manage Nginx upstreams in pure ngx_lua", 11 | license = "2-clause BSD", 12 | homepage = "https://github.com/upyun/lua-resty-checkups", 13 | maintainer = "huangnauh (https://github.com/huangnauh)", 14 | } 15 | 16 | dependencies = { 17 | 'lua >= 5.1', 18 | } 19 | 20 | build = { 21 | type = "builtin", 22 | modules = { 23 | ["resty.checkups"] = "lib/resty/checkups.lua", 24 | ["resty.subsystem"] = "lib/resty/subsystem.lua", 25 | ["resty.checkups.api"] = "lib/resty/checkups/api.lua", 26 | ["resty.checkups.base"] = "lib/resty/checkups/base.lua", 27 | ["resty.checkups.consistent_hash"] = "lib/resty/checkups/consistent_hash.lua", 28 | ["resty.checkups.dyconfig"] = "lib/resty/checkups/dyconfig.lua", 29 | ["resty.checkups.heartbeat"] = "lib/resty/checkups/heartbeat.lua", 30 | ["resty.checkups.round_robin"] = "lib/resty/checkups/round_robin.lua", 31 | ["resty.checkups.try"] = "lib/resty/checkups/try.lua", 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /t/cluster_key.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 502; 33 | } 34 | } 35 | 36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | server { 44 | listen 12357; 45 | location = /status { 46 | content_by_lua ' 47 | ngx.sleep(3) 48 | ngx.status = 200 49 | '; 50 | } 51 | } 52 | 53 | init_by_lua ' 54 | local config = require "config_key" 55 | local checkups = require "resty.checkups" 56 | checkups.init(config) 57 | '; 58 | 59 | init_worker_by_lua ' 60 | local config = require "config_key" 61 | local checkups = require "resty.checkups" 62 | 
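-- prepare_checker loads the parsed config into the worker;
-- create_checker then starts the active heartbeat timer (a shared-dict
-- key keeps multiple workers from running it at once) and, when dynamic
-- config is enabled, a per-worker shd_config syncer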
checkups.prepare_checker(config) 63 | checkups.create_checker() 64 | '; 65 | 66 | }; 67 | 68 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 69 | $ENV{TEST_NGINX_USE_HUP} = 1; 70 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 71 | #no_diff(); 72 | no_long_string(); 73 | 74 | run_tests(); 75 | 76 | __DATA__ 77 | 78 | === TEST 1: http 79 | --- http_config eval: $::HttpConfig 80 | --- config 81 | location = /t { 82 | access_log off; 83 | content_by_lua ' 84 | local checkups = require "resty.checkups" 85 | ngx.sleep(4) 86 | local cb_ok = function(host, port) 87 | ngx.say(host .. ":" .. port) 88 | return 1 89 | end 90 | 91 | local ok, err = checkups.ready_ok("upyun", cb_ok, {cluster_key = {"c1", "c2"}}) 92 | if err then 93 | ngx.say(err) 94 | end 95 | local ok, err = checkups.ready_ok("upyun", cb_ok, {cluster_key = {"c1", "c2"}}) 96 | if err then 97 | ngx.say(err) 98 | end 99 | local ok, err = checkups.ready_ok("upyun", cb_ok, {cluster_key = {"c1", "c2"}}) 100 | if err then 101 | ngx.say(err) 102 | end 103 | '; 104 | } 105 | --- request 106 | GET /t 107 | --- response_body 108 | 127.0.0.1:12354 109 | 127.0.0.1:12356 110 | 127.0.0.1:12354 111 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 112 | --- grep_error_log_out 113 | failed to receive status line from: 127.0.0.1:12357, timeout 114 | failed to receive status line from: 127.0.0.1:12357, timeout 115 | --- timeout: 10 116 | 117 | 118 | === TEST 2: fail with status code 119 | --- http_config eval: $::HttpConfig 120 | --- config 121 | location = /t { 122 | access_log off; 123 | content_by_lua ' 124 | local checkups = require "resty.checkups" 125 | ngx.sleep(4) 126 | local cb = function(host, port) 127 | ngx.say(host .. ":" .. port) 128 | return {status = 502} 129 | end 130 | 131 | local ok, err = checkups.ready_ok("upyun", cb, {cluster_key = {"c1", "c2"}}) 132 | if err then 133 | ngx.say(err) 134 | end 135 | '; 136 | } 137 | --- request 138 | GET /t 139 | --- response_body 140 | 127.0.0.1:12354 141 | 127.0.0.1:12356 142 | 127.0.0.1:12356 143 | 127.0.0.1:12354 144 | no servers available 145 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 146 | --- grep_error_log_out 147 | failed to receive status line from: 127.0.0.1:12357, timeout 148 | failed to receive status line from: 127.0.0.1:12357, timeout 149 | --- timeout: 10 150 | 151 | 152 | === TEST 3: backup cluster 153 | --- http_config eval: $::HttpConfig 154 | --- config 155 | location = /t { 156 | access_log off; 157 | content_by_lua ' 158 | local checkups = require "resty.checkups" 159 | ngx.sleep(4) 160 | local cb = function(host, port) 161 | ngx.say(host .. ":" .. 
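-- cluster_key = {"c1", "c2"} makes ready_ok walk the servers of c1
-- first and fall back to c2 only afterwards; the expected
-- response_body below shows that fallback order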
port) 162 | return {status = 502} 163 | end 164 | 165 | local ok, err = checkups.ready_ok("upyun", cb, {cluster_key = {"c1", "c2"}}) 166 | if err then 167 | ngx.say(err) 168 | end 169 | '; 170 | } 171 | --- request 172 | GET /t 173 | --- response_body 174 | 127.0.0.1:12354 175 | 127.0.0.1:12356 176 | 127.0.0.1:12356 177 | 127.0.0.1:12354 178 | no servers available 179 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 180 | --- grep_error_log_out 181 | failed to receive status line from: 127.0.0.1:12357, timeout 182 | failed to receive status line from: 127.0.0.1:12357, timeout 183 | --- timeout: 10 184 | -------------------------------------------------------------------------------- /t/consistent.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 502; 33 | } 34 | } 35 | 36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | server { 44 | listen 12360; 45 | location = /status { 46 | return 200; 47 | } 48 | } 49 | 50 | server { 51 | listen 12361; 52 | location = /status { 53 | return 200; 54 | } 55 | } 56 | 57 | init_by_lua ' 58 | local config = require "config_hash" 59 | local checkups = require "resty.checkups" 60 | checkups.init(config) 61 | '; 62 | 63 | init_worker_by_lua ' 64 | local config = require "config_hash" 65 | local checkups = require "resty.checkups" 66 | checkups.prepare_checker(config) 67 | checkups.create_checker() 68 | '; 69 | 70 | }; 71 | 72 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 73 | $ENV{TEST_NGINX_USE_HUP} = 1; 74 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 75 | #no_diff(); 76 | no_long_string(); 77 | 78 | run_tests(); 79 | 80 | __DATA__ 81 | 82 | === TEST 1: consistent hash 83 | --- http_config eval: $::HttpConfig 84 | --- config 85 | location = /t { 86 | access_log off; 87 | content_by_lua ' 88 | local checkups = require "resty.checkups" 89 | ngx.sleep(1) 90 | local cb_ok = function(host, port) 91 | ngx.say(host .. ":" .. 
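-- with the hash mode set in config_hash, ready_ok picks the server by
-- consistent hashing on opts.hash_key, so repeated calls with the same
-- key stick to one server (12354 in the expected output below)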
port) 92 | return 1 93 | end 94 | 95 | local ok, err = checkups.ready_ok("hash", cb_ok, {hash_key = "/ab"}) 96 | local ok, err = checkups.ready_ok("hash", cb_ok, {hash_key = "/ab"}) 97 | local ok, err = checkups.ready_ok("hash", cb_ok, {hash_key = "/abc"}) 98 | local ok, err = checkups.ready_ok("hash", cb_ok, {hash_key = "/abc"}) 99 | '; 100 | } 101 | --- request 102 | GET /t 103 | --- response_body 104 | 127.0.0.1:12354 105 | 127.0.0.1:12354 106 | 127.0.0.1:12354 107 | 127.0.0.1:12354 108 | -------------------------------------------------------------------------------- /t/dyconfig.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et: 2 | 3 | use lib 'lib'; 4 | use Test::Nginx::Socket; 5 | use Cwd qw(cwd); 6 | use Test::Nginx::Socket 'no_plan'; 7 | 8 | workers(4); 9 | master_process_enabled(1); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | lua_shared_dict config 10m; 22 | 23 | server { 24 | listen 12350; 25 | return 200 12350; 26 | } 27 | 28 | server { 29 | listen 12351; 30 | return 200 12351; 31 | } 32 | 33 | server { 34 | listen 12352; 35 | return 200 12352; 36 | } 37 | 38 | server { 39 | listen 12353; 40 | return 200 12353; 41 | } 42 | 43 | upstream api.com { 44 | server 127.0.0.1:12350; 45 | server 127.0.0.1:12351; 46 | server 127.0.0.1:12352 backup; 47 | server 127.0.0.1:12353 backup; 48 | } 49 | 50 | init_by_lua ' 51 | local config = require "config_dyconfig" 52 | local checkups = require "resty.checkups" 53 | checkups.init(config) 54 | '; 55 | 56 | init_worker_by_lua ' 57 | local config = require "config_dyconfig" 58 | local checkups = require "resty.checkups" 59 | checkups.prepare_checker(config) 60 | checkups.create_checker() 61 | '; 62 | }; 63 | 64 | 65 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 66 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 67 | $ENV{TEST_NGINX_USE_HUP} = 1; 68 | $ENV{TEST_NGINX_PWD} = $pwd; 69 | 70 | #no_diff(); 71 | no_long_string(); 72 | 73 | run_tests(); 74 | 75 | __DATA__ 76 | 77 | === TEST 1: Add server 78 | --- http_config eval: $::HttpConfig 79 | --- config 80 | location = /12350 { 81 | proxy_pass http://127.0.0.1:12350/; 82 | } 83 | location = /12351 { 84 | proxy_pass http://127.0.0.1:12351/; 85 | } 86 | location = /12352 { 87 | proxy_pass http://127.0.0.1:12352/; 88 | } 89 | location = /12353 { 90 | proxy_pass http://127.0.0.1:12353/; 91 | } 92 | 93 | location = /t { 94 | content_by_lua ' 95 | local checkups = require "resty.checkups" 96 | 97 | local callback = function(host, port) 98 | local res = ngx.location.capture("/" .. 
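-- update_upstream and delete_upstream only write to the config shared
-- dict; each worker applies the change on its next shd_config sync,
-- which is why these tests ngx.sleep(1) before calling ready_ok again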
port) 99 | ngx.say(res.body) 100 | return 1 101 | end 102 | 103 | local ok, err 104 | 105 | -- no upstream available 106 | ok, err = checkups.ready_ok("ups1", callback) 107 | if err then ngx.say(err) end 108 | 109 | -- add server to backup level 110 | ok, err = checkups.update_upstream("ups1", { 111 | { 112 | servers = { 113 | {host="127.0.0.1", port=12353}, 114 | } 115 | }, 116 | }) 117 | if err then ngx.say(err) end 118 | ngx.sleep(1) 119 | ok, err = checkups.ready_ok("ups1", callback) 120 | if err then ngx.say(err) end 121 | 122 | -- add server to primary level 123 | ok, err = checkups.update_upstream("ups1", { 124 | { 125 | servers = { 126 | {host="127.0.0.1", port=12353}, 127 | {host="127.0.0.1", port=12350}, 128 | } 129 | }, 130 | }) 131 | if err then ngx.say(err) end 132 | ngx.sleep(1) 133 | ok, err = checkups.ready_ok("ups1", callback) 134 | if err then ngx.say(err) end 135 | ok, err = checkups.ready_ok("ups1", callback) 136 | if err then ngx.say(err) end 137 | 138 | -- add server to primary level, ups2, server exists 139 | ok, err = checkups.update_upstream("ups2", { 140 | { 141 | servers = { 142 | {host="127.0.0.1", port=12350}, 143 | } 144 | }, 145 | }) 146 | if err then ngx.say(err) end 147 | ngx.sleep(1) 148 | ok, err = checkups.ready_ok("ups2", callback) 149 | if err then ngx.say(err) end 150 | 151 | -- add server to primary level, ups2, reset rr state 152 | ok, err = checkups.update_upstream("ups2", { 153 | { 154 | servers = { 155 | {host="127.0.0.1", port=12350}, 156 | {host="127.0.0.1", port=12351}, 157 | } 158 | }, 159 | }) 160 | if err then ngx.say(err) end 161 | ngx.sleep(1) 162 | ok, err = checkups.ready_ok("ups2", callback) 163 | if err then ngx.say(err) end 164 | ok, err = checkups.ready_ok("ups2", callback) 165 | if err then ngx.say(err) end 166 | '; 167 | } 168 | --- request 169 | GET /t 170 | --- response_body 171 | no servers available 172 | 12353 173 | 12353 174 | 12350 175 | 12350 176 | 12350 177 | 12351 178 | 179 | --- timeout: 10 180 | 181 | 182 | === TEST 2: Delete server 183 | --- http_config eval: $::HttpConfig 184 | --- config 185 | location = /12350 { 186 | proxy_pass http://127.0.0.1:12350/; 187 | } 188 | location = /12351 { 189 | proxy_pass http://127.0.0.1:12351/; 190 | } 191 | location = /12352 { 192 | proxy_pass http://127.0.0.1:12352/; 193 | } 194 | location = /12353 { 195 | proxy_pass http://127.0.0.1:12353/; 196 | } 197 | 198 | location = /t { 199 | content_by_lua ' 200 | local checkups = require "resty.checkups" 201 | 202 | local callback = function(host, port) 203 | local res = ngx.location.capture("/" .. 
port) 204 | ngx.say(res.body) 205 | return 1 206 | end 207 | 208 | local ok, err 209 | -- ups5, delete non-exist level 210 | ok, err = checkups.delete_upstream("ups5") 211 | if err then ngx.say(err) end 212 | 213 | ok, err = checkups.delete_upstream("ups2") 214 | if err then ngx.say(err) end 215 | 216 | ngx.sleep(1) 217 | 218 | ok, err = checkups.ready_ok("ups2", callback) 219 | if err then ngx.say(err) end 220 | 221 | -- add server to primary level, ups2, reset rr state 222 | ok, err = checkups.update_upstream("ups2", { 223 | { 224 | servers = { 225 | {host="127.0.0.1", port=12350}, 226 | {host="127.0.0.1", port=12351}, 227 | } 228 | }, 229 | }) 230 | if err then ngx.say(err) end 231 | ngx.sleep(1) 232 | ok, err = checkups.ready_ok("ups2", callback) 233 | if err then ngx.say(err) end 234 | ok, err = checkups.ready_ok("ups2", callback) 235 | if err then ngx.say(err) end 236 | 237 | ------------------------------- 238 | 239 | ok, err = checkups.ready_ok("ups3", callback) 240 | if err then ngx.say(err) end 241 | ok, err = checkups.ready_ok("ups3", callback) 242 | if err then ngx.say(err) end 243 | ok, err = checkups.ready_ok("ups3", callback) 244 | if err then ngx.say(err) end 245 | ok, err = checkups.ready_ok("ups3", callback) 246 | if err then ngx.say(err) end 247 | 248 | ok, err = checkups.delete_upstream("ups3") 249 | if err then ngx.say(err) end 250 | 251 | ngx.sleep(1) 252 | 253 | ok, err = checkups.ready_ok("ups3", callback) 254 | if err then ngx.say(err) end 255 | 256 | -- add server to primary level, ups3, reset rr state 257 | ok, err = checkups.update_upstream("ups3", { 258 | { 259 | servers = { 260 | {host="127.0.0.1", port=12352}, 261 | {host="127.0.0.1", port=12353}, 262 | } 263 | }, 264 | }) 265 | if err then ngx.say(err) end 266 | ngx.sleep(1) 267 | ok, err = checkups.ready_ok("ups3", callback) 268 | if err then ngx.say(err) end 269 | ok, err = checkups.ready_ok("ups3", callback) 270 | if err then ngx.say(err) end 271 | '; 272 | } 273 | --- request 274 | GET /t 275 | --- response_body 276 | upstream ups5 not found 277 | unknown skey ups2 278 | 12350 279 | 12351 280 | 12350 281 | 12351 282 | 12350 283 | 12351 284 | unknown skey ups3 285 | 12352 286 | 12353 287 | 288 | --- timeout: 10 289 | 290 | 291 | === TEST 3: add, delete servers extracted from nginx upstream 292 | --- http_config eval: $::HttpConfig 293 | --- config 294 | location = /12350 { 295 | proxy_pass http://127.0.0.1:12350/; 296 | } 297 | location = /12351 { 298 | proxy_pass http://127.0.0.1:12351/; 299 | } 300 | location = /12352 { 301 | proxy_pass http://127.0.0.1:12352/; 302 | } 303 | location = /12353 { 304 | proxy_pass http://127.0.0.1:12353/; 305 | } 306 | 307 | location = /t { 308 | content_by_lua ' 309 | local checkups = require "resty.checkups" 310 | 311 | local callback = function(host, port) 312 | local res = ngx.location.capture("/" .. 
port) 313 | ngx.say(res.body) 314 | return 1 315 | end 316 | 317 | local ok, err 318 | ok, err = checkups.delete_upstream("ups3") 319 | if err then ngx.say(err) end 320 | 321 | ngx.sleep(1) 322 | 323 | ok, err = checkups.ready_ok("ups3", callback) 324 | if err then ngx.say(err) end 325 | ok, err = checkups.ready_ok("ups3", callback) 326 | if err then ngx.say(err) end 327 | 328 | -- add server to primary level 329 | ok, err = checkups.update_upstream("ups3", { 330 | { 331 | servers = { 332 | {host="127.0.0.1", port=12352}, 333 | } 334 | }, 335 | }) 336 | if err then ngx.say(err) end 337 | ngx.sleep(1) 338 | 339 | ok, err = checkups.ready_ok("ups3", callback) 340 | if err then ngx.say(err) end 341 | ok, err = checkups.ready_ok("ups3", callback) 342 | if err then ngx.say(err) end 343 | '; 344 | } 345 | --- request 346 | GET /t 347 | --- response_body 348 | unknown skey ups3 349 | unknown skey ups3 350 | 12352 351 | 12352 352 | 353 | --- timeout: 10 354 | 355 | 356 | === TEST 4: update servers 357 | --- http_config eval: $::HttpConfig 358 | --- config 359 | location = /12350 { 360 | proxy_pass http://127.0.0.1:12350/; 361 | } 362 | location = /12351 { 363 | proxy_pass http://127.0.0.1:12351/; 364 | } 365 | location = /12352 { 366 | proxy_pass http://127.0.0.1:12352/; 367 | } 368 | location = /12353 { 369 | proxy_pass http://127.0.0.1:12353/; 370 | } 371 | 372 | location = /t { 373 | content_by_lua ' 374 | local checkups = require "resty.checkups" 375 | 376 | local callback = function(host, port) 377 | local res = ngx.location.capture("/" .. port) 378 | ngx.say(res.body) 379 | return 1 380 | end 381 | 382 | local ok, err 383 | ok, err = checkups.update_upstream("ups2", { 384 | { 385 | servers = { 386 | {host="127.0.0.1", port=12350}, 387 | {host="127.0.0.1", port=12351}, 388 | {host="127.0.0.1", port=12352}, 389 | {host="127.0.0.1", port=12353}, 390 | } 391 | }, 392 | }) 393 | if err then ngx.say(err) end 394 | 395 | ngx.sleep(1) 396 | 397 | ok, err = checkups.ready_ok("ups2", callback) 398 | if err then ngx.say(err) end 399 | ok, err = checkups.ready_ok("ups2", callback) 400 | if err then ngx.say(err) end 401 | ok, err = checkups.ready_ok("ups2", callback) 402 | if err then ngx.say(err) end 403 | ok, err = checkups.ready_ok("ups2", callback) 404 | if err then ngx.say(err) end 405 | '; 406 | } 407 | --- request 408 | GET /t 409 | --- response_body 410 | 12350 411 | 12351 412 | 12352 413 | 12353 414 | 415 | --- timeout: 10 416 | 417 | 418 | === TEST 5: add new upstream 419 | --- http_config eval: $::HttpConfig 420 | --- config 421 | location = /12350 { 422 | proxy_pass http://127.0.0.1:12350/; 423 | } 424 | location = /12351 { 425 | proxy_pass http://127.0.0.1:12351/; 426 | } 427 | location = /12352 { 428 | proxy_pass http://127.0.0.1:12352/; 429 | } 430 | location = /12353 { 431 | proxy_pass http://127.0.0.1:12353/; 432 | } 433 | 434 | location = /t { 435 | content_by_lua ' 436 | local checkups = require "resty.checkups" 437 | 438 | local callback = function(host, port) 439 | local res = ngx.location.capture("/" .. 
port) 440 | ngx.say(res.body) 441 | return 1 442 | end 443 | 444 | local ok, err 445 | 446 | ok, err = checkups.ready_ok("new_ups", callback) 447 | if err then ngx.say(err) end 448 | 449 | ok, err = checkups.update_upstream("new_ups", { 450 | { 451 | servers = { 452 | {host="127.0.0.1", port=12350}, 453 | {host="127.0.0.1", port=12351}, 454 | {host="127.0.0.1", port=12352}, 455 | {host="127.0.0.1", port=12353}, 456 | } 457 | }, 458 | }) 459 | if err then ngx.say(err) end 460 | 461 | ngx.sleep(1) 462 | 463 | ok, err = checkups.ready_ok("new_ups", callback) 464 | if err then ngx.say(err) end 465 | ok, err = checkups.ready_ok("new_ups", callback) 466 | if err then ngx.say(err) end 467 | ok, err = checkups.ready_ok("new_ups", callback) 468 | if err then ngx.say(err) end 469 | ok, err = checkups.ready_ok("new_ups", callback) 470 | if err then ngx.say(err) end 471 | ok, err = checkups.ready_ok("new_ups", callback) 472 | if err then ngx.say(err) end 473 | '; 474 | } 475 | --- request 476 | GET /t 477 | --- response_body 478 | unknown skey new_ups 479 | 12350 480 | 12351 481 | 12352 482 | 12353 483 | 12350 484 | 485 | --- timeout: 10 486 | 487 | 488 | === TEST 6: add new server to new upstream 489 | --- http_config eval: $::HttpConfig 490 | --- config 491 | location = /12350 { 492 | proxy_pass http://127.0.0.1:12350/; 493 | } 494 | location = /12351 { 495 | proxy_pass http://127.0.0.1:12351/; 496 | } 497 | location = /12352 { 498 | proxy_pass http://127.0.0.1:12352/; 499 | } 500 | location = /12353 { 501 | proxy_pass http://127.0.0.1:12353/; 502 | } 503 | 504 | location = /t { 505 | content_by_lua ' 506 | local checkups = require "resty.checkups" 507 | 508 | local callback = function(host, port) 509 | local res = ngx.location.capture("/" .. port) 510 | ngx.say(res.body) 511 | return 1 512 | end 513 | 514 | local ok, err 515 | 516 | ok, err = checkups.delete_upstream("new_ups") 517 | if err then ngx.say(err) end 518 | 519 | ok, err = checkups.update_upstream("new_ups", { 520 | { 521 | servers = { 522 | {host="127.0.0.1", port=12350}, 523 | {host="127.0.0.1", port=12351}, 524 | {host="127.0.0.1", port=12352}, 525 | {host="127.0.0.1", port=12353}, 526 | } 527 | }, 528 | }) 529 | if err then ngx.say(err) end 530 | 531 | ok, err = checkups.delete_upstream("new_ups") 532 | if err then ngx.say(err) end 533 | 534 | ok, err = checkups.delete_upstream("new_ups") 535 | if err then ngx.say(err) end 536 | 537 | ngx.sleep(1) 538 | 539 | ok, err = checkups.ready_ok("new_ups", callback) 540 | if err then ngx.say(err) end 541 | ok, err = checkups.ready_ok("new_ups", callback) 542 | if err then ngx.say(err) end 543 | 544 | ok, err = checkups.update_upstream("new_ups", { 545 | { 546 | servers = { 547 | {host="127.0.0.1", port=12352}, 548 | {host="127.0.0.1", port=12353}, 549 | {host="127.0.0.1", port=12350}, 550 | } 551 | }, 552 | }) 553 | if err then ngx.say(err) end 554 | ngx.sleep(1) 555 | 556 | ok, err = checkups.ready_ok("new_ups", callback) 557 | if err then ngx.say(err) end 558 | ok, err = checkups.ready_ok("new_ups", callback) 559 | if err then ngx.say(err) end 560 | ok, err = checkups.ready_ok("new_ups", callback) 561 | if err then ngx.say(err) end 562 | 563 | ok, err = checkups.delete_upstream("new_ups") 564 | if err then ngx.say(err) end 565 | 566 | ngx.sleep(1) 567 | 568 | ok, err = checkups.ready_ok("new_ups", callback) 569 | if err then ngx.say(err) end 570 | '; 571 | } 572 | --- request 573 | GET /t 574 | --- response_body 575 | upstream new_ups not found 576 | upstream new_ups not found 577 | unknown 
skey new_ups 578 | unknown skey new_ups 579 | 12352 580 | 12353 581 | 12350 582 | unknown skey new_ups 583 | 584 | --- timeout: 10 585 | 586 | 587 | === TEST 7: add new level to new upstream 588 | --- http_config eval: $::HttpConfig 589 | --- config 590 | location = /12350 { 591 | proxy_pass http://127.0.0.1:12350/; 592 | } 593 | location = /12351 { 594 | proxy_pass http://127.0.0.1:12351/; 595 | } 596 | location = /12352 { 597 | proxy_pass http://127.0.0.1:12352/; 598 | } 599 | location = /12353 { 600 | proxy_pass http://127.0.0.1:12353/; 601 | } 602 | 603 | location = /t { 604 | content_by_lua ' 605 | local checkups = require "resty.checkups" 606 | 607 | local callback = function(host, port) 608 | local res = ngx.location.capture("/" .. port) 609 | ngx.say(res.body) 610 | return 1 611 | end 612 | 613 | local ok, err 614 | 615 | ok, err = checkups.update_upstream("new_ups", { 616 | { 617 | servers = { 618 | {host="127.0.0.1", port=12350}, 619 | {host="127.0.0.1", port=12351}, 620 | } 621 | }, 622 | }) 623 | if err then ngx.say(err) end 624 | 625 | ngx.sleep(1) 626 | 627 | ok, err = checkups.ready_ok("new_ups", callback) 628 | if err then ngx.say(err) end 629 | 630 | ok, err = checkups.ready_ok("new_ups", callback) 631 | if err then ngx.say(err) end 632 | 633 | ok, err = checkups.update_upstream("new_ups", { 634 | { 635 | servers = { 636 | {host="127.0.0.1", port=12350}, 637 | {host="127.0.0.1", port=12351}, 638 | {host="127.0.0.1", port=12352}, 639 | {host="127.0.0.1", port=12353}, 640 | } 641 | }, 642 | }) 643 | 644 | ngx.sleep(1) 645 | 646 | ok, err = checkups.ready_ok("new_ups", callback) 647 | if err then ngx.say(err) end 648 | ok, err = checkups.ready_ok("new_ups", callback) 649 | if err then ngx.say(err) end 650 | ok, err = checkups.ready_ok("new_ups", callback) 651 | if err then ngx.say(err) end 652 | ok, err = checkups.ready_ok("new_ups", callback) 653 | if err then ngx.say(err) end 654 | ok, err = checkups.ready_ok("new_ups", callback) 655 | if err then ngx.say(err) end 656 | ok, err = checkups.ready_ok("new_ups", callback) 657 | if err then ngx.say(err) end 658 | 659 | ok, err = checkups.delete_upstream("new_ups") 660 | if err then ngx.say(err) end 661 | ngx.sleep(1) 662 | 663 | ok, err = checkups.ready_ok("new_ups", callback) 664 | if err then ngx.say(err) end 665 | 666 | ok, err = checkups.update_upstream("new_ups", { 667 | { 668 | servers = { 669 | {host="127.0.0.1", port=12352}, 670 | {host="127.0.0.1", port=12353}, 671 | } 672 | }, 673 | }) 674 | 675 | ngx.sleep(1) 676 | 677 | ok, err = checkups.ready_ok("new_ups", callback) 678 | if err then ngx.say(err) end 679 | 680 | ok, err = checkups.ready_ok("new_ups", callback) 681 | if err then ngx.say(err) end 682 | '; 683 | } 684 | --- request 685 | GET /t 686 | --- response_body 687 | 12350 688 | 12351 689 | 12350 690 | 12351 691 | 12352 692 | 12353 693 | 12350 694 | 12351 695 | unknown skey new_ups 696 | 12352 697 | 12353 698 | 699 | --- timeout: 10 700 | 701 | 702 | === TEST 8: get timer status 703 | --- http_config eval: $::HttpConfig 704 | --- config 705 | location = /12350 { 706 | proxy_pass http://127.0.0.1:12350/; 707 | } 708 | location = /12351 { 709 | proxy_pass http://127.0.0.1:12351/; 710 | } 711 | location = /12352 { 712 | proxy_pass http://127.0.0.1:12352/; 713 | } 714 | location = /12353 { 715 | proxy_pass http://127.0.0.1:12353/; 716 | } 717 | 718 | location = /t { 719 | content_by_lua ' 720 | local checkups = require "resty.checkups" 721 | ngx.sleep(2) 722 | local cjson = require "cjson.safe" 723 | 724 | local 
callback = function(host, port) 725 | local res = ngx.location.capture("/" .. port) 726 | ngx.say(res.body) 727 | return 1 728 | end 729 | 730 | local ok, err = checkups.update_upstream("ups2", { 731 | { 732 | servers = { 733 | {host="127.0.0.1", port=12350}, 734 | {host="127.0.0.1", port=12351}, 735 | {host="127.0.0.1", port=12352}, 736 | {host="127.0.0.1", port=12353}, 737 | } 738 | }, 739 | }) 740 | if err then ngx.say(err) end 741 | 742 | ngx.sleep(1) 743 | 744 | local status = checkups.get_status() 745 | ngx.say(status["config_timer"]["worker-0"]) 746 | ngx.say(status["config_timer"]["worker-1"]) 747 | ngx.say(status["config_timer"]["worker-2"]) 748 | ngx.say(status["config_timer"]["worker-3"]) 749 | '; 750 | } 751 | --- request 752 | GET /t 753 | --- response_body 754 | alive 755 | alive 756 | alive 757 | alive 758 | 759 | --- timeout: 10 760 | -------------------------------------------------------------------------------- /t/dyconfig_opts.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | lua_shared_dict config 10m; 22 | 23 | server { 24 | listen 12354; 25 | location = /status { 26 | return 200; 27 | } 28 | } 29 | 30 | server { 31 | listen 12355; 32 | location = /status { 33 | return 502; 34 | } 35 | } 36 | 37 | server { 38 | listen 12356; 39 | location = /status { 40 | return 404; 41 | } 42 | } 43 | 44 | server { 45 | listen 12360; 46 | location = /status { 47 | return 200; 48 | } 49 | } 50 | 51 | server { 52 | listen 12361; 53 | location = /status { 54 | return 200; 55 | } 56 | } 57 | 58 | init_by_lua ' 59 | local config = require "config_dyconfig_opts" 60 | local checkups = require "resty.checkups" 61 | checkups.init({global = config.global}) 62 | '; 63 | 64 | init_worker_by_lua ' 65 | local config = require "config_dyconfig_opts" 66 | local checkups = require "resty.checkups" 67 | checkups.prepare_checker({global = config.global}) 68 | checkups.create_checker() 69 | '; 70 | 71 | }; 72 | 73 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 74 | $ENV{TEST_NGINX_USE_HUP} = 1; 75 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 76 | #no_diff(); 77 | no_long_string(); 78 | 79 | run_tests(); 80 | 81 | __DATA__ 82 | 83 | === TEST 1: rr to consistent hash 84 | --- http_config eval: $::HttpConfig 85 | --- config 86 | location = /t { 87 | access_log off; 88 | content_by_lua ' 89 | local checkups = require "resty.checkups" 90 | ngx.sleep(1) 91 | 92 | local config = require "config_dyconfig_opts" 93 | -- update_upstream to rr 94 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_rr) 95 | if err then ngx.say(err) end 96 | ngx.sleep(2) 97 | 98 | local cb_ok = function(host, port) 99 | ngx.say(host .. ":" .. 
port) 100 | return 1 101 | end 102 | 103 | 104 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 105 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 106 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 107 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 108 | 109 | 110 | -- update_upstream to hash 111 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_hash) 112 | if err then ngx.say(err) end 113 | ngx.sleep(2) 114 | 115 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 116 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 117 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 118 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 119 | '; 120 | } 121 | --- request 122 | GET /t 123 | --- response_body 124 | 127.0.0.1:12355 125 | 127.0.0.1:12356 126 | 127.0.0.1:12355 127 | 127.0.0.1:12356 128 | 127.0.0.1:12354 129 | 127.0.0.1:12354 130 | 127.0.0.1:12354 131 | 127.0.0.1:12354 132 | --- timeout: 10 133 | 134 | 135 | === TEST 2: consistent hash continue 136 | --- http_config eval: $::HttpConfig 137 | --- config 138 | location = /t { 139 | access_log off; 140 | content_by_lua ' 141 | local checkups = require "resty.checkups" 142 | ngx.sleep(1) 143 | 144 | local config = require "config_dyconfig_opts" 145 | 146 | local cb_ok = function(host, port) 147 | ngx.say(host .. ":" .. port) 148 | return 1 149 | end 150 | 151 | -- update_upstream to hash 152 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_hash) 153 | if err then ngx.say(err) end 154 | ngx.sleep(2) 155 | 156 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 157 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 158 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 159 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 160 | 161 | -- update_upstream 162 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_rr.cluster) 163 | if err then ngx.say(err) end 164 | ngx.sleep(2) 165 | 166 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 167 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 168 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 169 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 170 | '; 171 | } 172 | --- request 173 | GET /t 174 | --- response_body 175 | 127.0.0.1:12354 176 | 127.0.0.1:12354 177 | 127.0.0.1:12354 178 | 127.0.0.1:12354 179 | 127.0.0.1:12355 180 | 127.0.0.1:12355 181 | 127.0.0.1:12355 182 | 127.0.0.1:12355 183 | --- timeout: 10 184 | 185 | 186 | === TEST 3: http_opts 187 | --- http_config eval: $::HttpConfig 188 | --- config 189 | location = /t { 190 | access_log off; 191 | content_by_lua ' 192 | local checkups = require "resty.checkups" 193 | ngx.sleep(1) 194 | 195 | local config = require "config_dyconfig_opts" 196 | -- update_upstream to rr 197 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_rr) 198 | if err then ngx.say(err) end 199 | ngx.sleep(2) 200 | 201 | local cb_ok = function(host, port) 202 | ngx.say(host .. ":" .. 
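-- not only server lists are dynamic: the pushed upstream table can
-- also carry options such as the balancing mode and http_opts, so this
-- update changes routing behaviour without touching nginx.conf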
port) 203 | return 1 204 | end 205 | 206 | 207 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 208 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 209 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 210 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 211 | 212 | 213 | -- update_upstream 214 | ok, err = checkups.update_upstream("dyconfig", config.dyconfig_rr_http) 215 | if err then ngx.say(err) end 216 | ngx.sleep(2) 217 | 218 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 219 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/ab"}) 220 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 221 | local ok, err = checkups.ready_ok("dyconfig", cb_ok, {hash_key = "/abc"}) 222 | '; 223 | } 224 | --- request 225 | GET /t 226 | --- response_body 227 | 127.0.0.1:12355 228 | 127.0.0.1:12356 229 | 127.0.0.1:12355 230 | 127.0.0.1:12356 231 | 127.0.0.1:12356 232 | 127.0.0.1:12356 233 | 127.0.0.1:12356 234 | 127.0.0.1:12356 235 | --- timeout: 10 236 | -------------------------------------------------------------------------------- /t/feedback.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { listen 12354; location = /status { return 200; } } 23 | 24 | server { listen 12355; location = /status { return 502; } } 25 | 26 | server { listen 12356; location = /status { return 404; } } 27 | 28 | server { listen 12357; location = /status { content_by_lua ' ngx.sleep(3) ngx.status = 200 '; } } 29 | 30 | init_by_lua ' 31 | local config = require "config_feedback" 32 | local checkups = require "resty.checkups" 33 | checkups.init(config) 34 | '; 35 | 36 | init_worker_by_lua ' 37 | local config = require "config_feedback" 38 | local checkups = require "resty.checkups" 39 | checkups.prepare_checker(config) 40 | checkups.create_checker() 41 | '; 42 | 43 | }; 44 | 45 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 46 | $ENV{TEST_NGINX_USE_HUP} = 1; 47 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 48 | #no_diff(); 49 | no_long_string(); 50 | 51 | run_tests(); 52 | 53 | __DATA__ 54 | 55 | === TEST 1: http 56 | --- http_config eval: $::HttpConfig 57 | --- config 58 | location = /t { 59 | access_log off; 60 | content_by_lua ' 61 | local checkups = require "resty.checkups" 62 | ngx.sleep(5) 63 | local cb_ok = function(host, port) 64 | ngx.say(host .. ":" .. 
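-- feedback_status manually marks a peer up or down in shared state;
-- the two calls below force 12354 and 12356 down, so ready_ok skips
-- them and falls through to 12357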
port) 65 | return 1 66 | end 67 | 68 | local ok, err = checkups.feedback_status("status", "127.0.0.1", 12354, true) 69 | if not ok then 70 | ngx.say(err) 71 | end 72 | 73 | local ok, err = checkups.feedback_status("status", "127.0.0.1", 12356, true) 74 | if not ok then 75 | ngx.say(err) 76 | end 77 | 78 | 79 | local ok, err = checkups.ready_ok("status", cb_ok) 80 | if err then 81 | ngx.say(err) 82 | end 83 | local ok, err = checkups.ready_ok("status", cb_ok) 84 | if err then 85 | ngx.say(err) 86 | end 87 | '; 88 | } 89 | --- request 90 | GET /t 91 | --- response_body 92 | 127.0.0.1:12357 93 | 127.0.0.1:12357 94 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 95 | --- grep_error_log_out 96 | failed to connect: 127.0.0.1:12360, connection refused 97 | failed to connect: 127.0.0.1:12361, connection refused 98 | --- timeout: 10 99 | -------------------------------------------------------------------------------- /t/get_redis_info.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | init_by_lua ' 23 | local config = require "config_redis" 24 | local checkups = require "resty.checkups" 25 | checkups.init(config) 26 | '; 27 | 28 | init_worker_by_lua ' 29 | local config = require "config_redis" 30 | local checkups = require "resty.checkups" 31 | checkups.prepare_checker(config) 32 | checkups.create_checker() 33 | '; 34 | 35 | }; 36 | 37 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 38 | $ENV{TEST_NGINX_USE_HUP} = 1; 39 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 40 | #no_diff(); 41 | no_long_string(); 42 | 43 | run_tests(); 44 | 45 | __DATA__ 46 | 47 | === TEST 1: redis replication info 48 | --- http_config eval: $::HttpConfig 49 | --- config 50 | location = /t { 51 | access_log off; 52 | content_by_lua ' 53 | local checkups = require "resty.checkups" 54 | ngx.sleep(1) 55 | 56 | local callback = function(host, port) 57 | ngx.say(host .. ":" .. port .. " " .. 
"OK") 58 | end 59 | checkups.ready_ok("redis", callback) 60 | 61 | local st = checkups.get_status() 62 | ngx.say(st["cls:redis"][1][1].status) 63 | ngx.say(st["cls:redis"][1][1].redis_version) 64 | ngx.say(st["cls:redis"][1][1].msg) 65 | ngx.say(st["cls:redis"][1][1].replication.role) 66 | ngx.say(st["cls:redis"][1][1].replication.master_link_status) 67 | ngx.say(st["cls:redis"][1][1].replication.master_host) 68 | ngx.say(st["cls:redis"][1][1].replication.master_port) 69 | ngx.say(st["cls:redis"][1][1].replication.master_link_down_since_seconds) 70 | ngx.say(st["cls:redis"][1][1].replication.master_last_io_seconds_ago) 71 | 72 | ngx.say("") 73 | ngx.sleep(2) 74 | local st = checkups.get_status() 75 | ngx.say(st["cls:redis"][1][1].status) 76 | ngx.say(st["cls:redis"][1][1].redis_version) 77 | ngx.say(st["cls:redis"][1][1].msg) 78 | ngx.say(st["cls:redis"][1][1].replication.role) 79 | ngx.say(st["cls:redis"][1][1].replication.master_link_status) 80 | ngx.say(st["cls:redis"][1][1].replication.master_host) 81 | ngx.say(st["cls:redis"][1][1].replication.master_port) 82 | ngx.say(st["cls:redis"][1][1].replication.master_link_down_since_seconds) 83 | ngx.say(st["cls:redis"][1][1].replication.master_last_io_seconds_ago) 84 | '; 85 | } 86 | --- request 87 | GET /t 88 | --- response_body_like 89 | 127.0.0.1:6379 OK 90 | ok 91 | \d\.\d\.\d 92 | null 93 | master|slave 94 | nil|down|up 95 | nil|\d.\d.\d.\d 96 | nil|\d 97 | nil|\d 98 | nil|\d 99 | 100 | ok 101 | \d\.\d\.\d 102 | null 103 | master|slave 104 | nil|down|up 105 | nil|\d.\d.\d.\d 106 | nil|\d 107 | nil|\d 108 | nil|\d 109 | --- timeout: 10 110 | -------------------------------------------------------------------------------- /t/get_status.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 404; 33 | } 34 | } 35 | 36 | server { 37 | listen 12360; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_api" 45 | local checkups = require "resty.checkups" 46 | checkups.init(config) 47 | '; 48 | 49 | init_worker_by_lua ' 50 | local checkups = require "resty.checkups" 51 | local config = require "config_api" 52 | checkups.prepare_checker(config) 53 | checkups.create_checker() 54 | '; 55 | 56 | }; 57 | 58 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 59 | $ENV{TEST_NGINX_USE_HUP} = 1; 60 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 61 | #no_diff(); 62 | no_long_string(); 63 | 64 | run_tests(); 65 | 66 | __DATA__ 67 | 68 | === TEST 1: get status 69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location = /t { 72 | access_log off; 73 | content_by_lua ' 74 | local checkups = require "resty.checkups" 75 | ngx.sleep(1) 76 | local st = checkups.get_status() 77 | ngx.say(st["cls:api"][1][1].status) 78 | ngx.say(st["cls:api"][1][2].status) 79 | ngx.say(st["cls:api"][1][3].status) 80 | 
ngx.say(st["cls:api"][1][3].msg) 81 | ngx.say(st["cls:api"][2][1].status) 82 | ngx.say(st["cls:api"][2][2].status) 83 | ngx.say(st["cls:api"][2][2].msg) 84 | ngx.say(st["cls:acm"][1][1].status) 85 | '; 86 | } 87 | --- request 88 | GET /t 89 | --- response_body 90 | ok 91 | ok 92 | err 93 | connection refused 94 | ok 95 | err 96 | connection refused 97 | unchecked 98 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 99 | --- grep_error_log_out 100 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 101 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 102 | 103 | 104 | === TEST 2: get status with passive 105 | --- http_config eval: $::HttpConfig 106 | --- config 107 | location = /t { 108 | access_log off; 109 | content_by_lua ' 110 | local checkups = require "resty.checkups" 111 | ngx.sleep(1) 112 | local st = checkups.get_status() 113 | ngx.say(st["cls:api"][1][1].status) 114 | ngx.say(st["cls:api"][1][2].status) 115 | ngx.say(st["cls:api"][1][3].status) 116 | ngx.say(st["cls:api"][1][3].msg) 117 | ngx.say(st["cls:api"][2][1].status) 118 | ngx.say(st["cls:api"][2][2].status) 119 | ngx.say(st["cls:api"][2][2].msg) 120 | 121 | local cb_err = function(host, port) 122 | ngx.say(host .. ":" .. port .. " " .. "ERR") 123 | end 124 | checkups.ready_ok("api", cb_err) 125 | 126 | local st = checkups.get_status() 127 | ngx.say(st["cls:api"][1][1].status) 128 | ngx.say(st["cls:api"][1][1].msg) 129 | '; 130 | } 131 | --- request 132 | GET /t 133 | --- response_body 134 | ok 135 | ok 136 | err 137 | connection refused 138 | ok 139 | err 140 | connection refused 141 | 127.0.0.1:12354 ERR 142 | 127.0.0.1:12355 ERR 143 | ok 144 | null 145 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 146 | --- grep_error_log_out 147 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 148 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 149 | 150 | 151 | === TEST 3: clear fail counter 152 | --- http_config eval: $::HttpConfig 153 | --- config 154 | location = /t { 155 | access_log off; 156 | content_by_lua ' 157 | local checkups = require "resty.checkups" 158 | ngx.sleep(1) 159 | 160 | local cb_err = function(host, port) 161 | ngx.say(host .. ":" .. port .. " " .. 
"ERR") 162 | end 163 | checkups.ready_ok("api", cb_err) 164 | 165 | local st = checkups.get_status() 166 | ngx.say(st["cls:api"][1][1].status) 167 | ngx.say(st["cls:api"][1][1].msg) 168 | 169 | ngx.sleep(2) 170 | local st = checkups.get_status() 171 | ngx.say(st["cls:api"][1][1].status) 172 | ngx.say(st["cls:api"][1][1].msg) 173 | '; 174 | } 175 | --- request 176 | GET /t 177 | --- response_body 178 | 127.0.0.1:12354 ERR 179 | 127.0.0.1:12355 ERR 180 | ok 181 | null 182 | ok 183 | null 184 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 185 | --- grep_error_log_out 186 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 187 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 188 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 189 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 190 | --- timeout: 10 191 | -------------------------------------------------------------------------------- /t/heartbeat_cb.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 502; 33 | } 34 | } 35 | 36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | init_worker_by_lua ' 44 | local checkups = require "resty.checkups" 45 | checkups.create_checker() 46 | '; 47 | }; 48 | 49 | our $InitConfig = qq{ 50 | init_by_lua ' 51 | local config = require "config_api" 52 | local checkups = require "resty.checkups" 53 | -- customize heartbeat callback 54 | config.api.heartbeat = function(host, port, ups) 55 | return checkups.STATUS_ERR, "down" 56 | end 57 | checkups.init(config) 58 | checkups.prepare_checker(config) 59 | '; 60 | }; 61 | 62 | our $_InitConfig = qq{ 63 | init_by_lua ' 64 | local config = require "config_api" 65 | local checkups = require "resty.checkups" 66 | local flag = true 67 | -- customize heartbeat callback 68 | config.api.heartbeat = function(host, port, ups) 69 | if flag then 70 | flag = false 71 | return checkups.STATUS_OK 72 | end 73 | return checkups.STATUS_ERR, "down" 74 | end 75 | checkups.init(config) 76 | checkups.prepare_checker(config) 77 | '; 78 | }; 79 | 80 | 81 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 82 | $ENV{TEST_NGINX_USE_HUP} = 1; 83 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 84 | #no_diff(); 85 | no_long_string(); 86 | 87 | run_tests(); 88 | 89 | __DATA__ 90 | 91 | === TEST 1: http 92 | --- http_config eval 93 | "$::HttpConfig" . "$::InitConfig" 94 | --- config 95 | location = /t { 96 | access_log off; 97 | content_by_lua ' 98 | local checkups = require "resty.checkups" 99 | ngx.sleep(2) 100 | local cb_ok = function(host, port) 101 | ngx.say(host .. ":" .. 
port) 102 | return checkups.STATUS_OK 103 | end 104 | 105 | local ok, err = checkups.ready_ok("api", cb_ok) 106 | if err then 107 | ngx.say(err) 108 | end 109 | local ok, err = checkups.ready_ok("api", cb_ok) 110 | if err then 111 | ngx.say(err) 112 | end 113 | 114 | local st = checkups.get_status() 115 | ngx.say(st["cls:api"][1][1].status) 116 | ngx.say(st["cls:api"][1][1].msg) 117 | ngx.say(st["cls:api"][2][2].status) 118 | ngx.say(st["cls:api"][2][2].msg) 119 | '; 120 | } 121 | --- request 122 | GET /t 123 | --- response_body 124 | 127.0.0.1:12354 125 | 127.0.0.1:12354 126 | unstable 127 | down 128 | err 129 | down 130 | --- no_error_log 131 | --- timeout: 10 132 | 133 | === TEST 2: all servers are down at the heartbeat phase 134 | --- http_config eval 135 | "$::HttpConfig" . "$::_InitConfig" 136 | --- config 137 | location = /t { 138 | access_log off; 139 | content_by_lua ' 140 | local checkups = require "resty.checkups" 141 | ngx.sleep(12) 142 | local cb_ok = function(host, port) 143 | ngx.say(host .. ":" .. port) 144 | return checkups.STATUS_OK 145 | end 146 | 147 | local ok, err = checkups.ready_ok("api", cb_ok) 148 | if err then 149 | ngx.say(err) 150 | end 151 | local ok, err = checkups.ready_ok("api", cb_ok) 152 | if err then 153 | ngx.say(err) 154 | end 155 | 156 | local st = checkups.get_status() 157 | ngx.say(st["cls:api"][1][1].status) 158 | ngx.say(st["cls:api"][1][1].msg) 159 | ngx.say(st["cls:api"][1][2].status) 160 | ngx.say(st["cls:api"][1][2].msg) 161 | ngx.say(st["cls:api"][1][3].status) 162 | ngx.say(st["cls:api"][1][3].msg) 163 | 164 | ngx.say(st["cls:api"][2][1].status) 165 | ngx.say(st["cls:api"][2][1].msg) 166 | ngx.say(st["cls:api"][2][2].status) 167 | ngx.say(st["cls:api"][2][2].msg) 168 | '; 169 | } 170 | --- request 171 | GET /t 172 | --- response_body 173 | 127.0.0.1:12354 174 | 127.0.0.1:12354 175 | unstable 176 | down 177 | err 178 | down 179 | err 180 | down 181 | err 182 | down 183 | err 184 | down 185 | --- no_error_log 186 | --- timeout: 20 187 | -------------------------------------------------------------------------------- /t/http.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 502; 33 | } 34 | } 35 | 36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | server { 44 | listen 12357; 45 | location = /status { 46 | content_by_lua ' 47 | ngx.sleep(3) 48 | ngx.status = 200 49 | '; 50 | } 51 | } 52 | 53 | init_by_lua ' 54 | local config = require "config_http" 55 | local checkups = require "resty.checkups" 56 | checkups.init(config) 57 | '; 58 | 59 | init_worker_by_lua ' 60 | local config = require "config_http" 61 | local checkups = require "resty.checkups" 62 | checkups.prepare_checker(config) 63 | checkups.create_checker() 64 | '; 65 | 66 | }; 67 | 68 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 69 | 
$ENV{TEST_NGINX_USE_HUP} = 1; 70 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 71 | #no_diff(); 72 | no_long_string(); 73 | 74 | run_tests(); 75 | 76 | __DATA__ 77 | 78 | === TEST 1: http 79 | --- http_config eval: $::HttpConfig 80 | --- config 81 | location = /t { 82 | access_log off; 83 | content_by_lua ' 84 | local checkups = require "resty.checkups" 85 | ngx.sleep(5) 86 | local cb_ok = function(host, port) 87 | ngx.say(host .. ":" .. port) 88 | return 1 89 | end 90 | 91 | local ok, err = checkups.ready_ok("status", cb_ok) 92 | if err then 93 | ngx.say(err) 94 | end 95 | local ok, err = checkups.ready_ok("status", cb_ok) 96 | if err then 97 | ngx.say(err) 98 | end 99 | '; 100 | } 101 | --- request 102 | GET /t 103 | --- response_body 104 | 127.0.0.1:12354 105 | 127.0.0.1:12356 106 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 107 | --- grep_error_log_out 108 | failed to receive status line from: 127.0.0.1:12357, timeout 109 | failed to connect: 127.0.0.1:12360, connection refused 110 | failed to connect: 127.0.0.1:12361, connection refused 111 | --- timeout: 10 112 | 113 | 114 | === TEST 2: fail with status code 115 | --- http_config eval: $::HttpConfig 116 | --- config 117 | location = /t { 118 | access_log off; 119 | content_by_lua ' 120 | local checkups = require "resty.checkups" 121 | ngx.sleep(5) 122 | local cb = function(host, port) 123 | ngx.say(host .. ":" .. port) 124 | return {status = 502} 125 | end 126 | 127 | local ok = checkups.ready_ok("status", cb) 128 | '; 129 | } 130 | --- request 131 | GET /t 132 | --- response_body 133 | 127.0.0.1:12354 134 | 127.0.0.1:12356 135 | --- grep_error_log eval: qr/failed to connect: 127.0.0.1:\d+, connection refused|failed to receive status line from: 127.0.0.1:\d+, timeout/ 136 | --- grep_error_log_out 137 | failed to receive status line from: 127.0.0.1:12357, timeout 138 | failed to connect: 127.0.0.1:12360, connection refused 139 | failed to connect: 127.0.0.1:12361, connection refused 140 | --- timeout: 10 141 | -------------------------------------------------------------------------------- /t/lib/config_api.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.api = { 10 | timeout = 2, 11 | try = 2, 12 | 13 | cluster = { 14 | { -- level 1 15 | servers = { 16 | { host = "127.0.0.1", port = 12354 }, 17 | { host = "127.0.0.1", port = 12355 }, 18 | { host = "127.0.0.1", port = 12356 }, 19 | } 20 | }, 21 | { -- level 2 22 | servers = { 23 | { host = "127.0.0.1", port = 12360 }, 24 | { host = "127.0.0.1", port = 12361 }, 25 | } 26 | }, 27 | }, 28 | } 29 | 30 | _M.acm = { 31 | enable = false, 32 | 33 | cluster = { 34 | { 35 | servers = { 36 | { host = "127.0.0.1", port = 1234 }, 37 | } 38 | } 39 | } 40 | } 41 | 42 | return _M 43 | -------------------------------------------------------------------------------- /t/lib/config_down.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | 8 | ups_status_sync_enable = true, 9 | ups_status_timer_interval = 1, 10 | } 11 | 12 | _M.api = { 13 | timeout = 2, 14 | 15 | cluster = { 16 | { -- level 1 17 | upstream = "api.com", 18 | }, 19 | }, 20 | } 21 | 22 | return _M 23 | 
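-- A minimal usage sketch for this config (illustrative only; it assumes the
-- nginx "upstream api.com" block defined in t/ups_down.t, and that checkups.init,
-- prepare_checker and create_checker have already run, as the tests do):
--
--   local checkups = require "resty.checkups"
--   local ok, err = checkups.ready_ok("api", function(host, port)
--       -- talk to host:port here; returning 1 marks the try as successful
--       return 1
--   end)
--   if not ok then ngx.log(ngx.ERR, err) end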
-------------------------------------------------------------------------------- /t/lib/config_dyconfig.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 5, 6 | checkup_timer_overtime = 10, 7 | checkup_shd_sync_enable = true, 8 | shd_config_timer_interval = 0.5, 9 | } 10 | 11 | _M.ups1 = { 12 | timeout = 2, 13 | try = 2, 14 | 15 | cluster = { 16 | { -- level 1 17 | servers = { 18 | } 19 | }, 20 | { -- level 2 21 | servers = { 22 | } 23 | }, 24 | }, 25 | } 26 | 27 | _M.ups2 = { 28 | timeout = 2, 29 | try = 2, 30 | 31 | cluster = { 32 | { -- level 1 33 | servers = { 34 | { host = "127.0.0.1", port = 12350 }, 35 | } 36 | }, 37 | { -- level 2 38 | servers = { 39 | } 40 | }, 41 | }, 42 | } 43 | 44 | _M.ups3 = { 45 | timeout = 2, 46 | try = 2, 47 | 48 | cluster = { 49 | { -- level 1 50 | upstream = "api.com", 51 | }, 52 | { -- level 2 53 | upstream = "api.com", 54 | upstream_only_backup = true, 55 | }, 56 | }, 57 | } 58 | 59 | return _M 60 | -------------------------------------------------------------------------------- /t/lib/config_dyconfig_opts.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 5, 6 | checkup_timer_overtime = 10, 7 | checkup_shd_sync_enable = true, 8 | shd_config_timer_interval = 0.5, 9 | } 10 | 11 | _M.dyconfig_rr = { 12 | timeout = 2, 13 | typ = "http", 14 | try = 3, 15 | http_opts = { 16 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 17 | }, 18 | 19 | cluster = { 20 | { -- level 1 21 | servers = { 22 | { host = "127.0.0.1", port = 12355 }, 23 | { host = "127.0.0.1", port = 12356 }, 24 | } 25 | }, 26 | { -- level 2 27 | servers = { 28 | { host = "127.0.0.1", port = 12360 }, 29 | { host = "127.0.0.1", port = 12361 }, 30 | } 31 | }, 32 | }, 33 | } 34 | 35 | _M.dyconfig_hash = { 36 | timeout = 2, 37 | typ = "http", 38 | mode = "hash", 39 | try = 3, 40 | http_opts = { 41 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 42 | }, 43 | 44 | cluster = { 45 | { -- level 1 46 | servers = { 47 | { host = "127.0.0.1", port = 12354 }, 48 | { host = "127.0.0.1", port = 12355 }, 49 | { host = "127.0.0.1", port = 12356 }, 50 | } 51 | }, 52 | { -- level 2 53 | servers = { 54 | { host = "127.0.0.1", port = 12360 }, 55 | { host = "127.0.0.1", port = 12361 }, 56 | } 57 | }, 58 | }, 59 | } 60 | 61 | 62 | _M.dyconfig_rr_http = { 63 | timeout = 2, 64 | typ = "http", 65 | try = 3, 66 | http_opts = { 67 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 68 | statuses = { 69 | ["502"] = false, 70 | ["501"] = false, 71 | ["500"] = false, 72 | }, 73 | }, 74 | 75 | cluster = { 76 | { -- level 1 77 | servers = { 78 | { host = "127.0.0.1", port = 12355 }, 79 | { host = "127.0.0.1", port = 12356 }, 80 | } 81 | }, 82 | { -- level 2 83 | servers = { 84 | { host = "127.0.0.1", port = 12360 }, 85 | { host = "127.0.0.1", port = 12361 }, 86 | } 87 | }, 88 | }, 89 | } 90 | 91 | 92 | return _M 93 | -------------------------------------------------------------------------------- /t/lib/config_fails.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.s1 = { 10 | timeout = 2, 11 | typ = "http", 12 | try = 1, 13 | http_opts = { 14 | statuses = { 15 | ["502"] = false, 16 | }, 17 | }, 18 | 19 | cluster = { 20 | { -- level 1 21 | 
servers = { 22 | { host = "127.0.0.1", port = 12354, max_fails = 2, fail_timeout = 2 }, 23 | { host = "127.0.0.1", port = 12355, max_fails = 0 }, 24 | } 25 | }, 26 | { -- level 2 27 | servers = { 28 | { host = "127.0.0.1", port = 12356 }, 29 | } 30 | }, 31 | }, 32 | } 33 | 34 | _M.s2 = { 35 | timeout = 2, 36 | typ = "http", 37 | try = 1, 38 | http_opts = { 39 | statuses = { 40 | ["502"] = false, 41 | }, 42 | }, 43 | 44 | cluster = { 45 | { -- level 1 46 | servers = { 47 | { host = "127.0.0.1", port = 12354, max_fails = 1, fail_timeout = 2 }, 48 | { host = "127.0.0.1", port = 12355, max_fails = 1, fail_timeout = 2 }, 49 | } 50 | }, 51 | }, 52 | } 53 | 54 | _M.s3 = { 55 | timeout = 2, 56 | typ = "http", 57 | try = 1, 58 | http_opts = { 59 | statuses = { 60 | ["502"] = false, 61 | }, 62 | }, 63 | 64 | cluster = { 65 | { -- level 1 66 | servers = { 67 | { host = "127.0.0.1", port = 12354, max_fails = 1, fail_timeout = 2 }, 68 | { host = "127.0.0.1", port = 12355, max_fails = 1, fail_timeout = 2 }, 69 | } 70 | }, 71 | { -- level 2 72 | servers = { 73 | { host = "127.0.0.1", port = 12356, max_fails = 1, fail_timeout = 2 }, 74 | } 75 | }, 76 | }, 77 | } 78 | 79 | return _M 80 | -------------------------------------------------------------------------------- /t/lib/config_feedback.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.status = { 10 | timeout = 5, 11 | typ = "http", 12 | try = 1, 13 | http_opts = { 14 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 15 | statuses = { 16 | ["502"] = false, 17 | }, 18 | }, 19 | 20 | cluster = { 21 | { -- level 1 22 | servers = { 23 | { host = "127.0.0.1", port = 12354, max_fails = 1 }, 24 | { host = "127.0.0.1", port = 12355 }, 25 | { host = "127.0.0.1", port = 12356, max_fails = 1 }, 26 | { host = "127.0.0.1", port = 12357 }, 27 | } 28 | }, 29 | { -- level 2 30 | servers = { 31 | { host = "127.0.0.1", port = 12360 }, 32 | { host = "127.0.0.1", port = 12361 }, 33 | } 34 | }, 35 | }, 36 | } 37 | 38 | return _M 39 | -------------------------------------------------------------------------------- /t/lib/config_hash.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.hash = { 10 | timeout = 2, 11 | typ = "http", 12 | mode = "hash", 13 | http_opts = { 14 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 15 | statuses = { 16 | ["502"] = false, 17 | }, 18 | }, 19 | 20 | cluster = { 21 | { -- level 1 22 | try = 3, 23 | hash_backup_node = 1, 24 | servers = { 25 | { host = "127.0.0.1", port = 12354 }, 26 | { host = "127.0.0.1", port = 12355 }, 27 | { host = "127.0.0.1", port = 12356 }, 28 | } 29 | }, 30 | { -- level 2 31 | servers = { 32 | { host = "127.0.0.1", port = 12360 }, 33 | { host = "127.0.0.1", port = 12361 }, 34 | } 35 | }, 36 | }, 37 | } 38 | 39 | return _M 40 | -------------------------------------------------------------------------------- /t/lib/config_http.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.status = { 10 | timeout = 2, 11 | typ = "http", 12 | try = 2, 13 | http_opts = { 14 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 15 | statuses = { 16 | 
["502"] = false, 17 | }, 18 | }, 19 | 20 | cluster = { 21 | { -- level 1 22 | servers = { 23 | { host = "127.0.0.1", port = 12354 }, 24 | { host = "127.0.0.1", port = 12355 }, 25 | { host = "127.0.0.1", port = 12356 }, 26 | { host = "127.0.0.1", port = 12357 }, 27 | } 28 | }, 29 | { -- level 2 30 | servers = { 31 | { host = "127.0.0.1", port = 12360 }, 32 | { host = "127.0.0.1", port = 12361 }, 33 | } 34 | }, 35 | }, 36 | } 37 | 38 | return _M 39 | -------------------------------------------------------------------------------- /t/lib/config_key.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.upyun = { 10 | timeout = 2, 11 | typ = "http", 12 | try = 5, 13 | http_opts = { 14 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 15 | statuses = { 16 | ["502"] = false, 17 | ["501"] = false, 18 | ["500"] = false, 19 | }, 20 | }, 21 | 22 | cluster = { 23 | c1 = { 24 | servers = { 25 | { host = "127.0.0.1", port = 12354 }, 26 | { host = "127.0.0.1", port = 12355 }, 27 | { host = "127.0.0.1", port = 12356 }, 28 | { host = "127.0.0.1", port = 12357 }, 29 | } 30 | }, 31 | c2 = { 32 | servers = { 33 | { host = "127.0.0.1", port = 12355 }, 34 | { host = "127.0.0.1", port = 12356 }, 35 | { host = "127.0.0.1", port = 12357 }, 36 | { host = "127.0.0.1", port = 12354 }, 37 | } 38 | }, 39 | }, 40 | } 41 | 42 | return _M 43 | -------------------------------------------------------------------------------- /t/lib/config_redis.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.redis = { 10 | enable = true, 11 | typ = "redis", 12 | 13 | cluster = { 14 | { 15 | servers = { 16 | { host = "127.0.0.1", port = 6379 }, 17 | } 18 | } 19 | } 20 | } 21 | 22 | 23 | return _M 24 | -------------------------------------------------------------------------------- /t/lib/config_round_robin.lua: -------------------------------------------------------------------------------- 1 | _M = {} 2 | 3 | _M.global = { 4 | checkup_timer_interval = 2, 5 | checkup_timer_overtime = 10, 6 | } 7 | 8 | _M.single_host = { 9 | cluster = { 10 | { 11 | servers = { 12 | { host = "127.0.0.1", port = 12350 }, 13 | } 14 | } 15 | } 16 | } 17 | 18 | _M.single_level = { 19 | timeout = 2, 20 | 21 | cluster = { 22 | { -- level 1 23 | servers = { 24 | { host = "127.0.0.1", port = 12351, weight = 1 }, 25 | { host = "127.0.0.1", port = 12352, weight = 4 }, 26 | { host = "127.0.0.1", port = 12353, weight = 3 }, 27 | { host = "127.0.0.1", port = 12355, weight = 6 }, 28 | } 29 | }, 30 | }, 31 | } 32 | 33 | _M.multi_level = { 34 | timeout = 2, 35 | try = 8, 36 | 37 | cluster = { 38 | { -- level 1 39 | servers = { 40 | { host = "127.0.0.1", port = 12354, weight = 2 }, -- fake 41 | { host = "127.0.0.1", port = 12355, weight = 3 }, 42 | { host = "127.0.0.1", port = 12356, weight = 2 }, 43 | } 44 | }, 45 | { -- level 2 46 | servers = { 47 | { host = "127.0.0.1", port = 12357, weight = 3 }, -- fake 48 | { host = "127.0.0.1", port = 12358, weight = 2 }, 49 | { host = "127.0.0.1", port = 12359, weight = 2 }, -- fake 50 | } 51 | }, 52 | }, 53 | } 54 | 55 | _M.single_key = { 56 | timeout = 2, 57 | 58 | cluster = { 59 | c1 = { 60 | servers = { 61 | { host = "127.0.0.1", port = 12351, weight = 1 }, 62 | { host = "127.0.0.1", port = 12352, weight = 2 }, 63 | 
{ host = "127.0.0.1", port = 12353, weight = 3 }, 64 | } 65 | }, 66 | }, 67 | } 68 | 69 | _M.multi_key = { 70 | timeout = 2, 71 | try = 6, 72 | 73 | cluster = { 74 | c1 = { 75 | servers = { 76 | { host = "127.0.0.1", port = 12354, weight = 2 }, -- fake 77 | { host = "127.0.0.1", port = 12355, weight = 3 }, 78 | { host = "127.0.0.1", port = 12356, weight = 2 }, 79 | } 80 | }, 81 | 82 | c2 = { 83 | servers = { 84 | { host = "127.0.0.1", port = 12357, weight = 3 }, -- fake 85 | { host = "127.0.0.1", port = 12358, weight = 1 }, 86 | { host = "127.0.0.1", port = 12359, weight = 2 }, -- fake 87 | } 88 | }, 89 | }, 90 | } 91 | 92 | 93 | _M.multi_fake_c1 = { 94 | timeout = 2, 95 | try = 6, 96 | 97 | cluster = { 98 | c1 = { 99 | servers = { 100 | { host = "127.0.0.1", port = 12357, weight = 3000 }, -- fake 101 | { host = "127.0.0.1", port = 12359, weight = 3200000 }, -- fake 102 | { host = "127.0.0.1", port = 12356, weight = 1 }, 103 | } 104 | }, 105 | 106 | c2 = { 107 | servers = { 108 | { host = "127.0.0.1", port = 12357, weight = 3 }, -- fake 109 | { host = "127.0.0.1", port = 12358, weight = 1 }, 110 | { host = "127.0.0.1", port = 12359, weight = 2 }, -- fake 111 | } 112 | }, 113 | }, 114 | } 115 | 116 | 117 | return _M 118 | -------------------------------------------------------------------------------- /t/lib/config_timeout.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 200, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | 10 | _M.amount = { 11 | try = 6, 12 | http_opts = { 13 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 14 | statuses = { 15 | ["502"] = false, 16 | }, 17 | }, 18 | 19 | cluster = { 20 | { 21 | servers = { 22 | { host = "127.0.0.1", port = 12358 }, 23 | { host = "127.0.0.1", port = 12359 }, 24 | { host = "127.0.0.1", port = 12360 }, 25 | { host = "127.0.0.1", port = 12361 }, 26 | } 27 | }, 28 | }, 29 | } 30 | 31 | 32 | _M.amount_ups = { 33 | try = 6, 34 | try_timeout = 4.1, 35 | http_opts = { 36 | query = "GET /status HTTP/1.1\r\nHost: localhost\r\n\r\n", 37 | statuses = { 38 | ["502"] = false, 39 | }, 40 | }, 41 | 42 | cluster = { 43 | { 44 | servers = { 45 | { host = "127.0.0.1", port = 12358 }, 46 | { host = "127.0.0.1", port = 12359 }, 47 | { host = "127.0.0.1", port = 12360 }, 48 | { host = "127.0.0.1", port = 12361 }, 49 | } 50 | }, 51 | }, 52 | } 53 | 54 | 55 | return _M 56 | 57 | -------------------------------------------------------------------------------- /t/lib/config_unprotected_api.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | } 8 | 9 | _M.api = { 10 | timeout = 2, 11 | try = 2, 12 | 13 | protected = false, 14 | 15 | cluster = { 16 | { -- level 1 17 | servers = { 18 | { host = "127.0.0.1", port = 12354 }, 19 | { host = "127.0.0.1", port = 12355 }, 20 | { host = "127.0.0.1", port = 12356 }, 21 | } 22 | }, 23 | { -- level 2 24 | servers = { 25 | { host = "127.0.0.1", port = 12360 }, 26 | { host = "127.0.0.1", port = 12361 }, 27 | } 28 | }, 29 | }, 30 | } 31 | 32 | return _M 33 | -------------------------------------------------------------------------------- /t/lib/config_ups.lua: -------------------------------------------------------------------------------- 1 | 2 | _M = {} 3 | 4 | _M.global = { 5 | checkup_timer_interval = 2, 6 | checkup_timer_overtime = 10, 7 | 8 | ups_status_sync_enable = true, 9 
| ups_status_timer_interval = 1, 10 | } 11 | 12 | _M.api = { 13 | timeout = 2, 14 | 15 | cluster = { 16 | { -- level 1 17 | upstream = "api.com", 18 | }, 19 | { -- level 2 20 | upstream = "api.com", 21 | upstream_only_backup = true, 22 | }, 23 | }, 24 | } 25 | 26 | return _M 27 | -------------------------------------------------------------------------------- /t/max_fails.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(1); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | listen 12355; 25 | listen 12356; 26 | location /b { 27 | lua_need_request_body on; 28 | content_by_lua ' 29 | local args = ngx.req.get_uri_args() 30 | if args then 31 | ngx.status = args.code 32 | ngx.print(args.host .. ":" .. args.port .. ":" .. args.code) 33 | end 34 | '; 35 | } 36 | } 37 | 38 | init_by_lua ' 39 | local config = require "config_fails" 40 | local checkups = require "resty.checkups" 41 | checkups.init(config) 42 | '; 43 | 44 | init_worker_by_lua ' 45 | local checkups = require "resty.checkups" 46 | local config = require "config_fails" 47 | checkups.prepare_checker(config) 48 | checkups.create_checker() 49 | '; 50 | 51 | }; 52 | 53 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 54 | $ENV{TEST_NGINX_USE_HUP} = 1; 55 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 56 | #no_diff(); 57 | no_long_string(); 58 | 59 | run_tests(); 60 | 61 | __DATA__ 62 | 63 | === TEST 1: max_fails timeout 64 | --- http_config eval: $::HttpConfig 65 | --- config 66 | location = /t { 67 | access_log off; 68 | content_by_lua ' 69 | local checkups = require "resty.checkups" 70 | 71 | local res = checkups.ready_ok("s1", function(host, port) 72 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 200 } }) 73 | if r then ngx.say(r.body) else ngx.say("ERR") end 74 | return r 75 | end) 76 | 77 | for i=1, 10 do 78 | local res = checkups.ready_ok("s1", function(host, port) 79 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 80 | if r then ngx.say(r.body) else ngx.say("ERR") end 81 | return r 82 | end) 83 | end 84 | 85 | ngx.sleep(2.5) 86 | 87 | for i=1, 10 do 88 | local res = checkups.ready_ok("s1", function(host, port) 89 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 90 | if r then ngx.say(r.body) else ngx.say("ERR") end 91 | return r 92 | end) 93 | end 94 | '; 95 | } 96 | location /a { 97 | proxy_pass http://127.0.0.1:$arg_port/b?code=$arg_code&host=$arg_host&port=$arg_port; 98 | } 99 | --- request 100 | GET /t 101 | --- response_body 102 | 127.0.0.1:12354:200 103 | 127.0.0.1:12355:502 104 | 127.0.0.1:12354:502 105 | 127.0.0.1:12355:502 106 | 127.0.0.1:12354:502 107 | 127.0.0.1:12355:502 108 | 127.0.0.1:12355:502 109 | 127.0.0.1:12355:502 110 | 127.0.0.1:12355:502 111 | 127.0.0.1:12355:502 112 | 127.0.0.1:12355:502 113 | 127.0.0.1:12355:502 114 | 127.0.0.1:12354:502 115 | 127.0.0.1:12355:502 116 | 127.0.0.1:12354:502 117 | 127.0.0.1:12355:502 118 | 127.0.0.1:12355:502 119 | 127.0.0.1:12355:502 120 | 
127.0.0.1:12355:502 121 | 127.0.0.1:12355:502 122 | 127.0.0.1:12355:502 123 | --- timeout: 10 124 | 125 | 126 | === TEST 2: the last server will not be marked down 127 | --- http_config eval: $::HttpConfig 128 | --- config 129 | location = /t { 130 | access_log off; 131 | content_by_lua ' 132 | local checkups = require "resty.checkups" 133 | 134 | for i=1, 5 do 135 | local res = checkups.ready_ok("s2", function(host, port) 136 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 137 | if r then ngx.say(r.body) else ngx.say("ERR") end 138 | return r 139 | end) 140 | end 141 | 142 | ngx.sleep(2.5) 143 | 144 | for i=1, 5 do 145 | local res = checkups.ready_ok("s2", function(host, port) 146 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 147 | if r then ngx.say(r.body) else ngx.say("ERR") end 148 | return r 149 | end) 150 | end 151 | '; 152 | } 153 | location /a { 154 | proxy_pass http://127.0.0.1:$arg_port/b?code=$arg_code&host=$arg_host&port=$arg_port; 155 | } 156 | --- request 157 | GET /t 158 | --- response_body 159 | 127.0.0.1:12354:502 160 | 127.0.0.1:12355:502 161 | 127.0.0.1:12355:502 162 | 127.0.0.1:12355:502 163 | 127.0.0.1:12355:502 164 | 127.0.0.1:12355:502 165 | 127.0.0.1:12354:502 166 | 127.0.0.1:12355:502 167 | 127.0.0.1:12355:502 168 | 127.0.0.1:12355:502 169 | --- timeout: 10 170 | 171 | 172 | === TEST 3: backup server 173 | --- http_config eval: $::HttpConfig 174 | --- config 175 | location = /t { 176 | access_log off; 177 | content_by_lua ' 178 | local checkups = require "resty.checkups" 179 | 180 | for i=1, 5 do 181 | local res = checkups.ready_ok("s3", function(host, port) 182 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 183 | if r then ngx.say(r.body) else ngx.say("ERR") end 184 | return r 185 | end) 186 | end 187 | 188 | ngx.sleep(2.5) 189 | 190 | for i=1, 5 do 191 | local res = checkups.ready_ok("s3", function(host, port) 192 | local r = ngx.location.capture("/a", { args = { host = host, port = port, code = 502 } }) 193 | if r then ngx.say(r.body) else ngx.say("ERR") end 194 | return r 195 | end) 196 | end 197 | '; 198 | } 199 | location /a { 200 | proxy_pass http://127.0.0.1:$arg_port/b?code=$arg_code&host=$arg_host&port=$arg_port; 201 | } 202 | --- request 203 | GET /t 204 | --- response_body 205 | 127.0.0.1:12354:502 206 | 127.0.0.1:12355:502 207 | 127.0.0.1:12356:502 208 | 127.0.0.1:12356:502 209 | 127.0.0.1:12356:502 210 | 127.0.0.1:12355:502 211 | 127.0.0.1:12354:502 212 | 127.0.0.1:12356:502 213 | 127.0.0.1:12356:502 214 | 127.0.0.1:12356:502 215 | --- timeout: 10 216 | -------------------------------------------------------------------------------- /t/passive.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 404; 33 | } 34 | } 35 | 
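# Note: the server above returns 404 and the next one 503. config_api sets no
# "typ", so the heartbeat is presumably the default plain TCP connect check;
# TEST 1 below confirms round robin keeps cycling through all three level-1
# servers despite the non-2xx /status responses.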
36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 503; 40 | } 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_api" 45 | local checkups = require "resty.checkups" 46 | checkups.init(config) 47 | '; 48 | 49 | init_worker_by_lua ' 50 | local checkups = require "resty.checkups" 51 | local config = require "config_api" 52 | checkups.prepare_checker(config) 53 | checkups.create_checker() 54 | '; 55 | 56 | }; 57 | 58 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 59 | $ENV{TEST_NGINX_USE_HUP} = 1; 60 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 61 | #no_diff(); 62 | no_long_string(); 63 | 64 | run_tests(); 65 | 66 | __DATA__ 67 | 68 | === TEST 1: ready_ok 69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location = /t { 72 | access_log off; 73 | content_by_lua ' 74 | local checkups = require "resty.checkups" 75 | local cb = function(host, port) 76 | ngx.say(host .. ":" .. port) 77 | return 1 78 | end 79 | checkups.ready_ok("api", cb) 80 | checkups.ready_ok("api", cb) 81 | checkups.ready_ok("api", cb) 82 | checkups.ready_ok("api", cb) 83 | checkups.ready_ok("api", cb) 84 | checkups.ready_ok("api", cb) 85 | '; 86 | } 87 | --- request 88 | GET /t 89 | --- response_body 90 | 127.0.0.1:12354 91 | 127.0.0.1:12355 92 | 127.0.0.1:12356 93 | 127.0.0.1:12354 94 | 127.0.0.1:12355 95 | 127.0.0.1:12356 96 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 97 | --- grep_error_log_out 98 | cb_heartbeat(): failed to connect: 127.0.0.1:12360, connection refused 99 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 100 | 101 | 102 | === TEST 2: max_acc_fails 103 | --- http_config eval: $::HttpConfig 104 | --- config 105 | location = /t { 106 | access_log off; 107 | content_by_lua ' 108 | local checkups = require "resty.checkups" 109 | local cb = function(host, port) 110 | ngx.say(host .. ":" .. port) 111 | return 1 112 | end 113 | checkups.ready_ok("api", function(host, port) 114 | ngx.say(host .. ":" .. port .. " " .. "ERR") 115 | end) 116 | checkups.ready_ok("api", cb) 117 | checkups.ready_ok("api", cb) 118 | checkups.ready_ok("api", cb) 119 | '; 120 | } 121 | --- request 122 | GET /t 123 | --- response_body 124 | 127.0.0.1:12354 ERR 125 | 127.0.0.1:12355 ERR 126 | 127.0.0.1:12356 127 | 127.0.0.1:12355 128 | 127.0.0.1:12356 129 | 130 | 131 | === TEST 3: no server available 132 | --- http_config eval: $::HttpConfig 133 | --- config 134 | location = /t { 135 | access_log off; 136 | content_by_lua ' 137 | local checkups = require "resty.checkups" 138 | local cb_ok = function(host, port) 139 | ngx.say(host .. ":" .. port) 140 | return 1 141 | end 142 | local cb_err = function(host, port) 143 | ngx.say(host .. ":" .. port .. " " .. 
"ERR") 144 | return nil, "max try exceeded" 145 | end 146 | 147 | local ok, err = checkups.ready_ok("api", cb_err) 148 | if err then 149 | ngx.say(err) 150 | end 151 | local ok, err = checkups.ready_ok("api", cb_err) 152 | if err then 153 | ngx.say(err) 154 | end 155 | local ok, err = checkups.ready_ok("api", cb_ok) 156 | if err then 157 | ngx.say(err) 158 | end 159 | local ok, err = checkups.ready_ok("api", cb_ok) 160 | if err then 161 | ngx.say(err) 162 | end 163 | '; 164 | } 165 | --- request 166 | GET /t 167 | --- response_body 168 | 127.0.0.1:12354 ERR 169 | 127.0.0.1:12355 ERR 170 | max try exceeded 171 | 127.0.0.1:12356 ERR 172 | 127.0.0.1:12355 ERR 173 | max try exceeded 174 | 127.0.0.1:12354 175 | 127.0.0.1:12355 176 | -------------------------------------------------------------------------------- /t/positive.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 404; 33 | } 34 | } 35 | 36 | server { 37 | listen 12360; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_api" 45 | local checkups = require "resty.checkups" 46 | checkups.init(config) 47 | checkups.prepare_checker(config) 48 | '; 49 | 50 | init_worker_by_lua ' 51 | local checkups = require "resty.checkups" 52 | checkups.create_checker() 53 | '; 54 | 55 | }; 56 | 57 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 58 | $ENV{TEST_NGINX_USE_HUP} = 1; 59 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 60 | #no_diff(); 61 | no_long_string(); 62 | 63 | run_tests(); 64 | 65 | __DATA__ 66 | 67 | === TEST 1: check without passive 68 | --- http_config eval: $::HttpConfig 69 | --- config 70 | location = /t { 71 | access_log off; 72 | content_by_lua ' 73 | local checkups = require "resty.checkups" 74 | ngx.sleep(1) 75 | local cb_ok = function(host, port) 76 | ngx.say(host .. ":" .. port) 77 | return 1 78 | end 79 | local cb_err = function(host, port) 80 | ngx.say(host .. ":" .. port .. " " .. 
"ERR") 81 | return nil, "max try exceeded" 82 | end 83 | 84 | local ok, err = checkups.ready_ok("api", cb_err) 85 | if err then 86 | ngx.say(err) 87 | end 88 | local ok, err = checkups.ready_ok("api", cb_err) 89 | if err then 90 | ngx.say(err) 91 | end 92 | local ok, err = checkups.ready_ok("api", cb_ok) 93 | if err then 94 | ngx.say(err) 95 | end 96 | local ok, err = checkups.ready_ok("api", cb_ok) 97 | if err then 98 | ngx.say(err) 99 | end 100 | '; 101 | } 102 | --- request 103 | GET /t 104 | --- response_body 105 | 127.0.0.1:12354 ERR 106 | 127.0.0.1:12355 ERR 107 | max try exceeded 108 | 127.0.0.1:12355 ERR 109 | 127.0.0.1:12354 ERR 110 | max try exceeded 111 | 127.0.0.1:12354 112 | 127.0.0.1:12355 113 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 114 | --- grep_error_log_out 115 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 116 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 117 | 118 | 119 | === TEST 2: check timer 120 | --- http_config eval: $::HttpConfig 121 | --- config 122 | location = /t { 123 | access_log off; 124 | content_by_lua ' 125 | local checkups = require "resty.checkups" 126 | ngx.sleep(3) 127 | '; 128 | } 129 | --- request 130 | GET /t 131 | --- response_body 132 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 133 | --- grep_error_log_out 134 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 135 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 136 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 137 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 138 | --- timeout: 10 139 | -------------------------------------------------------------------------------- /t/round_robin.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et: 2 | 3 | use lib 'lib'; 4 | use Test::Nginx::Socket; 5 | use Cwd qw(cwd); 6 | use Test::Nginx::Socket 'no_plan'; 7 | 8 | workers(4); 9 | 10 | my $pwd = cwd(); 11 | 12 | our $HttpConfig = qq{ 13 | lua_socket_log_errors off; 14 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 15 | error_log logs/error.log debug; 16 | 17 | lua_shared_dict state 10m; 18 | lua_shared_dict mutex 1m; 19 | lua_shared_dict locks 1m; 20 | lua_shared_dict ip_black_lists 10m; 21 | lua_shared_dict round_robin_state 10m; 22 | 23 | server { 24 | listen 12350; 25 | } 26 | 27 | server { 28 | listen 12351; 29 | } 30 | 31 | server { 32 | listen 12352; 33 | } 34 | 35 | server { 36 | listen 12353; 37 | } 38 | 39 | server { 40 | listen 12355; 41 | } 42 | 43 | server { 44 | listen 12356; 45 | } 46 | 47 | server { 48 | listen 12358; 49 | } 50 | 51 | init_worker_by_lua ' 52 | local checkups = require "resty.checkups" 53 | checkups.create_checker() 54 | '; 55 | }; 56 | 57 | our $InitConfig = qq{ 58 | init_by_lua ' 59 | local config = require "config_round_robin" 60 | local checkups = require "resty.checkups" 61 | checkups.init(config) 62 | checkups.prepare_checker(config) 63 | '; 64 | }; 65 | 66 | 67 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 68 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 69 | $ENV{TEST_NGINX_USE_HUP} = 1; 70 | $ENV{TEST_NGINX_PWD} = $pwd; 71 | 72 | #no_diff(); 73 | no_long_string(); 74 | 75 | run_tests(); 76 | 77 | __DATA__ 78 | 79 | === TEST 1: Round robin method, single host 80 | --- http_config eval 81 | "$::HttpConfig" . 
"$::InitConfig" 82 | --- config 83 | location = /t { 84 | content_by_lua ' 85 | local checkups = require "resty.checkups" 86 | ngx.sleep(2) 87 | local cb_ok = function(host, port) 88 | ngx.say(host .. ":" .. port) 89 | return 1 90 | end 91 | 92 | local ok, err = checkups.ready_ok("single_host", cb_ok) 93 | if err then 94 | ngx.say(err) 95 | end 96 | '; 97 | } 98 | --- request 99 | GET /t 100 | --- response_body 101 | 127.0.0.1:12350 102 | 103 | === TEST 2: Round robin is consistent, try by level 104 | --- http_config eval 105 | "$::HttpConfig" . "$::InitConfig" 106 | --- config 107 | location = /t { 108 | content_by_lua ' 109 | local checkups = require "resty.checkups" 110 | ngx.sleep(2) 111 | local dict = { 112 | [12351] = "A", 113 | [12352] = "B", 114 | [12353] = "C", 115 | [12355] = "E", 116 | } 117 | local cb_ok = function(host, port) 118 | ngx.print(dict[port]) 119 | return 1 120 | end 121 | 122 | for i = 1, 28, 1 do 123 | local ok, err = checkups.ready_ok("single_level", cb_ok) 124 | if err then 125 | ngx.say(err) 126 | end 127 | end 128 | '; 129 | } 130 | --- request 131 | GET /t 132 | --- response_body: EBCEBEACEBECBEEBCEBEACEBECBE 133 | 134 | === TEST 3: Round robin with fake hosts, try by level 135 | --- http_config eval 136 | "$::HttpConfig" . "$::InitConfig" 137 | --- config 138 | location = /t { 139 | content_by_lua ' 140 | local checkups = require "resty.checkups" 141 | ngx.sleep(2) 142 | local dict = { 143 | [12355] = "E", 144 | [12356] = "F", 145 | [12358] = "H", 146 | } 147 | local cb_ok = function(host, port) 148 | ngx.print(dict[port]) 149 | return 150 | end 151 | 152 | for i = 1, 5, 1 do 153 | local ok, err = checkups.ready_ok("multi_level", cb_ok) 154 | if i ~= 5 then 155 | ngx.print(" ") 156 | end 157 | end 158 | '; 159 | } 160 | --- request 161 | GET /t 162 | --- response_body: EFH FEH EFH FEH EFH 163 | 164 | === TEST 4: Round robin is consistent, try by key 165 | --- http_config eval 166 | "$::HttpConfig" . "$::InitConfig" 167 | --- config 168 | location = /t { 169 | content_by_lua ' 170 | local checkups = require "resty.checkups" 171 | ngx.sleep(2) 172 | local dict = { 173 | [12351] = "A", 174 | [12352] = "B", 175 | [12353] = "C", 176 | } 177 | local cb_ok = function(host, port) 178 | ngx.print(dict[port]) 179 | return 1 180 | end 181 | 182 | for i = 1, 18, 1 do 183 | local ok, err = checkups.ready_ok("single_key", cb_ok, {cluster_key = {"c1"}}) 184 | if err then 185 | ngx.say(err) 186 | end 187 | end 188 | '; 189 | } 190 | --- request 191 | GET /t 192 | --- response_body: CBACBCCBACBCCBACBC 193 | 194 | === TEST 5: Round robin with fake hosts, try by key 195 | --- http_config eval 196 | "$::HttpConfig" . "$::InitConfig" 197 | --- config 198 | location = /t { 199 | content_by_lua ' 200 | local checkups = require "resty.checkups" 201 | ngx.sleep(2) 202 | local dict = { 203 | [12355] = "E", 204 | [12356] = "F", 205 | [12358] = "H", 206 | } 207 | local cb_ok = function(host, port) 208 | ngx.print(dict[port]) 209 | return 210 | end 211 | 212 | for i = 1, 5, 1 do 213 | local ok, err = checkups.ready_ok("multi_key", cb_ok, {cluster_key = {"c1", "c2"}}) 214 | if i ~= 5 then 215 | ngx.print(" ") 216 | end 217 | end 218 | '; 219 | } 220 | --- request 221 | GET /t 222 | --- response_body: EFH FEH EFH FEH EFH 223 | 224 | === TEST 6: Round robin with multiple fake hosts and large weight, try by key 225 | --- http_config eval 226 | "$::HttpConfig" . 
"$::InitConfig" 227 | --- config 228 | location = /t { 229 | content_by_lua ' 230 | local checkups = require "resty.checkups" 231 | ngx.sleep(2) 232 | local dict = { 233 | [12356] = "F", 234 | [12358] = "H", 235 | } 236 | local cb_ok = function(host, port) 237 | ngx.print(dict[port]) 238 | return 239 | end 240 | 241 | local ok, err = checkups.ready_ok("multi_fake_c1", cb_ok, {cluster_key = {"c1", "c2"}}) 242 | if err then 243 | ngx.print(" ") 244 | ngx.say(err) 245 | end 246 | '; 247 | } 248 | --- request 249 | GET /t 250 | --- response_body 251 | FH no servers available 252 | --- timeout: 10 253 | -------------------------------------------------------------------------------- /t/sensibility.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 200; 33 | } 34 | } 35 | 36 | server { 37 | listen 12360; 38 | location = /status { 39 | return 200; 40 | } 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_api" 45 | local checkups = require "resty.checkups" 46 | config.api.sensibility = 2 47 | checkups.init(config) 48 | checkups.prepare_checker(config) 49 | '; 50 | 51 | init_worker_by_lua ' 52 | local checkups = require "resty.checkups" 53 | checkups.create_checker() 54 | '; 55 | 56 | }; 57 | 58 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 59 | $ENV{TEST_NGINX_USE_HUP} = 1; 60 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 61 | #no_diff(); 62 | no_long_string(); 63 | 64 | run_tests(); 65 | 66 | __DATA__ 67 | 68 | === TEST 1: get status 69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location = /t { 72 | access_log off; 73 | content_by_lua ' 74 | local checkups = require "resty.checkups" 75 | ngx.sleep(1) 76 | local st = checkups.get_status() 77 | ngx.say(st["cls:api"][1][1].status) 78 | ngx.say(st["cls:api"][1][1].fail_num) 79 | ngx.say(st["cls:api"][1][2].status) 80 | ngx.say(st["cls:api"][1][2].fail_num) 81 | ngx.say(st["cls:api"][1][3].status) 82 | ngx.say(st["cls:api"][1][3].fail_num) 83 | ngx.say(st["cls:api"][1][3].msg) 84 | ngx.say(st["cls:api"][2][1].status) 85 | ngx.say(st["cls:api"][2][2].status) 86 | ngx.say(st["cls:api"][2][2].msg) 87 | 88 | ngx.sleep(2) 89 | 90 | st = checkups.get_status() 91 | ngx.say(st["cls:api"][1][1].status) 92 | ngx.say(st["cls:api"][1][1].fail_num) 93 | ngx.say(st["cls:api"][1][3].status) 94 | ngx.say(st["cls:api"][1][3].fail_num) 95 | ngx.say(st["cls:api"][1][3].msg) 96 | '; 97 | } 98 | --- request 99 | GET /t 100 | --- response_body 101 | ok 102 | 0 103 | ok 104 | 0 105 | ok 106 | 1 107 | connection refused 108 | ok 109 | ok 110 | connection refused 111 | ok 112 | 0 113 | err 114 | 2 115 | connection refused 116 | --- grep_error_log eval: qr/cb_heartbeat\(\): failed to connect: 127.0.0.1:\d+, connection refused/ 117 | --- grep_error_log_out 118 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 119 | cb_heartbeat(): failed 
to connect: 127.0.0.1:12361, connection refused 120 | cb_heartbeat(): failed to connect: 127.0.0.1:12356, connection refused 121 | cb_heartbeat(): failed to connect: 127.0.0.1:12361, connection refused 122 | --- timeout: 10 123 | -------------------------------------------------------------------------------- /t/try_timeout.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig1 = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error1.log debug; 17 | access_log logs/access1.log; 18 | 19 | lua_shared_dict state 10m; 20 | lua_shared_dict mutex 1m; 21 | lua_shared_dict locks 1m; 22 | 23 | server { listen 12358; location = /status { 24 | content_by_lua ' ngx.sleep(2); ngx.exit(502) '; } } 25 | 26 | server { listen 12359; location = /status { 27 | content_by_lua ' ngx.sleep(2); ngx.exit(502) '; } } 28 | 29 | server { listen 12360; location = /status { 30 | content_by_lua ' ngx.sleep(1); ngx.exit(502) '; } } 31 | 32 | server { listen 12361; location = /status { 33 | content_by_lua ' ngx.sleep(1); ngx.exit(200) '; } } 34 | 35 | init_by_lua ' 36 | local config = require "config_timeout" 37 | local checkups = require "resty.checkups" 38 | checkups.init(config) 39 | checkups.prepare_checker(config) 40 | '; 41 | 42 | init_worker_by_lua ' 43 | local checkups = require "resty.checkups" 44 | checkups.create_checker() 45 | '; 46 | }; 47 | 48 | 49 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 50 | $ENV{TEST_NGINX_USE_HUP} = 1; 51 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 52 | #no_diff(); 53 | no_long_string(); 54 | 55 | run_tests(); 56 | 57 | __DATA__ 58 | 59 | 60 | === TEST 1: try_timeout in ups 61 | --- http_config eval: $::HttpConfig1 62 | --- config 63 | location = /t { 64 | access_log logs/access11.log; 65 | error_log logs/error11.log debug; 66 | content_by_lua_block { 67 | local checkups = require "resty.checkups" 68 | local h, p 69 | local cb_ok = function(host, port) 70 | local sock = ngx.socket.tcp() 71 | sock:settimeout(10000) 72 | local ok, err = sock:connect(host, port) 73 | h = host 74 | p = port 75 | local bytes, err = sock:send("GET /status HTTP/1.0\r\n\r\n") 76 | local data, err, partial = sock:receive() 77 | if data == "HTTP/1.1 200 OK" then 78 | return 1 79 | end 80 | return 81 | end 82 | 83 | local ok, err = checkups.ready_ok("amount_ups", cb_ok) 84 | if not ok then 85 | ngx.say("type ok: ", type(ok), " ", h, " ", p, " ", "err: ", err) 86 | else 87 | ngx.say("ok", " port: ", p, " host: ", h) 88 | end 89 | } 90 | 91 | } 92 | --- request 93 | GET /t 94 | --- response_body 95 | type ok: nil 127.0.0.1 12360 err: try_timeout excceed 96 | --- timeout: 20 97 | 98 | 99 | 100 | === TEST 2: try_timeout in opts 101 | --- http_config eval: $::HttpConfig1 102 | --- config 103 | location = /t { 104 | access_log logs/access22.log; 105 | error_log logs/error22.log debug; 106 | content_by_lua_block { 107 | local checkups = require "resty.checkups" 108 | 109 | local h, p 110 | local cb_ok = function(host, port) 111 | local sock = ngx.socket.tcp() 112 | sock:settimeout(10000) 113 | local ok, err = sock:connect(host, port) 114 | h = host 115 | p = port 116 | local bytes, err = sock:send("GET /status HTTP/1.0\r\n\r\n") 117 | local data, err, partial = 
sock:receive() 118 | if data == "HTTP/1.1 200 OK" then 119 | return 1 120 | end 121 | return 122 | end 123 | 124 | local ok, err = checkups.ready_ok("amount", cb_ok, {try_timeout = 4.1}) 125 | if not ok then 126 | ngx.say("type ok: ", type(ok), " ", h, " ", p, " ", "err: ", err) 127 | else 128 | ngx.say("ok", " port: ", p, " host: ", h) 129 | end 130 | } 131 | 132 | } 133 | --- request 134 | GET /t 135 | --- response_body 136 | type ok: nil 127.0.0.1 12360 err: try_timeout excceed 137 | --- timeout: 20 138 | 139 | 140 | === TEST 3: try_timeout = 0 141 | --- http_config eval: $::HttpConfig1 142 | --- config 143 | location = /t { 144 | access_log logs/access33.log; 145 | error_log logs/error33.log debug; 146 | content_by_lua_block { 147 | local checkups = require "resty.checkups" 148 | 149 | local h, p 150 | local cb_ok = function(host, port) 151 | local sock = ngx.socket.tcp() 152 | sock:settimeout(10000) 153 | local ok, err = sock:connect(host, port) 154 | h = host 155 | p = port 156 | local bytes, err = sock:send("GET /status HTTP/1.0\r\n\r\n") 157 | local data, err, partial = sock:receive() 158 | if data == "HTTP/1.1 200 OK" then 159 | return 1 160 | end 161 | return 162 | end 163 | 164 | local ok, err = checkups.ready_ok("amount", cb_ok, {try_timeout = 0}) 165 | if not ok then 166 | ngx.say("type ok: ", type(ok), " ", h, " ", p, " ", "err: ", err) 167 | else 168 | ngx.say("ok", " port: ", p, " host: ", h) 169 | end 170 | } 171 | 172 | } 173 | --- request 174 | GET /t 175 | --- response_body 176 | ok port: 12361 host: 127.0.0.1 177 | --- timeout: 20 178 | 179 | -------------------------------------------------------------------------------- /t/unprotected.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12355; 31 | location = /status { 32 | return 502; 33 | } 34 | } 35 | 36 | server { 37 | listen 12356; 38 | location = /status { 39 | return 404; 40 | } 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_unprotected_api" 45 | local checkups = require "resty.checkups" 46 | -- customize heartbeat callback 47 | config.api.heartbeat = function(host, port, ups) 48 | return checkups.STATUS_ERR, "down" 49 | end 50 | checkups.init(config) 51 | checkups.prepare_checker(config) 52 | '; 53 | 54 | init_worker_by_lua ' 55 | local checkups = require "resty.checkups" 56 | checkups.create_checker() 57 | '; 58 | 59 | }; 60 | 61 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 62 | $ENV{TEST_NGINX_USE_HUP} = 1; 63 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 64 | #no_diff(); 65 | no_long_string(); 66 | 67 | run_tests(); 68 | 69 | __DATA__ 70 | 71 | === TEST 1: http 72 | --- http_config eval: $::HttpConfig 73 | --- config 74 | location = /t { 75 | access_log off; 76 | content_by_lua ' 77 | local checkups = require "resty.checkups" 78 | ngx.sleep(2) 79 | local cb_ok = function(host, port) 80 | ngx.say(host .. ":" .. 
port) 81 | return checkups.STATUS_OK 82 | end 83 | 84 | local ok, err = checkups.ready_ok("api", cb_ok) 85 | ngx.say(err) 86 | local ok, err = checkups.ready_ok("api", cb_ok) 87 | ngx.say(err) 88 | 89 | local st = checkups.get_status() 90 | ngx.say(st["cls:api"][2][1].status) 91 | ngx.say(st["cls:api"][2][1].msg) 92 | ngx.say(st["cls:api"][2][2].status) 93 | ngx.say(st["cls:api"][2][2].msg) 94 | '; 95 | } 96 | --- request 97 | GET /t 98 | --- response_body 99 | no servers available 100 | no servers available 101 | err 102 | down 103 | err 104 | down 105 | --- no_error_log 106 | -------------------------------------------------------------------------------- /t/ups_down.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | #use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | repeat_each(1); 8 | master_on(); 9 | 10 | workers(16); 11 | 12 | my $pwd = cwd(); 13 | 14 | our $HttpConfig = qq{ 15 | lua_socket_log_errors off; 16 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 17 | error_log logs/error.log debug; 18 | 19 | lua_shared_dict state 10m; 20 | lua_shared_dict mutex 1m; 21 | lua_shared_dict locks 1m; 22 | 23 | server { 24 | listen 12354; 25 | location = /status { 26 | return 200; 27 | } 28 | } 29 | 30 | server { 31 | listen 12356; 32 | location = /status { 33 | return 200; 34 | } 35 | } 36 | 37 | upstream api.com { 38 | server 127.0.0.1:12355; 39 | server 127.0.0.1:12356; 40 | } 41 | 42 | init_by_lua ' 43 | local config = require "config_down" 44 | local checkups = require "resty.checkups" 45 | checkups.init(config) 46 | '; 47 | 48 | init_worker_by_lua ' 49 | local config = require "config_down" 50 | local checkups = require "resty.checkups" 51 | checkups.prepare_checker(config) 52 | checkups.create_checker() 53 | '; 54 | 55 | }; 56 | 57 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 58 | $ENV{TEST_NGINX_USE_HUP} = 1; 59 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 60 | no_long_string(); 61 | no_diff(); 62 | 63 | run_tests(); 64 | 65 | __DATA__ 66 | 67 | === TEST 1: set upstream down 68 | --- http_config eval: $::HttpConfig 69 | --- config 70 | location = /t { 71 | access_log off; 72 | content_by_lua ' 73 | local upstream = require "ngx.upstream" 74 | 75 | ngx.sleep(1) 76 | local srvs = upstream.get_primary_peers("api.com") 77 | 78 | ngx.say(srvs[1].down) 79 | ngx.say(srvs[2].down) 80 | '; 81 | } 82 | --- request eval 83 | [ 84 | "GET /t", 85 | "GET /t", 86 | "GET /t", 87 | "GET /t", 88 | "GET /t", 89 | "GET /t", 90 | "GET /t", 91 | "GET /t", 92 | "GET /t", 93 | "GET /t", 94 | ] 95 | --- response_body eval 96 | [ 97 | "true\nnil\n", 98 | "true\nnil\n", 99 | "true\nnil\n", 100 | "true\nnil\n", 101 | "true\nnil\n", 102 | "true\nnil\n", 103 | "true\nnil\n", 104 | "true\nnil\n", 105 | "true\nnil\n", 106 | "true\nnil\n", 107 | ] 108 | --- timeout: 20 109 | 110 | 111 | -------------------------------------------------------------------------------- /t/upstream.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et fdm=marker: 2 | use lib 'lib'; 3 | use Test::Nginx::Socket; 4 | use Cwd qw(cwd); 5 | use Test::Nginx::Socket 'no_plan'; 6 | 7 | #repeat_each(2); 8 | 9 | workers(4); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log 
debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | 22 | server { 23 | listen 12354; 24 | location = /status { 25 | return 200; 26 | } 27 | } 28 | 29 | server { 30 | listen 12356; 31 | location = /status { 32 | return 404; 33 | } 34 | } 35 | 36 | upstream api.com { 37 | server 127.0.0.1:12354; 38 | server 127.0.0.1:12355; 39 | server 127.0.0.1:12356 backup; 40 | server 127.0.0.1:12357 backup; 41 | } 42 | 43 | init_by_lua ' 44 | local config = require "config_ups" 45 | local checkups = require "resty.checkups" 46 | checkups.init(config) 47 | '; 48 | 49 | init_worker_by_lua ' 50 | local checkups = require "resty.checkups" 51 | local config = require "config_ups" 52 | checkups.prepare_checker(config) 53 | checkups.create_checker() 54 | '; 55 | 56 | }; 57 | 58 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 59 | $ENV{TEST_NGINX_USE_HUP} = 1; 60 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 61 | #no_diff(); 62 | no_long_string(); 63 | 64 | run_tests(); 65 | 66 | __DATA__ 67 | 68 | === TEST 1: set upstream down 69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location = /t { 72 | access_log off; 73 | content_by_lua ' 74 | local upstream = require "ngx.upstream" 75 | local checkups = require "resty.checkups" 76 | 77 | local srvs = upstream.get_primary_peers("api.com") 78 | ngx.say(srvs[1].down) 79 | ngx.say(srvs[2].down) 80 | 81 | local srvs = upstream.get_backup_peers("api.com") 82 | ngx.say(srvs[1].down) 83 | ngx.say(srvs[2].down) 84 | 85 | ngx.sleep(2) 86 | 87 | local srvs = upstream.get_primary_peers("api.com") 88 | ngx.say(srvs[1].down) 89 | ngx.say(srvs[2].down) 90 | 91 | local srvs = upstream.get_backup_peers("api.com") 92 | ngx.say(srvs[1].down) 93 | ngx.say(srvs[2].down) 94 | '; 95 | } 96 | --- request 97 | GET /t 98 | --- response_body 99 | nil 100 | nil 101 | nil 102 | nil 103 | nil 104 | true 105 | nil 106 | true 107 | --- timeout: 10 108 | -------------------------------------------------------------------------------- /t/worker_crash.t: -------------------------------------------------------------------------------- 1 | # vim:set ft= ts=4 sw=4 et: 2 | 3 | use lib 'lib'; 4 | use Test::Nginx::Socket; 5 | use Cwd qw(cwd); 6 | use Test::Nginx::Socket 'no_plan'; 7 | 8 | workers(1); 9 | master_process_enabled(1); 10 | 11 | my $pwd = cwd(); 12 | 13 | our $HttpConfig = qq{ 14 | lua_socket_log_errors off; 15 | lua_package_path "$pwd/../lua-resty-lock/?.lua;$pwd/lib/?.lua;$pwd/t/lib/?.lua;;"; 16 | error_log logs/error.log debug; 17 | 18 | lua_shared_dict state 10m; 19 | lua_shared_dict mutex 1m; 20 | lua_shared_dict locks 1m; 21 | lua_shared_dict config 10m; 22 | 23 | server { 24 | listen 12350; 25 | return 200 12350; 26 | } 27 | 28 | server { 29 | listen 12351; 30 | return 200 12351; 31 | } 32 | 33 | server { 34 | listen 12352; 35 | return 200 12352; 36 | } 37 | 38 | server { 39 | listen 12353; 40 | return 200 12353; 41 | } 42 | 43 | upstream api.com { 44 | server 127.0.0.1:12350; 45 | server 127.0.0.1:12351; 46 | server 127.0.0.1:12352 backup; 47 | server 127.0.0.1:12353 backup; 48 | } 49 | 50 | init_by_lua ' 51 | local config = require "config_dyconfig" 52 | local checkups = require "resty.checkups" 53 | checkups.init(config) 54 | '; 55 | 56 | init_worker_by_lua ' 57 | local config = require "config_dyconfig" 58 | local checkups = require "resty.checkups" 59 | checkups.prepare_checker(config) 60 | checkups.create_checker() 61 | '; 62 | }; 63 | 64 | 65 | $ENV{TEST_NGINX_RESOLVER} = '8.8.8.8'; 66 | $ENV{TEST_NGINX_CHECK_LEAK} = 1; 67 | 
$ENV{TEST_NGINX_USE_HUP} = 1; 68 | $ENV{TEST_NGINX_PWD} = $pwd; 69 | 70 | #no_diff(); 71 | no_long_string(); 72 | 73 | run_tests(); 74 | 75 | __DATA__ 76 | 77 | === TEST 1: Add server 78 | --- http_config eval: $::HttpConfig 79 | --- config 80 | location = /12350 { 81 | proxy_pass http://127.0.0.1:12350/; 82 | } 83 | location = /12351 { 84 | proxy_pass http://127.0.0.1:12351/; 85 | } 86 | location = /12352 { 87 | proxy_pass http://127.0.0.1:12352/; 88 | } 89 | location = /12353 { 90 | proxy_pass http://127.0.0.1:12353/; 91 | } 92 | 93 | location = /t { 94 | content_by_lua ' 95 | local checkups = require "resty.checkups" 96 | 97 | local callback = function(host, port) 98 | local res = ngx.location.capture("/" .. port) 99 | ngx.say(res.body) 100 | return 1 101 | end 102 | 103 | local ok, err 104 | 105 | -- no upstream available 106 | ok, err = checkups.ready_ok("ups1", callback) 107 | if err then ngx.say(err) end 108 | 109 | -- add server to backup level 110 | ok, err = checkups.update_upstream("ups1", { 111 | { 112 | servers = { 113 | {host="127.0.0.1", port=12353}, 114 | } 115 | }, 116 | }) 117 | if err then ngx.say(err) end 118 | ngx.sleep(2) 119 | local pid = ngx.worker.pid() 120 | os.execute("kill " .. pid) 121 | '; 122 | } 123 | 124 | location = /tt { 125 | content_by_lua ' 126 | ngx.sleep(2) 127 | local checkups = require "resty.checkups" 128 | 129 | local callback = function(host, port) 130 | local res = ngx.location.capture("/" .. port) 131 | ngx.say(res.body) 132 | return 1 133 | end 134 | 135 | local ok, err 136 | ok, err = checkups.ready_ok("ups1", callback) 137 | if err then ngx.say(err) end 138 | '; 139 | } 140 | --- request eval 141 | ["GET /t", "GET /tt"] 142 | --- response_body eval 143 | ["no servers available\n", "12353\n"] 144 | 145 | --- timeout: 10 146 | -------------------------------------------------------------------------------- /util/lua-releng: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | use strict; 4 | use warnings; 5 | 6 | sub file_contains ($$); 7 | 8 | my $version; 9 | for my $file (map glob, qw{ *.lua lib/*.lua lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) { 10 | # Check the sanity of each .lua file 11 | open my $in, $file or 12 | die "ERROR: Can't open $file for reading: $!\n"; 13 | my $found_ver; 14 | while (<$in>) { 15 | my ($ver, $skipping); 16 | if (/(?x) (?:_VERSION) \s* = .*? ([\d\.]*\d+) (.*? 
SKIP)?/) { 17 | my $orig_ver = $ver = $1; 18 | $found_ver = 1; 19 | # $skipping = $2; 20 | $ver =~ s{^(\d+)\.(\d{3})(\d{3})$}{join '.', int($1), int($2), int($3)}e; 21 | warn "$file: $orig_ver ($ver)\n"; 22 | 23 | } elsif (/(?x) (?:_VERSION) \s* = \s* ([a-zA-Z_]\S*)/) { 24 | warn "$file: $1\n"; 25 | $found_ver = 1; 26 | last; 27 | } 28 | 29 | if ($ver and $version and !$skipping) { 30 | if ($version ne $ver) { 31 | # die "$file: $ver != $version\n"; 32 | } 33 | } elsif ($ver and !$version) { 34 | $version = $ver; 35 | } 36 | } 37 | if (!$found_ver) { 38 | warn "WARNING: No \"_VERSION\" or \"version\" field found in `$file`.\n"; 39 | } 40 | close $in; 41 | 42 | print "Checking use of Lua global variables in file $file ...\n"; 43 | # use tee: use Capture::Tiny 'tee' 44 | #my $output = tee {system("luac -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|select|rawset|rawget|debug'")}; 45 | # or this simple way 46 | my $output = `luac5.1 -p -l $file | grep ETGLOBAL | grep -vE 'require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|select|rawset|rawget|debug'`; 47 | print $output; 48 | 49 | if ($output =~ /(SET|GET)GLOBAL/) { 50 | exit 1; 51 | } 52 | 53 | #file_contains($file, "attempt to write to undeclared variable"); 54 | system("grep -H -n -E --color '.{120}' $file"); 55 | } 56 | 57 | sub file_contains ($$) { 58 | my ($file, $regex) = @_; 59 | open my $in, $file 60 | or die "Cannot open $file for reading: $!\n"; 61 | my $content = do { local $/; <$in> }; 62 | close $in; 63 | #print "$content"; 64 | return scalar ($content =~ /$regex/); 65 | } 66 | 67 | if (-d 't') { 68 | for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) { 69 | system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file}); 70 | } 71 | } 72 | 73 | --------------------------------------------------------------------------------
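
The tests above exercise the public API end to end. For quick orientation, here is a minimal sketch of the same wiring outside the test harness. It is not a file from this repository: it assumes a config module shaped like `t/lib/config_api.lua` that defines a cluster named `api`, and the `try_timeout` value is purely illustrative.

```lua
-- init_by_lua*: parse the config and register the clusters
local config   = require "config_api"   -- hypothetical application config module
local checkups = require "resty.checkups"
checkups.init(config)
checkups.prepare_checker(config)

-- init_worker_by_lua*: start the per-worker heartbeat timers
checkups.create_checker()

-- content_by_lua*: let checkups pick peers until the callback succeeds
local cb_ok = function(host, port)
    local sock = ngx.socket.tcp()
    local ok, err = sock:connect(host, port)
    if not ok then
        return        -- a falsy return makes checkups try the next peer
    end
    sock:close()
    return 1          -- a truthy return ends the tries successfully
end

-- per-call options such as try_timeout and cluster_key override the
-- configured defaults, as exercised in t/try_timeout.t and t/cluster_key.t
local ok, err = checkups.ready_ok("api", cb_ok, { try_timeout = 2 })
if not ok then
    ngx.log(ngx.ERR, "no upstream succeeded: ", err)
end
```

As the tests show, `ready_ok` returns whatever the callback returned on success, and `nil` plus an error string (for example, `"no servers available"`) when every allowed try has failed.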