├── .gitattributes ├── .github └── FUNDING.yml ├── .gitignore ├── .gitmodules ├── .luacov ├── Makefile ├── README.md ├── dist.ini ├── lib ├── qless-lib.lua ├── qless.lua └── resty │ ├── qless.lua │ └── qless │ ├── job.lua │ ├── luascript.lua │ ├── queue.lua │ ├── recurring_job.lua │ ├── reserver │ ├── ordered.lua │ ├── round_robin.lua │ └── shuffled_round_robin.lua │ └── worker.lua ├── lua-resty-qless-0.12-0.rockspec ├── t ├── 01-sanity.t ├── 02-queue.t ├── 03-job.t ├── 04-recurring_job.t ├── 05-worker.t ├── 06-reserver.t └── 07-events.t └── util └── lua-releng /.gitattributes: -------------------------------------------------------------------------------- 1 | *.t linguist-language=lua 2 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: pintsized 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | t/servroot/ 2 | t/error.log 3 | luacov.* 4 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "qless-core"] 2 | path = qless-core 3 | url = https://github.com/seomoz/qless-core.git 4 | -------------------------------------------------------------------------------- /.luacov: -------------------------------------------------------------------------------- 1 | modules = { 2 | ["resty.qless"] = "lib/resty/qless.lua", 3 | ["resty.qless.*"] = "lib", 4 | ["resty.qless.reserver.*"] = "lib", 5 | } 6 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OPENRESTY_PREFIX=/usr/local/openresty 2 | 3 | PREFIX ?= /usr/local 4 | LUA_INCLUDE_DIR ?= 
$(PREFIX)/include 5 | LUA_LIB_DIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) 6 | INSTALL ?= install 7 | TEST_FILE ?= t 8 | 9 | TEST_REDIS_PORT ?= 6379 10 | TEST_REDIS_DATABASE ?= 6 11 | 12 | REDIS_CLI := redis-cli -p $(TEST_REDIS_PORT) -n $(TEST_REDIS_DATABASE) 13 | 14 | .PHONY: all test install 15 | 16 | all: ; 17 | 18 | install: all 19 | $(INSTALL) -d $(DESTDIR)/$(LUA_LIB_DIR)/resty/qless 20 | $(INSTALL) lib/resty/qless/*.lua $(DESTDIR)/$(LUA_LIB_DIR)/resty/qless/ 21 | 22 | test: all 23 | util/lua-releng 24 | -@echo "Flushing Redis DB" 25 | @$(REDIS_CLI) flushdb 26 | @rm -f luacov.stats.out 27 | PATH=$(OPENRESTY_PREFIX)/nginx/sbin:$$PATH TEST_REDIS_DATABASE=$(TEST_REDIS_DATABASE) TEST_REDIS_PORT=$(TEST_REDIS_PORT) TEST_NGINX_NO_SHUFFLE=1 prove -I../test-nginx/lib -r $(TEST_FILE) 28 | @luacov 29 | @tail -14 luacov.report.out 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | lua-resty-qless 2 | =============== 3 | 4 | **lua-resty-qless** is a binding to [qless-core](https://github.com/seomoz/qless-core) from [Moz](https://github.com/seomoz) - a powerful Redis based job queueing system inspired by 5 | [resque](https://github.com/defunkt/resque#readme), but instead implemented as a collection of Lua scripts for Redis. 6 | 7 | This binding provides a full implementation of **Qless** via Lua script running in [OpenResty](http://openresty.org/) / [lua-nginx-module](https://github.com/openresty/lua-nginx-module), including workers which can be started during the `init_worker_by_lua` phase. 8 | 9 | Essentially, with this module and a modern Redis instance, you can turn your OpenResty server into a quite sophisticated yet lightweight job queuing system, which is also compatible with the reference Ruby implementation, [Qless](https://github.com/seomoz/qless). 
10 | 11 | *Note: This module is not designed to work in a pure Lua environment.* 12 | 13 | Status 14 | ====== 15 | 16 | This module should be considered experimental. 17 | 18 | 19 | Requirements 20 | ============ 21 | 22 | * Redis >= 2.8.x 23 | * OpenResty >= 1.9.x 24 | * [lua-resty-redis-connector](https://github.com/pintsized/lua-resty-redis-connector) >= 0.05 25 | 26 | 27 | Philosophy and Nomenclature 28 | =========================== 29 | A `job` is a unit of work identified by a job id or `jid`. A `queue` can contain 30 | several jobs that are scheduled to be run at a certain time, several jobs that are 31 | waiting to run, and jobs that are currently running. A `worker` is a process on a 32 | host, identified uniquely, that asks for jobs from the queue, performs some process 33 | associated with that job, and then marks it as complete. When it's completed, it 34 | can be put into another queue. 35 | 36 | Jobs can only be in one queue at a time. That queue is whatever queue they were last 37 | put in. So if a worker is working on a job, and you move it, the worker's request to 38 | complete the job will be ignored. 39 | 40 | A job can be `canceled`, which means it disappears into the ether, and we'll never 41 | pay it any mind ever again. A job can be `dropped`, which is when a worker fails 42 | to heartbeat or complete the job in a timely fashion, or a job can be `failed`, 43 | which is when a host recognizes some systematically problematic state about the 44 | job. A worker should only fail a job if the error is likely not a transient one; 45 | otherwise, that worker should just drop it and let the system reclaim it. 46 | 47 | Features 48 | ======== 49 | 50 | 1. __Jobs don't get dropped on the floor__ Sometimes workers drop jobs. Qless 51 | automatically picks them back up and gives them to another worker 52 | 1. __Tagging / Tracking__ Some jobs are more interesting than others. Track those 53 | jobs to get updates on their progress. 54 | 1. 
__Job Dependencies__ One job might need to wait for another job to complete 55 | 1. __Stats__ Qless automatically keeps statistics about how long jobs wait 56 | to be processed and how long they take to be processed. Currently, we keep 57 | track of the count, mean, standard deviation, and a histogram of these times. 58 | 1. __Job data is stored temporarily__ Job info sticks around for a configurable 59 | amount of time so you can still look back on a job's history, data, etc. 60 | 1. __Priority__ Jobs with the same priority get popped in the order they were 61 | inserted; a higher priority means that it gets popped faster 62 | 1. __Retry logic__ Every job has a number of retries associated with it, which are 63 | renewed when it is put into a new queue or completed. If a job is repeatedly 64 | dropped, then it is presumed to be problematic, and is automatically failed. 65 | 1. __Web App__ [lua-resty-qless-web](https://github.com/hamishforbes/lua-resty-qless-web) gives you visibility and control over certain operational issues 66 | 1. __Scheduled Work__ Until a job waits for a specified delay (defaults to 0), 67 | jobs cannot be popped by workers 68 | 1. __Recurring Jobs__ Scheduling's all well and good, but we also support 69 | jobs that need to recur periodically. 70 | 1. __Notifications__ Tracked jobs emit events on pubsub channels as they get 71 | completed, failed, put, popped, etc. Use these events to get notified of 72 | progress on jobs you're interested in. 73 | 74 | Connecting 75 | ============= 76 | First things first, require `resty.qless` and create a client, specifying your Redis connection details. 77 | 78 | ```lua 79 | local qless = require("resty.qless").new({ 80 | host = "127.0.0.1", 81 | port = 6379, 82 | }) 83 | ``` 84 | 85 | Parameters passed to `new` are forwarded to [lua-resty-redis-connector](https://github.com/pintsized/lua-resty-redis-connector). 
Please review the documentation there for connection options, including how to use Redis Sentinel etc. 86 | 87 | Additionally, if your application has a Redis connection that you wish to reuse, there are two ways you can integrate this: 88 | 89 | 1) Using an already established connection directly 90 | 91 | ```lua 92 | local qless = require("resty.qless").new({ 93 | redis_client = my_redis, 94 | }) 95 | ``` 96 | 97 | 2) Providing callbacks for connecting and closing the connection 98 | 99 | ```lua 100 | local qless = require("resty.qless").new({ 101 | get_redis_client = my_connection_callback, 102 | close_redis_client = my_close_callback, 103 | }) 104 | ``` 105 | 106 | When finished with Qless, you should call `qless:set_keepalive()` which will attempt to put Redis back on the keepalive pool, either using settings you provide directly, or via parameters sent to `lua-resty-redis-connector`, or by calling your `close_redis_client` callback. 107 | 108 | 109 | Enqueing Jobs 110 | ============= 111 | 112 | Jobs themselves are modules, which must be loadable via `require` and provide a `perform` function, which accepts a single `job` argument. 113 | 114 | 115 | ```lua 116 | -- my/test/job.lua (the job's "klass" becomes "my.test.job") 117 | 118 | local _M = {} 119 | 120 | function _M.perform(job) 121 | -- job is an instance of Qless_Job and provides access to 122 | -- job.data (which is a Lua table), a means to cancel the 123 | -- job (job:cancel()), and more. 124 | 125 | -- return "nil, err_type, err_msg" to indicate an unexpected failure 126 | 127 | if not job.data then 128 | return nil, "job-error", "data missing" 129 | end 130 | 131 | -- Do work 132 | end 133 | 134 | return _M 135 | ``` 136 | 137 | Now you can access a queue, and add a job to that queue. 138 | 139 | ```lua 140 | -- This references a new or existing queue 'testing' 141 | local queue = qless.queues['testing'] 142 | 143 | -- Let's add a job, with some data. 
Returns Job ID 144 | local jid = queue:put("my.test.job", { hello = "howdy" }) 145 | -- = "0c53b0404c56012f69fa482a1427ab7d" 146 | 147 | -- Now we can ask for a job 148 | local job = queue:pop() 149 | 150 | -- And we can do the work associated with it! 151 | job:perform() 152 | ``` 153 | 154 | The job data must be a table (which is serialised to JSON internally). 155 | 156 | The value returned by `queue:put()` is the job ID, or jid. Every Qless 157 | job has a unique jid, and it provides a means to interact with an 158 | existing job: 159 | 160 | ```lua 161 | -- find an existing job by its jid 162 | local job = qless.jobs:get(jid) 163 | 164 | -- Query it to find out details about it: 165 | job.klass -- the class of the job 166 | job.queue -- the queue the job is in 167 | job.data -- the data for the job 168 | job.history -- the history of what has happened to the job so far 169 | job.dependencies -- the jids of other jobs that must complete before this one 170 | job.dependents -- the jids of other jobs that depend on this one 171 | job.priority -- the priority of this job 172 | job.tags -- table of tags for this job 173 | job.original_retries -- the number of times the job is allowed to be retried 174 | job.retries_left -- the number of retries left 175 | 176 | -- You can also change the job in various ways: 177 | job:requeue("some_other_queue") -- move it to a new queue 178 | job:cancel() -- cancel the job 179 | job:tag("foo") -- add a tag 180 | job:untag("foo") -- remove a tag 181 | ``` 182 | 183 | Running Workers 184 | ================ 185 | 186 | Traditionally, Qless offered a forking Ruby worker script inspired by Resque. 187 | 188 | In lua-resty-qless, we take advantage of the `init_worker_by_lua` phase 189 | and `ngx.timer.at` API in order to run workers in independent "light threads", 190 | scalable across your worker processes. 191 | 192 | You can run many light threads concurrently per worker process, which Nginx 193 | will schedule for you. 
194 | 195 | ```lua 196 | init_worker_by_lua ' 197 | local resty_qless_worker = require "resty.qless.worker" 198 | 199 | local worker = resty_qless_worker.new(redis_params) 200 | 201 | worker:start({ 202 | interval = 1, 203 | concurrency = 4, 204 | reserver = "ordered", 205 | queues = { "my_queue", "my_other_queue" }, 206 | }) 207 | '; 208 | ``` 209 | 210 | Workers support three strategies (reservers) for what order to pop jobs off the queues: **ordered**, **round-robin** and **shuffled round-robin**. 211 | 212 | The ordered reserver will keep popping jobs off the first queue until 213 | it is empty, before trying to pop jobs off the second queue. The 214 | round-robin reserver will pop a job off the first queue, then the second 215 | queue, and so on. Shuffled simply ensures the round-robin selection is unpredictable. 216 | 217 | You could also easily implement your own. Follow the other reservers as a guide, and ensure yours 218 | is "requireable" with `require "resty.qless.reserver.myreserver"`. 219 | 220 | Middleware 221 | ========= 222 | 223 | Workers also support middleware which can be used to inject 224 | logic around the processing of a single job. This can be useful, for example, when you need to re-establish a database connection. 225 | 226 | To do this you set the worker's `middleware` to a function, and call `coroutine.yield` where you want 227 | the job to be performed. 228 | 229 | ```lua 230 | local worker = resty_qless_worker.new(redis_params) 231 | 232 | worker.middleware = function(job) 233 | -- Do pre job work 234 | coroutine.yield() 235 | -- Do post job work 236 | end 237 | 238 | worker:start({ queues = "my_queue" }) 239 | ``` 240 | 241 | 242 | Job Dependencies 243 | ================ 244 | Let's say you have one job that depends on another, but the task definitions are 245 | fundamentally different. 
You need to cook a turkey, and you need to make stuffing, 246 | but you can't make the turkey until the stuffing is made: 247 | 248 | ```lua 249 | local queue = qless.queues['cook'] 250 | local stuffing_jid = queue:put("jobs.make.stuffing", 251 | { lots = "of butter" } 252 | ) 253 | local turkey_jid = queue:put("jobs.make.turkey", 254 | { with = "stuffing" }, 255 | { depends = stuffing_jid } 256 | ) 257 | ``` 258 | 259 | When the stuffing job completes, the turkey job is unlocked and free to be processed. 260 | 261 | Priority 262 | ======== 263 | Some jobs need to get popped sooner than others. Whether it's a trouble ticket, or 264 | debugging, you can do this pretty easily when you put a job in a queue: 265 | 266 | ```lua 267 | queue:put("jobs.test", { foo = "bar" }, { priority = 10 }) 268 | ``` 269 | 270 | What happens when you want to adjust a job's priority while it's still waiting in 271 | a queue? 272 | 273 | ```lua 274 | local job = qless.jobs:get("0c53b0404c56012f69fa482a1427ab7d") 275 | job.priority = 10 276 | -- Now this will get popped before any job of lower priority 277 | ``` 278 | 279 | *Note: Setting the priority field above is all you need to do, thanks to Lua metamethods which are invoked to update 280 | Redis. This may look a little "auto-magic", but the intention is to retain API design compatibility with the Ruby 281 | client as much as possible.* 282 | 283 | Scheduled Jobs 284 | ============== 285 | If you don't want a job to be run right away but some time in the future, you can 286 | specify a delay: 287 | 288 | ```lua 289 | -- Run at least 10 minutes from now 290 | queue:put("jobs.test", { foo = "bar" }, { delay = 600 }) 291 | ``` 292 | 293 | This doesn't guarantee that job will be run exactly at 10 minutes. 
You can accomplish 294 | this by changing the job's priority so that once 10 minutes has elapsed, it's put before 295 | lesser-priority jobs: 296 | 297 | ```lua 298 | -- Run in 10 minutes 299 | queue:put("jobs.test", 300 | { foo = "bar" }, 301 | { delay = 600, priority = 100 } 302 | ) 303 | ``` 304 | 305 | Recurring Jobs 306 | ============== 307 | Sometimes it's not enough simply to schedule one job, but you want to run jobs regularly. 308 | In particular, maybe you have some batch operation that needs to get run once an hour and 309 | you don't care what worker runs it. Recurring jobs are specified much like other jobs: 310 | 311 | ```lua 312 | -- Run every hour 313 | local recurring_jid = queue:recur("jobs.test", { widget = "warble" }, 3600) 314 | -- = 22ac75008a8011e182b24cf9ab3a8f3b 315 | ``` 316 | 317 | You can even access them in much the same way as you would normal jobs: 318 | 319 | ```lua 320 | local job = qless.jobs:get("22ac75008a8011e182b24cf9ab3a8f3b") 321 | ``` 322 | 323 | Changing the interval at which it runs after the fact is trivial: 324 | 325 | ```lua 326 | -- I think I only need it to run once every two hours 327 | job.interval = 7200 328 | ``` 329 | 330 | If you want it to run every hour on the hour, but it's 2:37 right now, you can specify 331 | an offset which is how long it should wait before popping the first job: 332 | 333 | ```lua 334 | -- 23 minutes of waiting until it should go 335 | queue:recur("jobs.test", 336 | { howdy = "hello" }, 337 | 3600, 338 | { offset = (23 * 60) } 339 | ) 340 | ``` 341 | 342 | Recurring jobs also have priority, a configurable number of retries, and tags. These 343 | settings don't apply to the recurring jobs, but rather the jobs that they spawn. In the 344 | case where more than one interval passes before a worker tries to pop the job, __more than 345 | one job is created__. 
The thinking is that while it's completely client-managed, the state 346 | should not be dependent on how often workers are trying to pop jobs. 347 | 348 | ```lua 349 | -- Recur every minute 350 | queue:recur("jobs.test", { lots = "of jobs" }, 60) 351 | 352 | -- Wait 5 minutes 353 | 354 | local jobs = queue:pop(10) 355 | ngx.say(#jobs, " jobs got popped") 356 | 357 | -- = 5 jobs got popped 358 | ``` 359 | 360 | Configuration Options 361 | ===================== 362 | You can get and set global (in the context of the same Redis instance) configuration 363 | to change the behaviour for heartbeating, and so forth. There aren't a tremendous number 364 | of configuration options, but an important one is how long job data is kept around. Job 365 | data is expired after it has been completed for `jobs-history` seconds, but is limited to 366 | the last `jobs-history-count` completed jobs. These default to 50k jobs, and 30 days, but 367 | depending on volume, your needs may change. To only keep the last 500 jobs for up to 7 days: 368 | 369 | ```lua 370 | qless:config_set("jobs-history", 7 * 86400) 371 | qless:config_set("jobs-history-count", 500) 372 | ``` 373 | 374 | Tagging / Tracking 375 | ================== 376 | In qless, 'tracking' means flagging a job as important. Tracked jobs emit subscribable events as they make progress 377 | (more on that below). 378 | 379 | ```lua 380 | local job = qless.jobs:get("b1882e009a3d11e192d0b174d751779d") 381 | job:track() 382 | ``` 383 | 384 | Jobs can be tagged with strings which are indexed for quick searches. For example, jobs 385 | might be associated with customer accounts, or some other key that makes sense for your 386 | project. 
387 | 388 | ```lua 389 | queue:put("jobs.test", {}, 390 | { tags = { "12345", "foo", "bar" } } 391 | ) 392 | ``` 393 | 394 | This makes them searchable in the Ruby / Sinatra web interface, or from code: 395 | 396 | ```lua 397 | local jids = qless.jobs:tagged("foo") 398 | ``` 399 | 400 | You can add or remove tags at will, too: 401 | 402 | ```lua 403 | local job = qless.jobs:get('b1882e009a3d11e192d0b174d751779d') 404 | job:tag("howdy", "hello") 405 | job:untag("foo", "bar") 406 | ``` 407 | 408 | Notifications 409 | ============= 410 | **Tracked** jobs emit events on specific pubsub channels as things happen to them. Whether 411 | it's getting popped off of a queue, completed by a worker, etc. 412 | 413 | Those familiar with Redis pub/sub will note that a Redis connection can only be used 414 | for pubsub-y commands once listening. For this reason, the events module is passed Redis connection 415 | parameters independently. 416 | 417 | ```lua 418 | local events = qless.events(redis_params) 419 | 420 | events:listen({ "canceled", "failed" }, function(channel, jid) 421 | ngx.log(ngx.INFO, jid, ": ", channel) 422 | -- logs "b1882e009a3d11e192d0b174d751779d: canceled" etc. 423 | end) 424 | ``` 425 | 426 | You can also listen to the "log" channel, which gives a JSON structure of all logged events. 427 | 428 | ```lua 429 | local events = qless.events(redis_params) 430 | 431 | events:listen({ "log" }, function(channel, message) 432 | local message = cjson.decode(message) 433 | ngx.log(ngx.INFO, message.event, " ", message.jid) 434 | end) 435 | ``` 436 | 437 | Heartbeating 438 | ============ 439 | When a worker is given a job, it is given an exclusive lock to that job. That means 440 | that job won't be given to any other worker, so long as the worker checks in with 441 | progress on the job. By default, jobs have to either report back progress every 60 442 | seconds, or complete it, but that's a configurable option. For longer jobs, this 443 | may not make sense. 
444 | 445 | ``` lua 446 | -- Hooray! We've got a piece of work! 447 | local job = queue:pop() 448 | 449 | -- How long until I have to check in? 450 | job:ttl() 451 | -- = 59 452 | 453 | -- Hey! I'm still working on it! 454 | job:heartbeat() 455 | -- = 1331326141.0 456 | 457 | -- Ok, I've got some more time. Oh! Now I'm done! 458 | job:complete() 459 | ``` 460 | 461 | If you want to increase the heartbeat in all queues, 462 | 463 | ```lua 464 | -- Now jobs get 10 minutes to check in 465 | qless:config_set("heartbeat", 600) 466 | 467 | -- But the testing queue doesn't get as long. 468 | qless.queues["testing"].heartbeat = 300 469 | ``` 470 | 471 | When choosing a heartbeat interval, note that this is the amount of time that 472 | can pass before qless realizes if a job has been dropped. At the same time, you don't 473 | want to burden qless with heartbeating every 10 seconds if your job is expected to 474 | take several hours. 475 | 476 | An idiom you're encouraged to use for long-running jobs that want to check in their 477 | progress periodically: 478 | 479 | ``` lua 480 | -- Wait until we have 5 minutes left on the heartbeat, and if we find that 481 | -- we've lost our lock on a job, then honorably fall on our sword 482 | if job:ttl() < 300 and not job:heartbeat() then 483 | -- exit 484 | end 485 | ``` 486 | 487 | Stats 488 | ===== 489 | One nice feature of Qless is that you can get statistics about usage. Stats are 490 | aggregated by day, so when you want stats about a queue, you need to say what queue 491 | and what day you're talking about. By default, you just get the stats for today. 492 | These stats include information about the mean job wait time, standard deviation, 493 | and histogram. This same data is also provided for job completion: 494 | 495 | ```lua 496 | -- So, how're we doing today? 
497 | local stats = queue:stats() 498 | -- = { 'run' = { 'mean' = ..., }, 'wait' = {'mean' = ..., } } 499 | ``` 500 | 501 | Time 502 | ==== 503 | It's important to note that Redis doesn't allow access to the system time if you're 504 | going to be making any manipulations to data (which our scripts do). And yet, we 505 | have heartbeating. This means that the clients actually send the current time when 506 | making most requests, and for consistency's sake, means that your workers must be 507 | relatively synchronized. This doesn't mean down to the tens of milliseconds, but if 508 | you're experiencing appreciable clock drift, you should investigate NTP. 509 | 510 | Ensuring Job Uniqueness 511 | ======================= 512 | 513 | As mentioned above, Jobs are uniquely identified by an id--their jid. 514 | Qless will generate a UUID for each enqueued job or you can specify 515 | one manually: 516 | 517 | ```lua 518 | queue:put("jobs.test", { hello = 'howdy' }, { jid = 'my-job-jid' }) 519 | ``` 520 | 521 | This can be useful when you want to ensure a job's uniqueness: simply 522 | create a jid that is a function of the Job's class and data, and it's 523 | guaranteed that Qless won't have multiple jobs with the same class 524 | and data. 525 | 526 | 527 | 528 | ## Author 529 | 530 | James Hurst 531 | 532 | Based on the Ruby [Qless reference implementation](https://github.com/seomoz/qless). Documentation also adapted from the 533 | original project. 534 | 535 | ## Licence 536 | 537 | This module is licensed under the 2-clause BSD license. 538 | 539 | Copyright (c) James Hurst 540 | 541 | All rights reserved. 542 | 543 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 544 | 545 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
546 | 547 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 548 | 549 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 550 | -------------------------------------------------------------------------------- /dist.ini: -------------------------------------------------------------------------------- 1 | name=lua-resty-qless 2 | abstract=Lua binding to Qless (Queue / Pipeline management) for OpenResty 3 | author=James Hurst 4 | is_original=yes 5 | license=2bsd 6 | lib_dir=lib 7 | doc_dir=lib 8 | repo_link=https://github.com/ledgetech/lua-resty-qless 9 | main_module=lib/resty/qless.lua 10 | requires=luajit, openresty/lua-resty-redis, ledgetech/lua-resty-redis-connector >= 0.04 11 | -------------------------------------------------------------------------------- /lib/qless.lua: -------------------------------------------------------------------------------- 1 | -- Current SHA: 13cf4159aaa159ad943872ad24904a267efacbde 2 | -- This is a generated file 3 | local Qless = { 4 | ns = 'ql:' 5 | } 6 | 7 | local QlessQueue = { 8 | ns = Qless.ns .. 
'q:' 9 | } 10 | QlessQueue.__index = QlessQueue 11 | 12 | local QlessWorker = { 13 | ns = Qless.ns .. 'w:' 14 | } 15 | QlessWorker.__index = QlessWorker 16 | 17 | local QlessJob = { 18 | ns = Qless.ns .. 'j:' 19 | } 20 | QlessJob.__index = QlessJob 21 | 22 | local QlessRecurringJob = {} 23 | QlessRecurringJob.__index = QlessRecurringJob 24 | 25 | Qless.config = {} 26 | 27 | local function tbl_extend(self, other) 28 | for i, v in ipairs(other) do 29 | table.insert(self, v) 30 | end 31 | end 32 | 33 | function Qless.publish(channel, message) 34 | redis.call('publish', Qless.ns .. channel, message) 35 | end 36 | 37 | function Qless.job(jid) 38 | assert(jid, 'Job(): no jid provided') 39 | local job = {} 40 | setmetatable(job, QlessJob) 41 | job.jid = jid 42 | return job 43 | end 44 | 45 | function Qless.recurring(jid) 46 | assert(jid, 'Recurring(): no jid provided') 47 | local job = {} 48 | setmetatable(job, QlessRecurringJob) 49 | job.jid = jid 50 | return job 51 | end 52 | 53 | function Qless.failed(group, start, limit) 54 | start = assert(tonumber(start or 0), 55 | 'Failed(): Arg "start" is not a number: ' .. (start or 'nil')) 56 | limit = assert(tonumber(limit or 25), 57 | 'Failed(): Arg "limit" is not a number: ' .. (limit or 'nil')) 58 | 59 | if group then 60 | return { 61 | total = redis.call('llen', 'ql:f:' .. group), 62 | jobs = redis.call('lrange', 'ql:f:' .. group, start, start + limit - 1) 63 | } 64 | else 65 | local response = {} 66 | local groups = redis.call('smembers', 'ql:failures') 67 | for index, group in ipairs(groups) do 68 | response[group] = redis.call('llen', 'ql:f:' .. group) 69 | end 70 | return response 71 | end 72 | end 73 | 74 | function Qless.jobs(now, state, ...) 75 | assert(state, 'Jobs(): Arg "state" missing') 76 | if state == 'complete' then 77 | local offset = assert(tonumber(arg[1] or 0), 78 | 'Jobs(): Arg "offset" not a number: ' .. 
tostring(arg[1]))
    local count = assert(tonumber(arg[2] or 25),
      'Jobs(): Arg "count" not a number: ' .. tostring(arg[2]))
    return redis.call('zrevrange', 'ql:completed', offset,
      offset + count - 1)
  else
    local name = assert(arg[1], 'Jobs(): Arg "queue" missing')
    local offset = assert(tonumber(arg[2] or 0),
      'Jobs(): Arg "offset" not a number: ' .. tostring(arg[2]))
    local count = assert(tonumber(arg[3] or 25),
      'Jobs(): Arg "count" not a number: ' .. tostring(arg[3]))

    local queue = Qless.queue(name)
    if state == 'running' then
      return queue.locks.peek(now, offset, count)
    elseif state == 'stalled' then
      return queue.locks.expired(now, offset, count)
    elseif state == 'scheduled' then
      -- Promote any scheduled jobs that have become ready before peeking
      queue:check_scheduled(now, queue.scheduled.length())
      return queue.scheduled.peek(now, offset, count)
    elseif state == 'depends' then
      return queue.depends.peek(now, offset, count)
    elseif state == 'recurring' then
      return queue.recurring.peek(math.huge, offset, count)
    else
      error('Jobs(): Unknown type "' .. state .. '"')
    end
  end
end

-- Track(now, ['track' | 'untrack'], jid)
-- With a command, start or stop tracking the given jid (the job must
-- exist), publishing a 'track'/'untrack' event. With no command, return
-- all tracked jobs split into 'jobs' (job data for jids whose hash still
-- exists) and 'expired' (tracked jids whose job hash is gone).
function Qless.track(now, command, jid)
  if command ~= nil then
    assert(jid, 'Track(): Arg "jid" missing')
    assert(Qless.job(jid):exists(), 'Track(): Job does not exist')
    if string.lower(command) == 'track' then
      Qless.publish('track', jid)
      return redis.call('zadd', 'ql:tracked', now, jid)
    elseif string.lower(command) == 'untrack' then
      Qless.publish('untrack', jid)
      return redis.call('zrem', 'ql:tracked', jid)
    else
      error('Track(): Unknown action "' .. command .. '"')
    end
  else
    local response = {
      jobs = {},
      expired = {}
    }
    local jids = redis.call('zrange', 'ql:tracked', 0, -1)
    for index, jid in ipairs(jids) do
      local data = Qless.job(jid):data()
      if data then
        table.insert(response.jobs, data)
      else
        -- Tracked jid whose job hash no longer exists
        table.insert(response.expired, jid)
      end
    end
    return response
  end
end

-- Tag(now, command, ...)
--   'add'    : add tags to a job, updating per-tag indexes and counts
--   'remove' : remove tags from a job
--   'get'    : page through the jids carrying a given tag
--   'top'    : list the most-used tags (used at least twice)
function Qless.tag(now, command, ...)
  assert(command,
    'Tag(): Arg "command" must be "add", "remove", "get" or "top"')

  if command == 'add' then
    local jid = assert(arg[1], 'Tag(): Arg "jid" missing')
    local tags = redis.call('hget', QlessJob.ns .. jid, 'tags')
    if tags then
      tags = cjson.decode(tags)
      -- Set of tags already on the job, for duplicate detection
      local _tags = {}
      for i,v in ipairs(tags) do _tags[v] = true end

      for i=2,#arg do
        local tag = arg[i]
        if _tags[tag] == nil or _tags[tag] == false then
          _tags[tag] = true
          table.insert(tags, tag)
        end
        -- NOTE(review): zadd/zincrby run even when the job already had
        -- this tag, so re-adding a tag inflates the 'ql:tags' counter --
        -- confirm against upstream qless-core before changing.
        redis.call('zadd', 'ql:t:' .. tag, now, jid)
        redis.call('zincrby', 'ql:tags', 1, tag)
      end

      redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(tags))
      return tags
    else
      error('Tag(): Job ' .. jid .. ' does not exist')
    end
  elseif command == 'remove' then
    local jid = assert(arg[1], 'Tag(): Arg "jid" missing')
    local tags = redis.call('hget', QlessJob.ns .. jid, 'tags')
    if tags then
      tags = cjson.decode(tags)
      local _tags = {}
      for i,v in ipairs(tags) do _tags[v] = true end

      for i=2,#arg do
        local tag = arg[i]
        _tags[tag] = nil
        redis.call('zrem', 'ql:t:' .. tag, jid)
        redis.call('zincrby', 'ql:tags', -1, tag)
      end

      -- Keep only the tags that were not removed, preserving order
      local results = {}
      for i,tag in ipairs(tags) do
        if _tags[tag] then table.insert(results, tag) end
      end

      redis.call('hset', QlessJob.ns .. jid, 'tags', cjson.encode(results))
      return results
    else
      error('Tag(): Job ' .. jid .. ' does not exist')
    end
  elseif command == 'get' then
    local tag = assert(arg[1], 'Tag(): Arg "tag" missing')
    local offset = assert(tonumber(arg[2] or 0),
      'Tag(): Arg "offset" not a number: ' .. tostring(arg[2]))
    local count = assert(tonumber(arg[3] or 25),
      'Tag(): Arg "count" not a number: ' .. tostring(arg[3]))
    return {
      total = redis.call('zcard', 'ql:t:' .. tag),
      jobs = redis.call('zrange', 'ql:t:' .. tag, offset, offset + count - 1)
    }
  elseif command == 'top' then
    local offset = assert(tonumber(arg[1] or 0) ,
      'Tag(): Arg "offset" not a number: ' .. tostring(arg[1]))
    local count = assert(tonumber(arg[2] or 25),
      'Tag(): Arg "count" not a number: ' .. tostring(arg[2]))
    -- Only tags with a usage count of at least 2 qualify as 'top'
    return redis.call('zrevrangebyscore', 'ql:tags', '+inf', 2, 'limit', offset, count)
  else
    error('Tag(): First argument must be "add", "remove" or "get"')
  end
end

-- Cancel(...jids)
-- Cancel one or more (non-complete) jobs, removing them from their
-- queue's sets, unlinking dependencies, cleaning failure and tag
-- indexes, and deleting the job hashes. Every dependent of a canceled
-- job must also appear in the same call, otherwise this errors before
-- touching anything. Returns the list of jids passed in.
function Qless.cancel(...)
  -- Collect each jid's dependents so we can validate the set is closed
  local dependents = {}
  for _, jid in ipairs(arg) do
    dependents[jid] = redis.call(
      'smembers', QlessJob.ns .. jid .. '-dependents') or {}
  end

  for i, jid in ipairs(arg) do
    for j, dep in ipairs(dependents[jid]) do
      if dependents[dep] == nil or dependents[dep] == false then
        error('Cancel(): ' .. jid .. ' is a dependency of ' .. dep ..
          ' but is not mentioned to be canceled')
      end
    end
  end

  for _, jid in ipairs(arg) do
    local state, queue, failure, worker = unpack(redis.call(
      'hmget', QlessJob.ns .. jid, 'state', 'queue', 'failure', 'worker'))

    -- Complete jobs are left untouched
    if state ~= 'complete' then
      local encoded = cjson.encode({
        jid = jid,
        worker = worker,
        event = 'canceled',
        queue = queue
      })
      Qless.publish('log', encoded)

      if worker and (worker ~= '') then
        redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)
        Qless.publish('w:' .. worker, encoded)
      end

      if queue then
        local queue = Qless.queue(queue)
        queue.work.remove(jid)
        queue.locks.remove(jid)
        queue.scheduled.remove(jid)
        queue.depends.remove(jid)
      end

      -- Unlink this job from everything it depended on
      for i, j in ipairs(redis.call(
        'smembers', QlessJob.ns .. jid .. '-dependencies')) do
        redis.call('srem', QlessJob.ns .. j .. '-dependents', jid)
      end

      redis.call('del', QlessJob.ns .. jid .. '-dependencies')

      if state == 'failed' then
        failure = cjson.decode(failure)
        redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
        if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
          redis.call('srem', 'ql:failures', failure.group)
        end
        -- Decrement the daily failure stat for the bin the failure fell in
        local bin = failure.when - (failure.when % 86400)
        local failed = redis.call(
          'hget', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed')
        redis.call('hset',
          'ql:s:stats:' .. bin .. ':' .. queue, 'failed', failed - 1)
      end

      local tags = cjson.decode(
        redis.call('hget', QlessJob.ns .. jid, 'tags') or '{}')
      for i, tag in ipairs(tags) do
        redis.call('zrem', 'ql:t:' .. tag, jid)
        redis.call('zincrby', 'ql:tags', -1, tag)
      end

      if redis.call('zscore', 'ql:tracked', jid) ~= false then
        Qless.publish('canceled', jid)
      end

      redis.call('del', QlessJob.ns .. jid)
      redis.call('del', QlessJob.ns .. jid ..
'-history')
    end
  end

  return arg
end


-- Default configuration values, used when a key is absent from 'ql:config'
Qless.config.defaults = {
  ['application'] = 'qless',
  ['heartbeat'] = 60,
  ['grace-period'] = 10,
  ['stats-history'] = 30,
  ['histogram-history'] = 7,
  ['jobs-history-count'] = 50000,
  ['jobs-history'] = 604800
}

-- Get one config value (falling back to the defaults table, then to the
-- provided default), or, with no key, return the complete effective
-- configuration (stored values merged over the defaults).
Qless.config.get = function(key, default)
  if key then
    return redis.call('hget', 'ql:config', key) or
      Qless.config.defaults[key] or default
  else
    local reply = redis.call('hgetall', 'ql:config')
    for i = 1, #reply, 2 do
      Qless.config.defaults[reply[i]] = reply[i + 1]
    end
    return Qless.config.defaults
  end
end

-- Set a config option, publishing a 'config_set' log event
Qless.config.set = function(option, value)
  assert(option, 'config.set(): Arg "option" missing')
  assert(value , 'config.set(): Arg "value" missing')
  Qless.publish('log', cjson.encode({
    event = 'config_set',
    option = option,
    value = value
  }))

  redis.call('hset', 'ql:config', option, value)
end

-- Unset a config option, publishing a 'config_unset' log event
Qless.config.unset = function(option)
  assert(option, 'config.unset(): Arg "option" missing')
  Qless.publish('log', cjson.encode({
    event = 'config_unset',
    option = option
  }))

  redis.call('hdel', 'ql:config', option)
end

-- Return this job's data as a table, or nil if the job does not exist.
-- With field names as arguments, return just those fields, in order.
function QlessJob:data(...)
  local job = redis.call(
    'hmget', QlessJob.ns .. self.jid, 'jid', 'klass', 'state', 'queue',
    'worker', 'priority', 'expires', 'retries', 'remaining', 'data',
    'tags', 'failure', 'spawned_from_jid')

  -- hmget yields false for every field when the key is missing
  if not job[1] then
    return nil
  end

  local data = {
    jid = job[1],
    klass = job[2],
    state = job[3],
    queue = job[4],
    worker = job[5] or '',
    tracked = redis.call(
      'zscore', 'ql:tracked', self.jid) ~= false,
    priority = tonumber(job[6]),
    expires = tonumber(job[7]) or 0,
    retries = tonumber(job[8]),
    remaining = math.floor(tonumber(job[9])),
    data = job[10],
    tags = cjson.decode(job[11]),
    history = self:history(),
    failure = cjson.decode(job[12] or '{}'),
    spawned_from_jid = job[13],
    dependents = redis.call(
      'smembers', QlessJob.ns .. self.jid .. '-dependents'),
    dependencies = redis.call(
      'smembers', QlessJob.ns .. self.jid .. '-dependencies')
  }

  if #arg > 0 then
    local response = {}
    for index, key in ipairs(arg) do
      table.insert(response, data[key])
    end
    return response
  else
    return data
  end
end

-- Complete(now, worker, queue, raw_data, [options...])
-- Mark a running job as complete. Options are key/value pairs:
--   next    -- queue to advance the job to
--   delay   -- seconds before it may run in the next queue (needs next)
--   depends -- JSON list of jids it must wait on (needs next)
-- Raises if the job does not exist, is not running, belongs to another
-- worker, or is in a different queue. Returns the job's resulting
-- state: 'scheduled', 'depends', 'waiting' or 'complete'.
function QlessJob:complete(now, worker, queue, raw_data, ...)
  assert(worker, 'Complete(): Arg "worker" missing')
  assert(queue , 'Complete(): Arg "queue" missing')
  local data = assert(cjson.decode(raw_data),
    'Complete(): Arg "data" missing or not JSON: ' .. tostring(raw_data))

  -- Fold trailing varargs into an options table
  local options = {}
  for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end

  local nextq = options['next']
  local delay = assert(tonumber(options['delay'] or 0))
  local depends = assert(cjson.decode(options['depends'] or '[]'),
    'Complete(): Arg "depends" not JSON: ' .. tostring(options['depends']))

  if options['delay'] and nextq == nil then
    error('Complete(): "delay" cannot be used without a "next".')
  end

  if options['depends'] and nextq == nil then
    error('Complete(): "depends" cannot be used without a "next".')
  end

  -- Daily stats bin for this timestamp
  local bin = now - (now % 86400)

  local lastworker, state, priority, retries, current_queue = unpack(
    redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state',
      'priority', 'retries', 'queue'))

  if lastworker == false then
    error('Complete(): Job ' .. self.jid .. ' does not exist')
  elseif (state ~= 'running') then
    error('Complete(): Job ' .. self.jid .. ' is not currently running: ' ..
      state)
  elseif lastworker ~= worker then
    error('Complete(): Job ' .. self.jid ..
      ' has been handed out to another worker: ' .. tostring(lastworker))
  elseif queue ~= current_queue then
    error('Complete(): Job ' .. self.jid .. ' running in another queue: ' ..
      tostring(current_queue))
  end

  self:history(now, 'done')

  if raw_data then
    redis.call('hset', QlessJob.ns .. self.jid, 'data', raw_data)
  end

  local queue_obj = Qless.queue(queue)
  queue_obj.work.remove(self.jid)
  queue_obj.locks.remove(self.jid)
  queue_obj.scheduled.remove(self.jid)

  -- Record how long the job spent since its last 'time' mark
  local time = tonumber(
    redis.call('hget', QlessJob.ns .. self.jid, 'time') or now)
  local waiting = now - time
  Qless.queue(queue):stat(now, 'run', waiting)
  redis.call('hset', QlessJob.ns .. self.jid,
    'time', string.format("%.20f", now))

  redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)

  if redis.call('zscore', 'ql:tracked', self.jid) ~= false then
    Qless.publish('completed', self.jid)
  end

  if nextq then
    -- Advance the job to the next queue
    queue_obj = Qless.queue(nextq)
    Qless.publish('log', cjson.encode({
      jid = self.jid,
      event = 'advanced',
      queue = queue,
      to = nextq
    }))

    self:history(now, 'put', {q = nextq})

    if redis.call('zscore', 'ql:queues', nextq) == false then
      redis.call('zadd', 'ql:queues', now, nextq)
    end

    redis.call('hmset', QlessJob.ns .. self.jid,
      'state', 'waiting',
      'worker', '',
      'failure', '{}',
      'queue', nextq,
      'expires', 0,
      'remaining', tonumber(retries))

    if (delay > 0) and (#depends == 0) then
      queue_obj.scheduled.add(now + delay, self.jid)
      return 'scheduled'
    else
      -- Count dependencies that are still outstanding
      local count = 0
      for i, j in ipairs(depends) do
        local state = redis.call('hget', QlessJob.ns .. j, 'state')
        if (state and state ~= 'complete') then
          count = count + 1
          redis.call(
            'sadd', QlessJob.ns .. j .. '-dependents',self.jid)
          redis.call(
            'sadd', QlessJob.ns .. self.jid .. '-dependencies', j)
        end
      end
      if count > 0 then
        queue_obj.depends.add(now, self.jid)
        redis.call('hset', QlessJob.ns .. self.jid, 'state', 'depends')
        if delay > 0 then
          -- Remember the delay so it is honored once dependencies clear
          queue_obj.depends.add(now, self.jid)
          redis.call('hset', QlessJob.ns .. self.jid, 'scheduled', now + delay)
        end
        return 'depends'
      else
        queue_obj.work.add(now, priority, self.jid)
        return 'waiting'
      end
    end
  else
    Qless.publish('log', cjson.encode({
      jid = self.jid,
      event = 'completed',
      queue = queue
    }))

    redis.call('hmset', QlessJob.ns .. self.jid,
      'state', 'complete',
      'worker', '',
      'failure', '{}',
      'queue', '',
      'expires', 0,
      'remaining', tonumber(retries))

    local count = Qless.config.get('jobs-history-count')
    local time = Qless.config.get('jobs-history')

    count = tonumber(count or 50000)
    time = tonumber(time or 7 * 24 * 60 * 60)

    redis.call('zadd', 'ql:completed', now, self.jid)

    -- Expire completed jobs older than the history window...
    local jids = redis.call('zrangebyscore', 'ql:completed', 0, now - time)
    for index, jid in ipairs(jids) do
      local tags = cjson.decode(
        redis.call('hget', QlessJob.ns .. jid, 'tags') or '{}')
      for i, tag in ipairs(tags) do
        redis.call('zrem', 'ql:t:' .. tag, jid)
        redis.call('zincrby', 'ql:tags', -1, tag)
      end
      redis.call('del', QlessJob.ns .. jid)
      redis.call('del', QlessJob.ns .. jid .. '-history')
    end
    redis.call('zremrangebyscore', 'ql:completed', 0, now - time)

    -- ...and also cap the total number of retained completed jobs
    jids = redis.call('zrange', 'ql:completed', 0, (-1-count))
    for index, jid in ipairs(jids) do
      local tags = cjson.decode(
        redis.call('hget', QlessJob.ns .. jid, 'tags') or '{}')
      for i, tag in ipairs(tags) do
        redis.call('zrem', 'ql:t:' .. tag, jid)
        redis.call('zincrby', 'ql:tags', -1, tag)
      end
      redis.call('del', QlessJob.ns .. jid)
      redis.call('del', QlessJob.ns .. jid .. '-history')
    end
    redis.call('zremrangebyrank', 'ql:completed', 0, (-1-count))

    -- Wake up any jobs that were waiting only on this one
    for i, j in ipairs(redis.call(
      'smembers', QlessJob.ns .. self.jid .. '-dependents')) do
      redis.call('srem', QlessJob.ns .. j .. '-dependencies', self.jid)
      if redis.call(
        'scard', QlessJob.ns .. j .. '-dependencies') == 0 then
        local q, p, scheduled = unpack(
          redis.call('hmget', QlessJob.ns ..
j, 'queue', 'priority', 'scheduled'))
        if q then
          local queue = Qless.queue(q)
          queue.depends.remove(j)
          if scheduled then
            -- The dependent also carried a delay; schedule it now
            queue.scheduled.add(scheduled, j)
            redis.call('hset', QlessJob.ns .. j, 'state', 'scheduled')
            redis.call('hdel', QlessJob.ns .. j, 'scheduled')
          else
            queue.work.add(now, p, j)
            redis.call('hset', QlessJob.ns .. j, 'state', 'waiting')
          end
        end
      end
    end

    redis.call('del', QlessJob.ns .. self.jid .. '-dependents')

    return 'complete'
  end
end

-- Fail(now, worker, group, message, [data])
-- Mark a running job as failed under the given failure group, recording
-- the message (and optionally replacing the job's data). Publishes
-- 'failed'/log events, bumps daily failure stats, and files the jid
-- under 'ql:f:<group>'. Returns the jid.
function QlessJob:fail(now, worker, group, message, data)
  local worker  = assert(worker , 'Fail(): Arg "worker" missing')
  local group   = assert(group  , 'Fail(): Arg "group" missing')
  local message = assert(message , 'Fail(): Arg "message" missing')

  -- Daily stats bin
  local bin = now - (now % 86400)

  if data then
    data = cjson.decode(data)
  end

  local queue, state, oldworker = unpack(redis.call(
    'hmget', QlessJob.ns .. self.jid, 'queue', 'state', 'worker'))

  if not state then
    error('Fail(): Job ' .. self.jid .. 'does not exist')
  elseif state ~= 'running' then
    error('Fail(): Job ' .. self.jid .. 'not currently running: ' .. state)
  elseif worker ~= oldworker then
    error('Fail(): Job ' .. self.jid .. ' running with another worker: ' ..
      oldworker)
  end

  Qless.publish('log', cjson.encode({
    jid = self.jid,
    event = 'failed',
    worker = worker,
    group = group,
    message = message
  }))

  if redis.call('zscore', 'ql:tracked', self.jid) ~= false then
    Qless.publish('failed', self.jid)
  end

  redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)

  self:history(now, 'failed', {worker = worker, group = group})

  redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failures', 1)
  redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed' , 1)

  local queue_obj = Qless.queue(queue)
  queue_obj.work.remove(self.jid)
  queue_obj.locks.remove(self.jid)
  queue_obj.scheduled.remove(self.jid)

  if data then
    redis.call('hset', QlessJob.ns .. self.jid, 'data', cjson.encode(data))
  end

  redis.call('hmset', QlessJob.ns .. self.jid,
    'state', 'failed',
    'worker', '',
    'expires', '',
    'failure', cjson.encode({
      ['group']   = group,
      ['message'] = message,
      ['when']    = math.floor(now),
      ['worker']  = worker
    }))

  redis.call('sadd', 'ql:failures', group)
  redis.call('lpush', 'ql:f:' .. group, self.jid)


  return self.jid
end

-- Retry(now, queue, worker, [delay, [group, [message]]])
-- Put a running job back for retry, decrementing its remaining retries.
-- When retries are exhausted the job is failed instead (group defaults
-- to 'failed-retries-<queue>'); otherwise it is re-queued, scheduled
-- 'delay' seconds out when given. Returns the retries remaining
-- (negative when exhausted).
function QlessJob:retry(now, queue, worker, delay, group, message)
  assert(queue , 'Retry(): Arg "queue" missing')
  assert(worker, 'Retry(): Arg "worker" missing')
  delay = assert(tonumber(delay or 0),
    'Retry(): Arg "delay" not a number: ' .. tostring(delay))

  local oldqueue, state, retries, oldworker, priority, failure = unpack(
    redis.call('hmget', QlessJob.ns .. self.jid, 'queue', 'state',
      'retries', 'worker', 'priority', 'failure'))

  if oldworker == false then
    error('Retry(): Job ' .. self.jid .. ' does not exist')
  elseif state ~= 'running' then
    error('Retry(): Job ' .. self.jid .. ' is not currently running: ' ..
      state)
  elseif oldworker ~= worker then
    error('Retry(): Job ' .. self.jid ..
      ' has been given to another worker: ' .. oldworker)
  end

  local remaining = tonumber(redis.call(
    'hincrby', QlessJob.ns .. self.jid, 'remaining', -1))
  redis.call('hdel', QlessJob.ns .. self.jid, 'grace')

  Qless.queue(oldqueue).locks.remove(self.jid)

  redis.call('zrem', 'ql:w:' .. worker .. ':jobs', self.jid)

  if remaining < 0 then
    -- Retries exhausted: fail the job instead of re-queueing
    local group = group or 'failed-retries-' .. queue
    self:history(now, 'failed', {['group'] = group})

    redis.call('hmset', QlessJob.ns .. self.jid, 'state', 'failed',
      'worker', '',
      'expires', '')
    if group ~= nil and message ~= nil then
      redis.call('hset', QlessJob.ns .. self.jid,
        'failure', cjson.encode({
          ['group']   = group,
          ['message'] = message,
          ['when']    = math.floor(now),
          ['worker']  = worker
        })
      )
    else
      redis.call('hset', QlessJob.ns .. self.jid,
        'failure', cjson.encode({
          ['group']   = group,
          ['message'] =
            'Job exhausted retries in queue "' .. oldqueue .. '"',
          ['when']    = now,
          ['worker']  = unpack(self:data('worker'))
        }))
    end

    redis.call('sadd', 'ql:failures', group)
    redis.call('lpush', 'ql:f:' .. group, self.jid)
    local bin = now - (now % 86400)
    redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failures', 1)
    redis.call('hincrby', 'ql:s:stats:' .. bin .. ':' .. queue, 'failed' , 1)
  else
    local queue_obj = Qless.queue(queue)
    if delay > 0 then
      queue_obj.scheduled.add(now + delay, self.jid)
      redis.call('hset', QlessJob.ns .. self.jid, 'state', 'scheduled')
    else
      queue_obj.work.add(now, priority, self.jid)
      redis.call('hset', QlessJob.ns .. self.jid, 'state', 'waiting')
    end

    -- An optional group/message records why the retry happened
    if group ~= nil and message ~= nil then
      redis.call('hset', QlessJob.ns .. self.jid,
        'failure', cjson.encode({
          ['group']   = group,
          ['message'] = message,
          ['when']    = math.floor(now),
          ['worker']  = worker
        })
      )
    end
  end

  return math.floor(remaining)
end

-- Depends(now, 'on'|'off', ...)
--   'on'  : add the given jids as dependencies of a job in the
--           'depends' state (only non-complete jobs are linked)
--   'off' : remove the given jids (or the literal 'all') from its
--           dependencies; once the last dependency clears, the job is
--           moved back into its queue's work set as 'waiting'
function QlessJob:depends(now, command, ...)
  assert(command, 'Depends(): Arg "command" missing')
  local state = redis.call('hget', QlessJob.ns .. self.jid, 'state')
  if state ~= 'depends' then
    error('Depends(): Job ' .. self.jid ..
      ' not in the depends state: ' .. tostring(state))
  end

  if command == 'on' then
    for i, j in ipairs(arg) do
      local state = redis.call('hget', QlessJob.ns .. j, 'state')
      if (state and state ~= 'complete') then
        redis.call(
          'sadd', QlessJob.ns .. j .. '-dependents' , self.jid)
        redis.call(
          'sadd', QlessJob.ns .. self.jid .. '-dependencies', j)
      end
    end
    return true
  elseif command == 'off' then
    if arg[1] == 'all' then
      for i, j in ipairs(redis.call(
        'smembers', QlessJob.ns .. self.jid .. '-dependencies')) do
        redis.call('srem', QlessJob.ns .. j .. '-dependents', self.jid)
      end
      redis.call('del', QlessJob.ns .. self.jid .. '-dependencies')
      local q, p = unpack(redis.call(
        'hmget', QlessJob.ns .. self.jid, 'queue', 'priority'))
      if q then
        local queue_obj = Qless.queue(q)
        queue_obj.depends.remove(self.jid)
        queue_obj.work.add(now, p, self.jid)
        redis.call('hset', QlessJob.ns .. self.jid, 'state', 'waiting')
      end
    else
      for i, j in ipairs(arg) do
        redis.call('srem', QlessJob.ns .. j .. '-dependents', self.jid)
        redis.call(
          'srem', QlessJob.ns .. self.jid .. '-dependencies', j)
        if redis.call('scard',
          QlessJob.ns .. self.jid .. '-dependencies') == 0 then
          local q, p = unpack(redis.call(
            'hmget', QlessJob.ns .. self.jid, 'queue', 'priority'))
          if q then
            local queue_obj = Qless.queue(q)
            queue_obj.depends.remove(self.jid)
            queue_obj.work.add(now, p, self.jid)
            redis.call('hset',
              QlessJob.ns ..
self.jid, 'state', 'waiting')
          end
        end
      end
    end
    return true
  else
    error('Depends(): Argument "command" must be "on" or "off"')
  end
end

-- Heartbeat(now, worker, [data])
-- Renew the given worker's lock on this running job, optionally
-- updating the job's data, and refresh the per-worker jobs index and
-- the queue's locks set. Returns the new lock expiry time.
-- NOTE(review): the 'Heatbeat' spelling below is in the original
-- message text; left as-is to stay compatible with callers matching it.
function QlessJob:heartbeat(now, worker, data)
  assert(worker, 'Heatbeat(): Arg "worker" missing')

  -- A per-queue '<queue>-heartbeat' setting overrides the global one
  local queue = redis.call('hget', QlessJob.ns .. self.jid, 'queue') or ''
  local expires = now + tonumber(
    Qless.config.get(queue .. '-heartbeat') or
    Qless.config.get('heartbeat', 60))

  if data then
    data = cjson.decode(data)
  end

  local job_worker, state = unpack(
    redis.call('hmget', QlessJob.ns .. self.jid, 'worker', 'state'))
  if job_worker == false then
    error('Heartbeat(): Job ' .. self.jid .. ' does not exist')
  elseif state ~= 'running' then
    error(
      'Heartbeat(): Job ' .. self.jid .. ' not currently running: ' .. state)
  elseif job_worker ~= worker or #job_worker == 0 then
    error(
      'Heartbeat(): Job ' .. self.jid ..
      ' given out to another worker: ' .. job_worker)
  else
    if data then
      redis.call('hmset', QlessJob.ns .. self.jid, 'expires',
        expires, 'worker', worker, 'data', cjson.encode(data))
    else
      redis.call('hmset', QlessJob.ns .. self.jid,
        'expires', expires, 'worker', worker)
    end

    redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, self.jid)

    local queue = Qless.queue(
      redis.call('hget', QlessJob.ns .. self.jid, 'queue'))
    queue.locks.add(expires, self.jid)
    return expires
  end
end

-- Set this job's priority, repositioning it in its queue's work set if
-- it is currently waiting there. Returns the new priority.
function QlessJob:priority(priority)
  priority = assert(tonumber(priority),
    'Priority(): Arg "priority" missing or not a number: ' ..
    tostring(priority))

  local queue = redis.call('hget', QlessJob.ns .. self.jid, 'queue')

  if queue == nil or queue == false then
    error('Priority(): Job ' .. self.jid .. ' does not exist')
  elseif queue == '' then
    -- Not in any queue; just record the new priority on the hash
    redis.call('hset', QlessJob.ns .. self.jid, 'priority', priority)
    return priority
  else
    local queue_obj = Qless.queue(queue)
    if queue_obj.work.score(self.jid) then
      queue_obj.work.add(0, priority, self.jid)
    end
    redis.call('hset', QlessJob.ns .. self.jid, 'priority', priority)
    return priority
  end
end

-- Write the given key/value pairs onto the job's hash
function QlessJob:update(data)
  local tmp = {}
  for k, v in pairs(data) do
    table.insert(tmp, k)
    table.insert(tmp, v)
  end
  redis.call('hmset', QlessJob.ns .. self.jid, unpack(tmp))
end

-- Time out a running job: drop its lock, mark it 'stalled', and push it
-- back onto its queue's work set. Publishes a 'lock_lost' event to the
-- worker's channel and the log. Returns the queue name.
function QlessJob:timeout(now)
  local queue_name, state, worker = unpack(redis.call('hmget',
    QlessJob.ns .. self.jid, 'queue', 'state', 'worker'))
  if queue_name == nil or queue_name == false then
    error('Timeout(): Job ' .. self.jid .. ' does not exist')
  elseif state ~= 'running' then
    error('Timeout(): Job ' .. self.jid .. ' not running')
  else
    self:history(now, 'timed-out')
    local queue = Qless.queue(queue_name)
    queue.locks.remove(self.jid)
    -- math.huge priority puts the job at the front of the work set
    queue.work.add(now, math.huge, self.jid)
    redis.call('hmset', QlessJob.ns .. self.jid,
      'state', 'stalled', 'expires', 0)
    local encoded = cjson.encode({
      jid = self.jid,
      event = 'lock_lost',
      worker = worker
    })
    Qless.publish('w:' .. worker, encoded)
    Qless.publish('log', encoded)
    return queue_name
  end
end

-- True if the job's hash exists
function QlessJob:exists()
  return redis.call('exists', QlessJob.ns .. self.jid) == 1
end

-- History(now, [what, [item]])
-- With no 'what', return the job's history as a list of tables (each
-- with 'when', 'what', plus any event-specific keys). With 'what' (and
-- an optional 'item' table), append an event, trimming the list to the
-- configured 'max-job-history' while keeping the oldest entry. Any
-- legacy JSON history stored on the job hash is migrated first.
function QlessJob:history(now, what, item)
  -- One-time migration of the old hash-field history format
  local history = redis.call('hget', QlessJob.ns .. self.jid, 'history')
  if history then
    history = cjson.decode(history)
    for i, value in ipairs(history) do
      redis.call('rpush', QlessJob.ns .. self.jid .. '-history',
        cjson.encode({math.floor(value.put), 'put', {q = value.q}}))

      if value.popped then
        redis.call('rpush', QlessJob.ns .. self.jid .. '-history',
          cjson.encode({math.floor(value.popped), 'popped',
            {worker = value.worker}}))
      end

      if value.failed then
        redis.call('rpush', QlessJob.ns .. self.jid .. '-history',
          cjson.encode(
            {math.floor(value.failed), 'failed', nil}))
      end

      if value.done then
        redis.call('rpush', QlessJob.ns .. self.jid .. '-history',
          cjson.encode(
            {math.floor(value.done), 'done', nil}))
      end
    end
    redis.call('hdel', QlessJob.ns .. self.jid, 'history')
  end

  if what == nil then
    local response = {}
    for i, value in ipairs(redis.call('lrange',
      QlessJob.ns .. self.jid .. '-history', 0, -1)) do
      value = cjson.decode(value)
      local dict = value[3] or {}
      dict['when'] = value[1]
      dict['what'] = value[2]
      table.insert(response, dict)
    end
    return response
  else
    local count = tonumber(Qless.config.get('max-job-history', 100))
    if count > 0 then
      -- Pop the first (oldest) entry, trim to the most recent entries,
      -- then push the oldest entry back on the front
      local obj = redis.call('lpop', QlessJob.ns .. self.jid .. '-history')
      redis.call('ltrim', QlessJob.ns .. self.jid .. '-history', -count + 2, -1)
      if obj ~= nil and obj ~= false then
        redis.call('lpush', QlessJob.ns .. self.jid .. '-history', obj)
      end
    end
    return redis.call('rpush', QlessJob.ns .. self.jid .. '-history',
      cjson.encode({math.floor(now), what, item}))
  end
end
-- Construct a queue object for 'name', exposing its work, locks,
-- depends, scheduled and recurring sorted sets via accessor tables.
function Qless.queue(name)
  assert(name, 'Queue(): no queue name provided')
  local queue = {}
  setmetatable(queue, QlessQueue)
  queue.name = name

  -- Jobs ready to be popped, scored so that higher priority (and,
  -- within a priority, older insertion) comes back first via zrevrange
  queue.work = {
    peek = function(count)
      if count == 0 then
        return {}
      end
      local jids = {}
      for index, jid in ipairs(redis.call(
        'zrevrange', queue:prefix('work'), 0, count - 1)) do
        table.insert(jids, jid)
      end
      return jids
    end, remove = function(...)
      if #arg > 0 then
        return redis.call('zrem', queue:prefix('work'), unpack(arg))
      end
    end, add = function(now, priority, jid)
      -- Subtracting a tiny function of 'now' breaks priority ties in
      -- favor of older jobs
      return redis.call('zadd',
        queue:prefix('work'), priority - (now / 10000000000), jid)
    end, score = function(jid)
      return redis.call('zscore', queue:prefix('work'), jid)
    end, length = function()
      return redis.call('zcard', queue:prefix('work'))
    end
  }

  -- Locks held on running jobs, scored by their expiry time
  queue.locks = {
    expired = function(now, offset, count)
      return redis.call('zrangebyscore',
        queue:prefix('locks'), -math.huge, now, 'LIMIT', offset, count)
    end, peek = function(now, offset, count)
      return redis.call('zrangebyscore', queue:prefix('locks'),
        now, math.huge, 'LIMIT', offset, count)
    end, add = function(expires, jid)
      redis.call('zadd', queue:prefix('locks'), expires, jid)
    end, remove = function(...)
980 | if #arg > 0 then 981 | return redis.call('zrem', queue:prefix('locks'), unpack(arg)) 982 | end 983 | end, running = function(now) 984 | return redis.call('zcount', queue:prefix('locks'), now, math.huge) 985 | end, length = function(now) 986 | if now then 987 | return redis.call('zcount', queue:prefix('locks'), 0, now) 988 | else 989 | return redis.call('zcard', queue:prefix('locks')) 990 | end 991 | end 992 | } 993 | 994 | queue.depends = { 995 | peek = function(now, offset, count) 996 | return redis.call('zrange', 997 | queue:prefix('depends'), offset, offset + count - 1) 998 | end, add = function(now, jid) 999 | redis.call('zadd', queue:prefix('depends'), now, jid) 1000 | end, remove = function(...) 1001 | if #arg > 0 then 1002 | return redis.call('zrem', queue:prefix('depends'), unpack(arg)) 1003 | end 1004 | end, length = function() 1005 | return redis.call('zcard', queue:prefix('depends')) 1006 | end 1007 | } 1008 | 1009 | queue.scheduled = { 1010 | peek = function(now, offset, count) 1011 | return redis.call('zrange', 1012 | queue:prefix('scheduled'), offset, offset + count - 1) 1013 | end, ready = function(now, offset, count) 1014 | return redis.call('zrangebyscore', 1015 | queue:prefix('scheduled'), 0, now, 'LIMIT', offset, count) 1016 | end, add = function(when, jid) 1017 | redis.call('zadd', queue:prefix('scheduled'), when, jid) 1018 | end, remove = function(...) 1019 | if #arg > 0 then 1020 | return redis.call('zrem', queue:prefix('scheduled'), unpack(arg)) 1021 | end 1022 | end, length = function() 1023 | return redis.call('zcard', queue:prefix('scheduled')) 1024 | end 1025 | } 1026 | 1027 | queue.recurring = { 1028 | peek = function(now, offset, count) 1029 | return redis.call('zrangebyscore', queue:prefix('recur'), 1030 | 0, now, 'LIMIT', offset, count) 1031 | end, ready = function(now, offset, count) 1032 | end, add = function(when, jid) 1033 | redis.call('zadd', queue:prefix('recur'), when, jid) 1034 | end, remove = function(...) 
1035 | if #arg > 0 then 1036 | return redis.call('zrem', queue:prefix('recur'), unpack(arg)) 1037 | end 1038 | end, update = function(increment, jid) 1039 | redis.call('zincrby', queue:prefix('recur'), increment, jid) 1040 | end, score = function(jid) 1041 | return redis.call('zscore', queue:prefix('recur'), jid) 1042 | end, length = function() 1043 | return redis.call('zcard', queue:prefix('recur')) 1044 | end 1045 | } 1046 | return queue 1047 | end 1048 | 1049 | function QlessQueue:prefix(group) 1050 | if group then 1051 | return QlessQueue.ns..self.name..'-'..group 1052 | else 1053 | return QlessQueue.ns..self.name 1054 | end 1055 | end 1056 | 1057 | function QlessQueue:stats(now, date) 1058 | date = assert(tonumber(date), 1059 | 'Stats(): Arg "date" missing or not a number: '.. (date or 'nil')) 1060 | 1061 | local bin = date - (date % 86400) 1062 | 1063 | local histokeys = { 1064 | 's0','s1','s2','s3','s4','s5','s6','s7','s8','s9','s10','s11','s12','s13','s14','s15','s16','s17','s18','s19','s20','s21','s22','s23','s24','s25','s26','s27','s28','s29','s30','s31','s32','s33','s34','s35','s36','s37','s38','s39','s40','s41','s42','s43','s44','s45','s46','s47','s48','s49','s50','s51','s52','s53','s54','s55','s56','s57','s58','s59', 1065 | 'm1','m2','m3','m4','m5','m6','m7','m8','m9','m10','m11','m12','m13','m14','m15','m16','m17','m18','m19','m20','m21','m22','m23','m24','m25','m26','m27','m28','m29','m30','m31','m32','m33','m34','m35','m36','m37','m38','m39','m40','m41','m42','m43','m44','m45','m46','m47','m48','m49','m50','m51','m52','m53','m54','m55','m56','m57','m58','m59', 1066 | 'h1','h2','h3','h4','h5','h6','h7','h8','h9','h10','h11','h12','h13','h14','h15','h16','h17','h18','h19','h20','h21','h22','h23', 1067 | 'd1','d2','d3','d4','d5','d6' 1068 | } 1069 | 1070 | local mkstats = function(name, bin, queue) 1071 | local results = {} 1072 | 1073 | local key = 'ql:s:' .. name .. ':' .. bin .. ':' .. 
queue 1074 | local count, mean, vk = unpack(redis.call('hmget', key, 'total', 'mean', 'vk')) 1075 | 1076 | count = tonumber(count) or 0 1077 | mean = tonumber(mean) or 0 1078 | vk = tonumber(vk) 1079 | 1080 | results.count = count or 0 1081 | results.mean = mean or 0 1082 | results.histogram = {} 1083 | 1084 | if not count then 1085 | results.std = 0 1086 | else 1087 | if count > 1 then 1088 | results.std = math.sqrt(vk / (count - 1)) 1089 | else 1090 | results.std = 0 1091 | end 1092 | end 1093 | 1094 | local histogram = redis.call('hmget', key, unpack(histokeys)) 1095 | for i=1,#histokeys do 1096 | table.insert(results.histogram, tonumber(histogram[i]) or 0) 1097 | end 1098 | return results 1099 | end 1100 | 1101 | local retries, failed, failures = unpack(redis.call('hmget', 'ql:s:stats:' .. bin .. ':' .. self.name, 'retries', 'failed', 'failures')) 1102 | return { 1103 | retries = tonumber(retries or 0), 1104 | failed = tonumber(failed or 0), 1105 | failures = tonumber(failures or 0), 1106 | wait = mkstats('wait', bin, self.name), 1107 | run = mkstats('run' , bin, self.name) 1108 | } 1109 | end 1110 | 1111 | function QlessQueue:peek(now, count) 1112 | count = assert(tonumber(count), 1113 | 'Peek(): Arg "count" missing or not a number: ' .. tostring(count)) 1114 | 1115 | local jids = self.locks.expired(now, 0, count) 1116 | 1117 | self:check_recurring(now, count - #jids) 1118 | 1119 | self:check_scheduled(now, count - #jids) 1120 | 1121 | tbl_extend(jids, self.work.peek(count - #jids)) 1122 | 1123 | return jids 1124 | end 1125 | 1126 | function QlessQueue:paused() 1127 | return redis.call('sismember', 'ql:paused_queues', self.name) == 1 1128 | end 1129 | 1130 | function QlessQueue.pause(now, ...) 1131 | redis.call('sadd', 'ql:paused_queues', unpack(arg)) 1132 | end 1133 | 1134 | function QlessQueue.unpause(...) 
redis.call('srem', 'ql:paused_queues', unpack(arg))
end

-- Pop up to `count` jobs for `worker`, marking each as running with an
-- expiry derived from the (per-queue or global) heartbeat. Returns the
-- list of popped jids.
function QlessQueue:pop(now, worker, count)
    assert(worker, 'Pop(): Arg "worker" missing')
    count = assert(tonumber(count),
        'Pop(): Arg "count" missing or not a number: ' .. tostring(count))

    -- A queue-specific heartbeat overrides the global one (default 60s)
    local expires = now + tonumber(
        Qless.config.get(self.name .. '-heartbeat') or
        Qless.config.get('heartbeat', 60))

    -- Paused queues never hand out work
    if self:paused() then
        return {}
    end

    -- Keep the worker registered as recently seen
    redis.call('zadd', 'ql:workers', now, worker)

    -- Honour a per-queue concurrency cap, if configured
    local max_concurrency = tonumber(
        Qless.config.get(self.name .. '-max-concurrency', 0))
    if max_concurrency > 0 then
        local slots = math.max(0, max_concurrency - self.locks.running(now))
        count = math.min(slots, count)
        if count == 0 then
            return {}
        end
    end

    -- Reclaim jobs with lapsed locks, then top up from recurring,
    -- scheduled, and finally the regular work queue
    local jids = self:invalidate_locks(now, count)
    self:check_recurring(now, count - #jids)
    self:check_scheduled(now, count - #jids)
    tbl_extend(jids, self.work.peek(count - #jids))

    local state
    for _, jid in ipairs(jids) do
        local job = Qless.job(jid)
        state = unpack(job:data('state'))
        job:history(now, 'popped', {worker = worker})

        -- Record how long the job sat waiting before this pop
        local enqueued_at = tonumber(
            redis.call('hget', QlessJob.ns .. jid, 'time') or now)
        self:stat(now, 'wait', now - enqueued_at)
        redis.call('hset', QlessJob.ns .. jid,
            'time', string.format("%.20f", now))

        -- Hand the lock to the worker until `expires`
        redis.call('zadd', 'ql:w:' .. worker .. ':jobs', expires, jid)

        job:update({
            worker  = worker,
            expires = expires,
            state   = 'running'
        })

        self.locks.add(expires, jid)

        if redis.call('zscore', 'ql:tracked', jid) ~= false then
            Qless.publish('popped', jid)
        end
    end

    self.work.remove(unpack(jids))

    return jids
end

-- Fold `val` (a duration in seconds) into today's running statistics for
-- `stat` ('wait' or 'run'): total count, mean, the vk variance accumulator
-- (Welford's method) and a magnitude histogram.
function QlessQueue:stat(now, stat, val)
    local bin = now - (now % 86400)
    local key = 'ql:s:' .. stat .. ':' .. bin .. ':' .. self.name

    local count, mean, vk = unpack(
        redis.call('hmget', key, 'total', 'mean', 'vk'))

    count = count or 0
    if count == 0 then
        mean = val
        vk = 0
        count = 1
    else
        -- Welford's online update (relies on Lua string->number coercion
        -- for the values read back from Redis)
        count = count + 1
        local oldmean = mean
        mean = mean + (val - mean) / count
        vk = vk + (val - mean) * (val - oldmean)
    end

    -- Bucket the sample into the histogram by magnitude
    val = math.floor(val)
    if val < 60 then -- seconds
        redis.call('hincrby', key, 's' .. val, 1)
    elseif val < 3600 then -- minutes
        redis.call('hincrby', key, 'm' .. math.floor(val / 60), 1)
    elseif val < 86400 then -- hours
        redis.call('hincrby', key, 'h' .. math.floor(val / 3600), 1)
    else -- days
        redis.call('hincrby', key, 'd' .. math.floor(val / 86400), 1)
    end
    redis.call('hmset', key, 'total', count, 'mean', mean, 'vk', vk)
end

-- Put a job onto this queue (or move it here), resetting worker and lock
-- state. Trailing args are key/value option pairs: priority, tags,
-- retries, depends.
function QlessQueue:put(now, worker, jid, klass, raw_data, delay, ...)
    assert(jid  , 'Put(): Arg "jid" missing')
    assert(klass, 'Put(): Arg "klass" missing')
    local data = assert(cjson.decode(raw_data),
        'Put(): Arg "data" missing or not JSON: ' .. tostring(raw_data))
    delay = assert(tonumber(delay),
        'Put(): Arg "delay" not a number: ' .. tostring(delay))

    if #arg % 2 == 1 then
        error('Odd number of additional args: ' ..
tostring(arg))
    end

    -- Gather the key/value option pairs
    local options = {}
    for i = 1, #arg, 2 do options[arg[i]] = arg[i + 1] end

    local job = Qless.job(jid)
    local priority, tags, oldqueue, state, failure, retries, oldworker =
        unpack(redis.call('hmget', QlessJob.ns .. jid, 'priority', 'tags',
            'queue', 'state', 'failure', 'retries', 'worker'))

    -- Existing tags are dropped here and re-added below
    if tags then
        Qless.tag(now, 'remove', jid, unpack(cjson.decode(tags)))
    end

    retries  = assert(tonumber(options['retries']  or retries or 5) ,
        'Put(): Arg "retries" not a number: ' .. tostring(options['retries']))
    tags     = assert(cjson.decode(options['tags'] or tags or '[]' ),
        'Put(): Arg "tags" not JSON' .. tostring(options['tags']))
    priority = assert(tonumber(options['priority'] or priority or 0),
        'Put(): Arg "priority" not a number' .. tostring(options['priority']))
    local depends = assert(cjson.decode(options['depends'] or '[]') ,
        'Put(): Arg "depends" not JSON: ' .. tostring(options['depends']))

    -- Reconcile the stored dependency set against the requested one:
    -- anything no longer requested is unlinked in both directions
    if #depends > 0 then
        local requested = {}
        for _, d in ipairs(depends) do requested[d] = 1 end

        local original = redis.call(
            'smembers', QlessJob.ns .. jid .. '-dependencies')
        for _, dep in pairs(original) do
            if requested[dep] == nil or requested[dep] == false then
                redis.call('srem', QlessJob.ns .. dep .. '-dependents' , jid)
                redis.call('srem', QlessJob.ns .. jid .. '-dependencies', dep)
            end
        end
    end

    Qless.publish('log', cjson.encode({
        jid = jid,
        event = 'put',
        queue = self.name
    }))

    job:history(now, 'put', {q = self.name})

    -- If the job is changing queues, purge it from the old queue's indexes
    if oldqueue then
        local queue_obj = Qless.queue(oldqueue)
        queue_obj.work.remove(jid)
        queue_obj.locks.remove(jid)
        queue_obj.depends.remove(jid)
        queue_obj.scheduled.remove(jid)
    end

    -- If another worker held the job, let them know they lost the lock
    if oldworker and oldworker ~= '' then
        redis.call('zrem', 'ql:w:' .. oldworker .. ':jobs', jid)
        if oldworker ~= worker then
            local encoded = cjson.encode({
                jid = jid,
                event = 'lock_lost',
                worker = oldworker
            })
            Qless.publish('w:' .. oldworker, encoded)
            Qless.publish('log', encoded)
        end
    end

    if state == 'complete' then
        redis.call('zrem', 'ql:completed', jid)
    end

    for _, tag in ipairs(tags) do
        redis.call('zadd', 'ql:t:' .. tag, now, jid)
        redis.call('zincrby', 'ql:tags', 1, tag)
    end

    -- Re-putting a failed job clears it from the failure bookkeeping
    if state == 'failed' then
        failure = cjson.decode(failure)
        redis.call('lrem', 'ql:f:' .. failure.group, 0, jid)
        if redis.call('llen', 'ql:f:' .. failure.group) == 0 then
            redis.call('srem', 'ql:failures', failure.group)
        end
        local bin = failure.when - (failure.when % 86400)
        redis.call('hincrby',
            'ql:s:stats:' .. bin .. ':' .. self.name, 'failed', -1)
    end

    redis.call('hmset', QlessJob.ns .. jid,
        'jid'      , jid,
        'klass'    , klass,
        'data'     , raw_data,
        'priority' , priority,
        'tags'     , cjson.encode(tags),
        'state'    , ((delay > 0) and 'scheduled') or 'waiting',
        'worker'   , '',
        'expires'  , 0,
        'queue'    , self.name,
        'retries'  , retries,
        'remaining', retries,
        'time'     , string.format("%.20f", now))

    -- Register only dependencies that are still incomplete
    for _, dep in ipairs(depends) do
        local dep_state = redis.call('hget', QlessJob.ns .. dep, 'state')
        if (dep_state and dep_state ~= 'complete') then
            redis.call('sadd', QlessJob.ns .. dep .. '-dependents' , jid)
            redis.call('sadd', QlessJob.ns .. jid .. '-dependencies', dep)
        end
    end

    -- File the job as depends / scheduled / waiting as appropriate
    if delay > 0 then
        if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then
            -- Dependencies gate the job even before its delay elapses
            self.depends.add(now, jid)
            redis.call('hmset', QlessJob.ns .. jid,
                'state', 'depends',
                'scheduled', now + delay)
        else
            self.scheduled.add(now + delay, jid)
        end
    else
        if redis.call('scard', QlessJob.ns .. jid .. '-dependencies') > 0 then
            self.depends.add(now, jid)
            redis.call('hset', QlessJob.ns .. jid, 'state', 'depends')
        else
            self.work.add(now, priority, jid)
        end
    end

    -- Make sure the queue itself is registered
    if redis.call('zscore', 'ql:queues', self.name) == false then
        redis.call('zadd', 'ql:queues', now, self.name)
    end

    if redis.call('zscore', 'ql:tracked', jid) ~= false then
        Qless.publish('put', jid)
    end

    return jid
end

-- Move up to `count` of the oldest jobs out of the failure `group` back
-- into this queue in a waiting state. Returns how many were requeued.
function QlessQueue:unfail(now, group, count)
    assert(group, 'Unfail(): Arg "group" missing')
    count = assert(tonumber(count or 25),
        'Unfail(): Arg "count" not a number: ' .. tostring(count))

    -- Oldest failures live at the tail of the list
    local jids = redis.call('lrange', 'ql:f:' .. group, -count, -1)

    for _, jid in ipairs(jids) do
        local job = Qless.job(jid)
        local data = job:data()
        job:history(now, 'put', {q = self.name})
        redis.call('hmset', QlessJob.ns .. data.jid,
            'state'    , 'waiting',
            'worker'   , '',
            'expires'  , 0,
            'queue'    , self.name,
            'remaining', data.retries or 5)
        self.work.add(now, data.priority, data.jid)
    end

    -- Trim the entries we requeued; forget the group entirely if empty
    redis.call('ltrim', 'ql:f:' .. group, 0, -count - 1)
    if (redis.call('llen', 'ql:f:' .. group) == 0) then
        redis.call('srem', 'ql:failures', group)
    end

    return #jids
end

-- Register a recurring job template on this queue. Only the 'interval'
-- spec is supported: arg[1] = interval seconds, arg[2] = initial offset,
-- followed by key/value option pairs (tags, priority, retries, backlog).
function QlessQueue:recur(now, jid, klass, raw_data, spec, ...)
    assert(jid  , 'RecurringJob On(): Arg "jid" missing')
    assert(klass, 'RecurringJob On(): Arg "klass" missing')
    assert(spec , 'RecurringJob On(): Arg "spec" missing')
    local data = assert(cjson.decode(raw_data),
        'RecurringJob On(): Arg "data" not JSON: ' .. tostring(raw_data))

    if spec == 'interval' then
        local interval = assert(tonumber(arg[1]),
            'Recur(): Arg "interval" not a number: ' .. tostring(arg[1]))
        local offset = assert(tonumber(arg[2]),
            'Recur(): Arg "offset" not a number: ' .. tostring(arg[2]))
        if interval <= 0 then
            error('Recur(): Arg "interval" must be greater than 0')
        end

        if #arg % 2 == 1 then
            error('Odd number of additional args: ' .. tostring(arg))
        end

        local options = {}
        for i = 3, #arg, 2 do options[arg[i]] = arg[i + 1] end
        options.tags = assert(cjson.decode(options.tags or '{}'),
            'Recur(): Arg "tags" must be JSON string array: ' .. tostring(
                options.tags))
        options.priority = assert(tonumber(options.priority or 0),
            'Recur(): Arg "priority" not a number: ' .. tostring(
                options.priority))
        options.retries = assert(tonumber(options.retries or 0),
            'Recur(): Arg "retries" not a number: ' .. tostring(
                options.retries))
        options.backlog = assert(tonumber(options.backlog or 0),
            'Recur(): Arg "backlog" not a number: ' .. tostring(
                options.backlog))

        local count, old_queue = unpack(
            redis.call('hmget', 'ql:r:' .. jid, 'count', 'queue'))
        count = count or 0

        -- If it already recurs on another queue, detach it there first
        if old_queue then
            Qless.queue(old_queue).recurring.remove(jid)
        end

        redis.call('hmset', 'ql:r:' .. jid,
            'jid'     , jid,
            'klass'   , klass,
            'data'    , raw_data,
            'priority', options.priority,
            'tags'    , cjson.encode(options.tags or {}),
            'state'   , 'recur',
            'queue'   , self.name,
            'type'    , 'interval',
            'count'   , count,
            'interval', interval,
            'retries' , options.retries,
            'backlog' , options.backlog)
        self.recurring.add(now + offset, jid)

        -- Make sure the queue itself is registered
        if redis.call('zscore', 'ql:queues', self.name) == false then
            redis.call('zadd', 'ql:queues', now, self.name)
        end

        return jid
    else
        error('Recur(): schedule type "' .. tostring(spec) .. '" unknown')
    end
end

-- Total jobs in this queue: running + waiting + scheduled.
function QlessQueue:length()
    return self.locks.length() + self.work.length() + self.scheduled.length()
end

-- Spawn child jobs for any recurring templates whose next run time has
-- arrived, creating at most `count` new jobs in total.
function QlessQueue:check_recurring(now, count)
    local moved = 0
    -- Templates whose next-run score is due
    local due = self.recurring.peek(now, 0, count)
    for _, jid in ipairs(due) do
        local klass, data, priority, tags, retries, interval, backlog = unpack(
            redis.call('hmget', 'ql:r:' ..
jid, 'klass', 'data', 'priority',
            'tags', 'retries', 'interval', 'backlog'))
        local _tags = cjson.decode(tags)
        local score = math.floor(tonumber(self.recurring.score(jid)))
        interval = tonumber(interval)

        -- Constrain catch-up spawning to the configured backlog window:
        -- skip over any overdue runs beyond it.
        backlog = tonumber(backlog or 0)
        if backlog ~= 0 then
            local num = ((now - score) / interval)
            if num > backlog then
                score = score + (
                    math.ceil(num - backlog) * interval
                )
            end
        end

        -- Emit one child job per elapsed interval, up to `count` overall
        while (score <= now) and (moved < count) do
            local count = redis.call('hincrby', 'ql:r:' .. jid, 'count', 1)
            moved = moved + 1

            local child_jid = jid .. '-' .. count

            -- The child inherits the template's tags
            for i, tag in ipairs(_tags) do
                redis.call('zadd', 'ql:t:' .. tag, now, child_jid)
                redis.call('zincrby', 'ql:tags', 1, tag)
            end

            redis.call('hmset', QlessJob.ns .. child_jid,
                'jid'       , child_jid,
                'klass'     , klass,
                'data'      , data,
                'priority'  , priority,
                'tags'      , tags,
                'state'     , 'waiting',
                'worker'    , '',
                'expires'   , 0,
                'queue'     , self.name,
                'retries'   , retries,
                'remaining' , retries,
                'time'      , string.format("%.20f", score),
                'spawned_from_jid', jid)
            Qless.job(child_jid):history(score, 'put', {q = self.name})

            self.work.add(score, priority, child_jid)

            score = score + interval
            self.recurring.add(score, jid)
        end
    end
end

-- Promote scheduled jobs whose time has come into the waiting state.
function QlessQueue:check_scheduled(now, count)
    local scheduled = self.scheduled.ready(now, 0, count)
    for _, jid in ipairs(scheduled) do
        -- Preserve the job's priority as it enters the work queue
        local priority = tonumber(
            redis.call('hget', QlessJob.ns .. jid, 'priority') or 0)
        self.work.add(now, priority, jid)
        self.scheduled.remove(jid)

        redis.call('hset', QlessJob.ns .. jid, 'state', 'waiting')
    end
end

-- Handle jobs whose locks have expired. On the first expiry a 'grace
-- period' courtesy message is sent to the worker; once the grace period
-- has been spent (or is disabled), the job is either retried (its jid is
-- returned for re-issue) or failed when retries are exhausted.
function QlessQueue:invalidate_locks(now, count)
    local jids = {}
    for _, jid in ipairs(self.locks.expired(now, 0, count)) do
        local worker, failure = unpack(
            redis.call('hmget', QlessJob.ns .. jid, 'worker', 'failure'))
        redis.call('zrem', 'ql:w:' .. worker .. ':jobs', jid)

        local grace_period = tonumber(Qless.config.get('grace-period'))

        -- Has the courtesy message already been sent for this expiry?
        local courtesy_sent = tonumber(
            redis.call('hget', QlessJob.ns .. jid, 'grace') or 0)

        local send_message = (courtesy_sent ~= 1)
        local invalidate = not send_message

        -- Without a grace period, notify and invalidate in a single pass
        if grace_period <= 0 then
            send_message = true
            invalidate = true
        end

        if send_message then
            if redis.call('zscore', 'ql:tracked', jid) ~= false then
                Qless.publish('stalled', jid)
            end
            Qless.job(jid):history(now, 'timed-out')
            redis.call('hset', QlessJob.ns .. jid, 'grace', 1)

            local encoded = cjson.encode({
                jid = jid,
                event = 'lock_lost',
                worker = worker
            })
            Qless.publish('w:' .. worker, encoded)
            Qless.publish('log', encoded)
            -- Extend the lock until the end of the grace period
            self.locks.add(now + grace_period, jid)

            local bin = now - (now % 86400)
            redis.call('hincrby',
                'ql:s:stats:' .. bin .. ':' .. self.name, 'retries', 1)
        end

        if invalidate then
            -- BUGFIX: previously `hdel` was called with a stray trailing
            -- `0` argument, asking Redis to also delete a nonexistent
            -- field named "0". Only the 'grace' flag should be cleared.
            redis.call('hdel', QlessJob.ns .. jid, 'grace')

            local remaining = tonumber(redis.call(
                'hincrby', QlessJob.ns .. jid, 'remaining', -1))

            if remaining < 0 then
                -- Retries exhausted: fail the job
                self.work.remove(jid)
                self.locks.remove(jid)
                self.scheduled.remove(jid)

                local group = 'failed-retries-' .. Qless.job(jid):data()['queue']
                local job = Qless.job(jid)
                job:history(now, 'failed', {group = group})
                redis.call('hmset', QlessJob.ns .. jid, 'state', 'failed',
                    'worker', '',
                    'expires', '')
                redis.call('hset', QlessJob.ns .. jid,
                    'failure', cjson.encode({
                        ['group']   = group,
                        ['message'] =
                            'Job exhausted retries in queue "' .. self.name .. '"',
                        ['when']    = now,
                        ['worker']  = unpack(job:data('worker'))
                    }))

                redis.call('sadd', 'ql:failures', group)
                redis.call('lpush', 'ql:f:' .. group, jid)

                if redis.call('zscore', 'ql:tracked', jid) ~= false then
                    Qless.publish('failed', jid)
                end
                Qless.publish('log', cjson.encode({
                    jid = jid,
                    event = 'failed',
                    group = group,
                    worker = worker,
                    message =
                        'Job exhausted retries in queue "' .. self.name .. '"'
                }))

                local bin = now - (now % 86400)
                redis.call('hincrby',
                    'ql:s:stats:' .. bin .. ':' .. self.name, 'failures', 1)
                redis.call('hincrby',
                    'ql:s:stats:' .. bin .. ':' .. self.name, 'failed' , 1)
            else
                -- Retries remain: hand the jid out again
                table.insert(jids, jid)
            end
        end
    end

    return jids
end

-- Remove queues from the master queue registry.
function QlessQueue.deregister(...)
    redis.call('zrem', Qless.ns .. 'queues', unpack(arg))
end

-- Job counts per state for one queue, or (without a name) for all queues.
function QlessQueue.counts(now, name)
    if name then
        local queue = Qless.queue(name)
        local stalled = queue.locks.length(now)
        -- Promote due scheduled jobs so the waiting count is accurate
        queue:check_scheduled(now, queue.scheduled.length())
        return {
            name      = name,
            waiting   = queue.work.length(),
            stalled   = stalled,
            running   = queue.locks.length() - stalled,
            scheduled = queue.scheduled.length(),
            depends   = queue.depends.length(),
            recurring = queue.recurring.length(),
            paused    = queue:paused()
        }
    else
        local queues = redis.call('zrange', 'ql:queues', 0, -1)
        local response = {}
        for _, qname in ipairs(queues) do
            table.insert(response, QlessQueue.counts(now, qname))
        end
        return response
    end
end

-- Fetch a recurring job's template as a table, or nil when it is gone.
function QlessRecurringJob:data()
    local job = redis.call(
        'hmget', 'ql:r:' .. self.jid, 'jid', 'klass', 'state', 'queue',
        'priority', 'interval', 'retries', 'count', 'data', 'tags', 'backlog')

    if not job[1] then
        return nil
    end

    return {
        jid      = job[1],
        klass    = job[2],
        state    = job[3],
        queue    = job[4],
        priority = tonumber(job[5]),
        interval = tonumber(job[6]),
        retries  = tonumber(job[7]),
        count    = tonumber(job[8]),
        data     = job[9],
        tags     = cjson.decode(job[10]),
        backlog  = tonumber(job[11] or 0)
    }
end

-- Update attributes of a recurring job; varargs are key/value pairs drawn
-- from: priority, interval, retries, data, klass, queue, backlog.
function QlessRecurringJob:update(now, ...)
    if redis.call('exists', 'ql:r:' .. self.jid) ~= 0 then
        for i = 1, #arg, 2 do
            local key = arg[i]
            local value = arg[i + 1]
            assert(value, 'No value provided for ' .. tostring(key))
            if key == 'priority' or key == 'interval' or key == 'retries' then
                value = assert(tonumber(value),
                    'Recur(): Arg "' .. key .. '" must be a number: ' ..
                        tostring(value))
                if key == 'interval' then
                    -- Shift the next-run score by the interval delta
                    local queue, interval = unpack(redis.call(
                        'hmget', 'ql:r:' .. self.jid, 'queue', 'interval'))
                    Qless.queue(queue).recurring.update(
                        value - tonumber(interval), self.jid)
                end
                redis.call('hset', 'ql:r:' .. self.jid, key, value)
            elseif key == 'data' then
                assert(cjson.decode(value),
                    'Recur(): Arg "data" is not JSON-encoded: ' ..
                        tostring(value))
                redis.call('hset', 'ql:r:' .. self.jid, 'data', value)
            elseif key == 'klass' then
                redis.call('hset', 'ql:r:' .. self.jid, 'klass', value)
            elseif key == 'queue' then
                -- Move the recurrence to another queue, preserving its score
                local queue_obj = Qless.queue(
                    redis.call('hget', 'ql:r:' .. self.jid, 'queue'))
                local score = queue_obj.recurring.score(self.jid)
                queue_obj.recurring.remove(self.jid)
                Qless.queue(value).recurring.add(score, self.jid)
                redis.call('hset', 'ql:r:' .. self.jid, 'queue', value)
                -- Make sure the destination queue is registered
                if redis.call('zscore', 'ql:queues', value) == false then
                    redis.call('zadd', 'ql:queues', now, value)
                end
            elseif key == 'backlog' then
                value = assert(tonumber(value),
                    'Recur(): Arg "backlog" not a number: ' .. tostring(value))
                redis.call('hset', 'ql:r:' .. self.jid, 'backlog', value)
            else
                error('Recur(): Unrecognized option "' .. key .. '"')
            end
        end
        return true
    else
        error('Recur(): No recurring job ' .. self.jid)
    end
end

-- Add tags to a recurring job (duplicates ignored); returns the JSON tags.
function QlessRecurringJob:tag(...)
    local tags = redis.call('hget', 'ql:r:' .. self.jid, 'tags')
    if tags then
        tags = cjson.decode(tags)
        local _tags = {}
        for _, v in ipairs(tags) do _tags[v] = true end

        -- Append only tags not already present
        for i = 1, #arg do
            if _tags[arg[i]] == nil or _tags[arg[i]] == false then
                table.insert(tags, arg[i])
            end
        end

        tags = cjson.encode(tags)
        redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
        return tags
    else
        error('Tag(): Job ' .. self.jid .. ' does not exist')
    end
end

-- Remove tags from a recurring job; returns the remaining JSON tags.
function QlessRecurringJob:untag(...)
    local tags = redis.call('hget', 'ql:r:' .. self.jid, 'tags')
    if tags then
        tags = cjson.decode(tags)
        local _tags = {}
        for _, v in ipairs(tags) do _tags[v] = true end
        for i = 1, #arg do _tags[arg[i]] = nil end

        local results = {}
        for _, tag in ipairs(tags) do
            if _tags[tag] then table.insert(results, tag) end
        end
        tags = cjson.encode(results)
        redis.call('hset', 'ql:r:' .. self.jid, 'tags', tags)
        return tags
    else
        error('Untag(): Job ' .. self.jid .. ' does not exist')
    end
end

-- Stop a job from recurring; idempotent, always returns true.
function QlessRecurringJob:unrecur()
    local queue = redis.call('hget', 'ql:r:' .. self.jid, 'queue')
    if queue then
        Qless.queue(queue).recurring.remove(self.jid)
        redis.call('del', 'ql:r:' .. self.jid)
    end
    return true
end

-- Remove workers from the worker registry.
function QlessWorker.deregister(...)
    redis.call('zrem', 'ql:workers', unpack(arg))
end

-- Per-worker job counts (or, given a worker name, its job/stalled lists),
-- first expiring workers not seen within 'max-worker-age' (default 1 day).
function QlessWorker.counts(now, worker)
    local interval = tonumber(Qless.config.get('max-worker-age', 86400))

    local expired = redis.call('zrangebyscore', 'ql:workers', 0, now - interval)
    for _, name in ipairs(expired) do
        redis.call('del', 'ql:w:' .. name .. ':jobs')
    end

    redis.call('zremrangebyscore', 'ql:workers', 0, now - interval)

    if worker then
        return {
            jobs = redis.call('zrevrangebyscore',
                'ql:w:' .. worker .. ':jobs', now + 8640000, now),
            stalled = redis.call('zrevrangebyscore', 'ql:w:' .. worker ..
':jobs', now, 0)
        }
    else
        local response = {}
        local workers = redis.call('zrevrange', 'ql:workers', 0, -1)
        for _, name in ipairs(workers) do
            table.insert(response, {
                name    = name,
                jobs    = redis.call('zcount',
                    'ql:w:' .. name .. ':jobs', now, now + 8640000),
                stalled = redis.call('zcount',
                    'ql:w:' .. name .. ':jobs', 0, now)
            })
        end
        return response
    end
end

-- The command table exposed to clients; each entry receives the current
-- timestamp first, then its own arguments.
local QlessAPI = {}

-- A single job's data as JSON, or nil when the job does not exist.
function QlessAPI.get(now, jid)
    local data = Qless.job(jid):data()
    if not data then
        return nil
    end
    return cjson.encode(data)
end

-- Several jobs at once; always a JSON array.
function QlessAPI.multiget(now, ...)
    local results = {}
    for _, jid in ipairs(arg) do
        table.insert(results, Qless.job(jid):data())
    end
    return cjson.encode(results)
end

-- Without a key, the whole config is returned JSON-encoded; with one, the
-- raw value is returned as-is.
QlessAPI['config.get'] = function(now, key)
    if not key then
        return cjson.encode(Qless.config.get(key))
    else
        return Qless.config.get(key)
    end
end

QlessAPI['config.set'] = function(now, key, value)
    return Qless.config.set(key, value)
end

QlessAPI['config.unset'] = function(now, key)
    return Qless.config.unset(key)
end

QlessAPI.queues = function(now, queue)
    return cjson.encode(QlessQueue.counts(now, queue))
end

QlessAPI.complete = function(now, jid, worker, queue, data, ...)
    return Qless.job(jid):complete(now, worker, queue, data, unpack(arg))
end

QlessAPI.failed = function(now, group, start, limit)
    return cjson.encode(Qless.failed(group, start, limit))
end

QlessAPI.fail = function(now, jid, worker, group, message, data)
    return Qless.job(jid):fail(now, worker, group, message, data)
end

QlessAPI.jobs = function(now, state, ...)
    return Qless.jobs(now, state, unpack(arg))
end

QlessAPI.retry = function(now, jid, queue, worker, delay, group, message)
    return Qless.job(jid):retry(now, queue, worker, delay, group, message)
end

QlessAPI.depends = function(now, jid, command, ...)
    return Qless.job(jid):depends(now, command, unpack(arg))
end

QlessAPI.heartbeat = function(now, jid, worker, data)
    return Qless.job(jid):heartbeat(now, worker, data)
end

QlessAPI.workers = function(now, worker)
    return cjson.encode(QlessWorker.counts(now, worker))
end

QlessAPI.track = function(now, command, jid)
    return cjson.encode(Qless.track(now, command, jid))
end

QlessAPI.tag = function(now, command, ...)
    return cjson.encode(Qless.tag(now, command, unpack(arg)))
end

QlessAPI.stats = function(now, queue, date)
    return cjson.encode(Qless.queue(queue):stats(now, date))
end

QlessAPI.priority = function(now, jid, priority)
    return Qless.job(jid):priority(priority)
end

-- Append a custom event to a job's history.
QlessAPI.log = function(now, jid, message, data)
    assert(jid, "Log(): Argument 'jid' missing")
    assert(message, "Log(): Argument 'message' missing")
    if data then
        data = assert(cjson.decode(data),
            "Log(): Argument 'data' not cjson: " .. tostring(data))
    end

    local job = Qless.job(jid)
    assert(job:exists(), 'Log(): Job ' .. jid .. ' does not exist')
    job:history(now, message, data)
end

QlessAPI.peek = function(now, queue, count)
    local jids = Qless.queue(queue):peek(now, count)
    local response = {}
    for _, jid in ipairs(jids) do
        table.insert(response, Qless.job(jid):data())
    end
    return cjson.encode(response)
end

QlessAPI.pop = function(now, queue, worker, count)
    local jids = Qless.queue(queue):pop(now, worker, count)
    local response = {}
    for _, jid in ipairs(jids) do
        table.insert(response, Qless.job(jid):data())
    end
    return cjson.encode(response)
end

QlessAPI.pause = function(now, ...)
    return QlessQueue.pause(now, unpack(arg))
end

QlessAPI.unpause = function(now, ...)
    return QlessQueue.unpause(unpack(arg))
end

QlessAPI.cancel = function(now, ...)
    return Qless.cancel(unpack(arg))
end

QlessAPI.timeout = function(now, ...)
    for _, jid in ipairs(arg) do
        Qless.job(jid):timeout(now)
    end
end

QlessAPI.put = function(now, me, queue, jid, klass, data, delay, ...)
    return Qless.queue(queue):put(now, me, jid, klass, data, delay, unpack(arg))
end

QlessAPI.requeue = function(now, me, queue, jid, ...)
    local job = Qless.job(jid)
    assert(job:exists(), 'Requeue(): Job ' .. jid .. ' does not exist')
    return QlessAPI.put(now, me, queue, jid, unpack(arg))
end

QlessAPI.unfail = function(now, queue, group, count)
    return Qless.queue(queue):unfail(now, group, count)
end

QlessAPI.recur = function(now, queue, jid, klass, data, spec, ...)
    return Qless.queue(queue):recur(now, jid, klass, data, spec, unpack(arg))
end

QlessAPI.unrecur = function(now, jid)
    return Qless.recurring(jid):unrecur()
end

QlessAPI['recur.get'] = function(now, jid)
    local data = Qless.recurring(jid):data()
    if not data then
        return nil
    end
    return cjson.encode(data)
end

QlessAPI['recur.update'] = function(now, jid, ...)
    return Qless.recurring(jid):update(now, unpack(arg))
end

QlessAPI['recur.tag'] = function(now, jid, ...)
    return Qless.recurring(jid):tag(unpack(arg))
end

QlessAPI['recur.untag'] = function(now, jid, ...)
    return Qless.recurring(jid):untag(unpack(arg))
end

QlessAPI.length = function(now, queue)
    return Qless.queue(queue):length()
end

QlessAPI['worker.deregister'] = function(now, ...)
    return QlessWorker.deregister(unpack(arg))
end

QlessAPI['queue.forget'] = function(now, ...)
    QlessQueue.deregister(unpack(arg))
end


-- Dispatch: ARGV[1] is the command name, ARGV[2] the current timestamp,
-- and the rest are that command's arguments. No KEYS are accepted.
if #KEYS > 0 then error('No Keys should be provided') end

local command_name = assert(table.remove(ARGV, 1), 'Must provide a command')
local command = assert(
    QlessAPI[command_name], 'Unknown command ' .. command_name)

-- (Cleanup: the original re-declared `local now` twice; one binding is
-- sufficient and behaves identically.)
local now = tonumber(table.remove(ARGV, 1))
now = assert(
    now, 'Arg "now" missing or not a number: ' .. (now or 'nil'))

return command(now, unpack(ARGV))
--------------------------------------------------------------------------------
/lib/resty/qless.lua:
--------------------------------------------------------------------------------
local ffi = require "ffi"
local redis_connector = require "resty.redis.connector"
local cjson = require "cjson"

local qless_luascript = require "resty.qless.luascript"
local qless_queue = require "resty.qless.queue"
local qless_job = require "resty.qless.job"
local qless_recurring_job = require "resty.qless.recurring_job"

local ngx_var = ngx.var
local ngx_now = ngx.now
local ngx_log = ngx.log
local ngx_DEBUG = ngx.DEBUG
local ngx_ERR = ngx.ERR
local ngx_worker_pid = ngx.worker.pid
local ngx_worker_id = ngx.worker.id
local math_floor = math.floor
local ffi_cdef = ffi.cdef
local ffi_new = ffi.new
local ffi_string = ffi.string
local C = ffi.C
local cjson_encode = cjson.encode
local cjson_decode = cjson.decode
local tbl_insert = table.insert
local tbl_concat = table.concat
local str_sub = string.sub
local str_len = string.len


ffi_cdef[[
typedef unsigned char u_char;
u_char * ngx_hex_dump(u_char *dst, const u_char *src, size_t len);
int RAND_pseudo_bytes(u_char *buf, int num);
int gethostname (char *name, size_t size);
]]


-- Generate `len` hex characters of pseudo-random data (len should be
-- even). Returns nil (after logging) when OpenSSL reports a failure.
local function random_hex(len)
    local num_bytes = math_floor(len / 2)

    local bytes = ffi_new("uint8_t[?]", num_bytes)
    -- BUGFIX: the original checked `if not bytes` *after* using the
    -- buffer; an FFI cdata array is never falsy, so RAND_pseudo_bytes()
    -- failures were silently ignored. Check its return code (-1 = error).
    if C.RAND_pseudo_bytes(bytes, num_bytes) == -1 then
        ngx_log(ngx_ERR, "error getting random bytes via FFI")
        return nil
    end

    local hex = ffi_new("uint8_t[?]", num_bytes * 2)
    C.ngx_hex_dump(hex, bytes, num_bytes)
    return ffi_string(hex, num_bytes * 2)
end


-- The host's name via gethostname(2).
local function gethostname()
    local name = ffi_new("char[?]", 255)
    C.gethostname(name, 255)
    return
ffi_string(name)
end


-- Resolve a pre-established Redis connection from `params`:
-- 1) params.redis_client: an already-connected client (non-empty table);
-- 2) params.get_redis_client: a factory function returning one.
-- Returns nothing when neither is supplied; the caller then falls back to
-- lua-resty-redis-connector.
local function get_existing_redis_connection(params)
    if params.redis_client and next(params.redis_client) then
        return params.redis_client
    elseif type(params.get_redis_client) == "function" then
        return params.get_redis_client()
    end
end


-- Jobs, to be accessed via qless.jobs.
local _jobs = {}
local _jobs_mt = { __index = _jobs }


function _jobs.new(client)
    return setmetatable({
        client = client,
    }, _jobs_mt)
end


-- Recently completed jids, paginated.
function _jobs.complete(self, offset, count)
    return self.client:call("jobs", "complete", offset or 0, count or 25)
end


-- All tracked jobs, instantiated as job objects.
function _jobs.tracked(self)
    local res = cjson_decode(self.client:call("track"))

    local tracked_jobs = {}
    for k, v in pairs(res.jobs) do
        tracked_jobs[k] = qless_job.new(self.client, v)
    end
    res.jobs = tracked_jobs
    return res
end


-- Jids carrying the given tag, paginated.
function _jobs.tagged(self, tag, offset, count)
    local tagged = self.client:call("tag", "get", tag, offset or 0, count or 25)
    if tagged then
        return cjson_decode(tagged)
    end
end


-- Without a tag: failure-group counts. With a tag: that group's failed
-- jobs as instantiated job objects, paginated.
function _jobs.failed(self, tag, offset, count)
    if not tag then
        return cjson_decode(self.client:call("failed"))
    end

    local results = self.client:call("failed", tag, offset or 0, count or 25)
    results = cjson_decode(results)
    results["jobs"] = self:multiget(unpack(results["jobs"]))
    return results
end


-- Look up a single job by jid: regular jobs first, then recurring ones.
-- Returns nothing when the jid is unknown.
function _jobs.get(self, jid)
    local results = self.client:call("get", jid)
    if results == ngx.null then
        -- Perhaps this jid is a recurring job.
        results = self.client:call("recur.get", jid)
        if results ~= ngx.null then
            return qless_recurring_job.new(self.client, cjson_decode(results))
        end
    else
        return qless_job.new(self.client, cjson_decode(results))
    end
end


-- Fetch several jobs at once as instantiated job objects.
function _jobs.multiget(self, ...)
    local res = cjson_decode(self.client:call("multiget", ...))
    local jobs = {}
    for _, data in ipairs(res) do
        tbl_insert(jobs, qless_job.new(self.client, data))
    end
    return jobs
end


-- Workers, to be accessed via qless.workers.
local _workers = {}

function _workers.new(client)
    return setmetatable({
        client = client,
        counts = _workers.counts,
    }, {
        -- Unknown keys are treated as worker names and looked up live
        __index = function(t, k)
            return t.client:call("workers", k)
        end,
    })
end

function _workers.counts(self)
    return cjson_decode(self.client:call("workers"))
end



-- Queues, to be accessed via qless.queues etc.
local _queues = {}

function _queues.new(client)
    return setmetatable({
        client = client,
        counts = _queues.counts,
    }, {
        -- Queue objects are created lazily and memoised on first access
        __index = function(t, k)
            local q = qless_queue.new(k, t.client)
            rawset(t, k, q)
            return q
        end,
    })
end

function _queues.counts(self)
    return cjson_decode(self.client:call("queues"))
end


-- Events, to be accessed via qless.events etc.
local _events = {}
local _events_mt = { __index = _events }


function _events.new(params)
    -- First try to pull an existing connection from the params
    local redis, err = get_existing_redis_connection(params)
    if not redis and err then
        -- err indicates we were given an existing connection / callback,
        -- but it failed
        return nil, err
    end

    -- If not, use redis connector to create one from params
    local rc
    if not redis then
        local err
        rc, err = redis_connector.new(params)
        if not rc then return nil, err end

        redis, err = rc:connect()
        if not redis then return nil, err end
    end

    return setmetatable({
        redis = redis,
        redis_connector = rc,
    }, _events_mt)
end


function _events.listen(self, events, callback)
    local ql_ns = "ql:"
    for i, ev in ipairs(events) do
        local ok, err = self.redis:subscribe(ql_ns ..
ev) 231 | if not ok then ngx_log(ngx_ERR, err) end 232 | end 233 | 234 | repeat 235 | local reply, err = self.redis:read_reply() 236 | if not reply then 237 | ngx_log(ngx_ERR, err) 238 | else 239 | local channel = str_sub(reply[2], str_len(ql_ns) + 1) 240 | local message = reply[3] 241 | callback(channel, message) 242 | end 243 | until not reply 244 | end 245 | 246 | 247 | function _events.stop(self) 248 | return self.redis:unsubscribe() 249 | end 250 | 251 | 252 | local _M = { 253 | _VERSION = '0.12', 254 | } 255 | 256 | local mt = { __index = _M } 257 | 258 | 259 | function _M.new(params) 260 | -- First try to pull an existing connection from the params 261 | local redis, err = get_existing_redis_connection(params) 262 | if not redis and err then 263 | -- err indicates we were given an existing connection / callback, but 264 | -- it failed 265 | return nil, err 266 | end 267 | 268 | -- If not, use redis connector to create one from params 269 | local rc 270 | if not redis then 271 | local err 272 | rc, err = redis_connector.new(params) 273 | if not rc then return nil, err end 274 | 275 | redis, err = rc:connect() 276 | if not redis then return nil, err end 277 | end 278 | 279 | local worker_name = gethostname() .. "-nginx-" .. 280 | tostring(ngx_worker_pid()) .. "-" .. 
tostring(ngx_worker_id())

    local self = setmetatable({
        params = params,
        redis = redis,
        redis_connector = rc,
        worker_name = worker_name,
        luascript = qless_luascript.new("qless", redis),
    }, mt)

    self.workers = _workers.new(self)
    self.queues = _queues.new(self)
    self.jobs = _jobs.new(self)

    return self
end


-- Creates an events (pub/sub) client from connection params.
function _M.events(params)
    return _events.new(params)
end


-- Releases the Redis connection: either into the keepalive pool (explicit
-- params, redis-connector defaults, or system defaults) or via a
-- caller-supplied `close_redis_client` callback.
local function set_keepalive(self, keepalive_timeout, keepalive_poolsize)
    local redis = self.redis
    if not redis or not redis.set_keepalive then
        return nil, "redis is not connected"
    end

    -- NOTE: `_events` instances carry no `params` field, so default to an
    -- empty table rather than indexing nil below.
    local params = self.params or {}

    -- If we're given params, close the redis connection directly
    if keepalive_timeout or keepalive_poolsize then
        return redis:set_keepalive(
            keepalive_timeout,
            keepalive_poolsize
        )
    elseif params.close_redis_client and
        type(params.close_redis_client) == "function" then
        -- BUGFIX: was `type(params.close_redis_client == "function")`,
        -- i.e. type(boolean), which is always truthy, so this branch was
        -- taken for ANY truthy close_redis_client value.

        -- Use the callback given to us
        return params.close_redis_client(redis)
    elseif self.redis_connector then
        -- Use redis connector keepalive params (or defaults)
        return self.redis_connector:set_keepalive(redis)
    else
        -- Just use system defaults
        return redis:set_keepalive()
    end
end
_M.set_keepalive = set_keepalive
_events.set_keepalive = set_keepalive
_M.redis_close = set_keepalive -- maintain backwards compatibility


-- Generates a random 32-character hex job id.
function _M.generate_jid(self)
    return random_hex(32)
end


function _M.call(self, command, ...)
341 | local res, err = self.luascript:call(command, ngx_now(), select(1, ...)) 342 | if not res then 343 | ngx_log(ngx_ERR, err) 344 | end 345 | return res, err 346 | end 347 | 348 | 349 | function _M.config_set(self, k, v) 350 | return self:call("config.set", k, v) 351 | end 352 | 353 | 354 | function _M.config_get(self, k) 355 | return self:call("config.get", k) 356 | end 357 | 358 | 359 | function _M.config_get_all(self) 360 | local res, err = self:call("config.get") 361 | return cjson_decode(res) 362 | end 363 | 364 | 365 | function _M.config_clear(self, k) 366 | return self:call("config.unset", k) 367 | end 368 | 369 | 370 | function _M.track(self, jid) 371 | return self:call("track", "track", jid) 372 | end 373 | 374 | 375 | function _M.untrack(self, jid) 376 | return self:call("track", "untrack", jid) 377 | end 378 | 379 | 380 | function _M.tags(self, offset, count) 381 | return cjson_decode(self:call("tag", "top", offset or 0, count or 100)) 382 | end 383 | 384 | 385 | function _M.deregister_workers(self, worker_names) 386 | return self:call("worker.deregister", unpack(worker_names)) 387 | end 388 | 389 | 390 | function _M.bulk_cancel(self, jids) 391 | return self:call("cancel", jids) 392 | end 393 | 394 | 395 | return _M 396 | -------------------------------------------------------------------------------- /lib/resty/qless/job.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson" 2 | 3 | local ngx_log = ngx.log 4 | local ngx_DEBUG = ngx.DEBUG 5 | local ngx_ERR = ngx.ERR 6 | local ngx_INFO = ngx.INFO 7 | local ngx_now = ngx.now 8 | local cjson_encode = cjson.encode 9 | local cjson_decode = cjson.decode 10 | 11 | 12 | local _M = { 13 | _VERSION = '0.12', 14 | } 15 | 16 | local mt = { 17 | -- We hide priority as __priority, and use metamethods to update redis 18 | -- when the value is set. 
19 | __index = function (t, k) 20 | if k == "priority" then 21 | return t.__priority 22 | else 23 | return _M[k] 24 | end 25 | end, 26 | 27 | __newindex = function(t, k, v) 28 | if k == "priority" then 29 | return rawset(t, "__priority", t.client:call("priority", t.jid, v)) 30 | else 31 | return rawset(t, k, v) 32 | end 33 | end, 34 | } 35 | 36 | 37 | function _M.new(client, atts) 38 | return setmetatable({ 39 | client = client, 40 | jid = atts.jid, 41 | data = cjson_decode(atts.data or "{}"), 42 | tags = atts.tags, 43 | state = atts.state, 44 | tracked = atts.tracked, 45 | failure = atts.failure, 46 | dependencies = atts.dependencies, 47 | dependents = atts.dependents, 48 | spawned_from_jid = atts.spawned_from_jid, 49 | 50 | __priority = atts.priority, -- Accessed via metatable setter/getter 51 | 52 | expires_at = atts.expires, 53 | worker_name = atts.worker, 54 | klass = atts.klass, 55 | queue_name = atts.queue, 56 | original_retries = atts.retries, 57 | retries_left = atts.remaining, 58 | raw_queue_history = atts.history, 59 | 60 | state_changed = false, 61 | }, mt) 62 | end 63 | 64 | 65 | -- For building a job from attribute data, without the roundtrip to redis. 66 | function _M.build(client, klass, atts) 67 | local defaults = { 68 | jid = client:generate_jid(), 69 | spawned_from_jid = nil, 70 | data = {}, 71 | klass = klass, 72 | priority = 0, 73 | tags = {}, 74 | worker = 'mock_worker', 75 | expires = ngx_now() + (60 * 60), -- an hour from now 76 | state = 'running', 77 | tracked = false, 78 | queue = 'mock_queue', 79 | retries = 5, 80 | remaining = 5, 81 | failure = {}, 82 | history = {}, 83 | dependencies = {}, 84 | dependents = {}, 85 | } 86 | setmetatable(atts, { __index = defaults }) 87 | atts.data = cjson_encode(atts.data) 88 | 89 | return _M.new(client, atts) 90 | end 91 | 92 | 93 | function _M.queue(self) 94 | return self.client.queues[self.queue_name] 95 | end 96 | 97 | 98 | function _M.perform(self, ...) 
99 | local ok, task = pcall(require, self.klass) 100 | if ok then 101 | if task.perform and type(task.perform) == "function" then 102 | local ok, res, err_type, err = pcall(task.perform, self, ...) 103 | 104 | if not ok then 105 | local err = res 106 | return nil, "failed-" .. self.queue_name, "'" .. self.klass .. "' " .. (err or "") 107 | else 108 | return res, err_type, err 109 | end 110 | else 111 | return nil, 112 | self.queue_name .. "-invalid-task", 113 | "Job '" .. self.klass .. "' has no perform function" 114 | end 115 | else 116 | return nil, 117 | self.queue_name .. "-invalid-task", 118 | "Module '" .. self.klass .. "' could not be found" 119 | end 120 | end 121 | 122 | 123 | function _M.description(self) 124 | return self.klass .. " (" .. self.jid .. " / " .. self.queue_name .. " / " .. self.state .. ")" 125 | end 126 | 127 | 128 | function _M.ttl(self) 129 | return self.expires_at - ngx_now() 130 | end 131 | 132 | 133 | function _M.spawned_from(self) 134 | if self.spawned_from_jid then 135 | return self.spawned_from or self.client.jobs:get(self.spawned_from_jid) 136 | else 137 | return nil 138 | end 139 | end 140 | 141 | 142 | function _M.requeue(self, queue, options) 143 | if not options then options = {} end 144 | 145 | self:begin_state_change("requeue") 146 | local res = self.client:call("requeue", self.client.worker_name, queue, self.jid, self.klass, 147 | cjson_encode(options.data or self.data), 148 | options.delay or 0, 149 | "priority", options.priority or self.priority, 150 | "tags", cjson_encode(options.tags or self.tags), 151 | "retries", options.retries or self.original_retries, 152 | "depends", cjson_encode(options.depends or self.dependencies) 153 | ) 154 | self:finish_state_change("requeue") 155 | return res 156 | end 157 | _M.move = _M.requeue -- Old versions of qless previoulsly used 'move' 158 | 159 | 160 | function _M.fail(self, group, message) 161 | self:begin_state_change("fail") 162 | local res, err = self.client:call("fail", 163 | 
self.jid,
        self.client.worker_name,
        group or "[unknown group]", message or "[no message]",
        cjson_encode(self.data))

    if not res then
        ngx_log(ngx_ERR, "Could not fail job: ", err)
        return false
    end
    self:finish_state_change("fail")

    return true
end


-- Renews the lock on this job, updating (and returning) its expiry time.
function _M.heartbeat(self)
    self.expires_at = self.client:call(
        "heartbeat",
        self.jid,
        self.worker_name,
        cjson_encode(self.data)
    )
    return self.expires_at
end


-- Completes the job, optionally advancing it to `next_queue` with an
-- optional delay and dependencies.
function _M.complete(self, next_queue, options)
    if not options then options = {} end

    self:begin_state_change("complete")
    local res, err
    if next_queue then
        res, err = self.client:call("complete",
            self.jid,
            self.worker_name,
            self.queue_name,
            cjson_encode(self.data),
            "next", next_queue,
            "delay", options.delay or 0,
            "depends", cjson_encode(options.depends or {})
        )
    else
        res, err = self.client:call("complete",
            self.jid,
            self.worker_name,
            self.queue_name,
            cjson_encode(self.data)
        )
    end

    if not res then ngx_log(ngx_ERR, err) end
    self:finish_state_change("complete")

    return res, err
end


-- Retries the job in its current queue, optionally after `delay` seconds,
-- recording an optional failure group/message.
function _M.retry(self, delay, group, message)
    if not delay then delay = 0 end

    self:begin_state_change("retry")
    local res = self.client:call("retry",
        self.jid,
        self.queue_name,
        self.worker_name,
        delay,
        group, message)
    -- BUGFIX: was `self:end_state_change("retry")` — no such method exists
    -- (the module defines begin_state_change/finish_state_change), so every
    -- retry raised "attempt to call method 'end_state_change'".
    self:finish_state_change("retry")
    return res
end


-- Cancels the job, removing it from the system entirely.
function _M.cancel(self)
    self:begin_state_change("cancel")
    local res = self.client:call("cancel", self.jid)
    self:finish_state_change("cancel")
    return res
end


-- Times out the job immediately, returning it to its queue.
function _M.timeout(self)
    return self.client:call("timeout", self.jid)
end


function
_M.track(self) 249 | return self.client:call("track", "track", self.jid) 250 | end 251 | 252 | 253 | function _M.untrack(self) 254 | return self.client:call("track", "untrack", self.jid) 255 | end 256 | 257 | 258 | function _M.tag(self, ...) 259 | return self.client:call("tag", "add", self.jid, ...) 260 | end 261 | 262 | 263 | function _M.untag(self, ...) 264 | return self.client:call("tag", "remove", self.jid, ...) 265 | end 266 | 267 | 268 | function _M.depend(self, ...) 269 | return self.client:call("depends", self.jid, "on", ...) 270 | end 271 | 272 | 273 | function _M.undepend(self, ...) 274 | return self.client:call("depends", self.jid, "off", ...) 275 | end 276 | 277 | 278 | function _M.log(self, message, data) 279 | if data then data = cjson_encode(data) end 280 | return self.client:call("log", self.jid, message, data) 281 | end 282 | 283 | 284 | function _M.begin_state_change(self, event) 285 | local before = self["before_" .. event] 286 | if before and type(before) == "function" then 287 | before() 288 | end 289 | end 290 | 291 | 292 | function _M.finish_state_change(self, event) 293 | self.state_changed = true 294 | 295 | local after = self["after_" .. 
event] 296 | if after and type(after) == "function" then 297 | after() 298 | end 299 | end 300 | 301 | return _M 302 | -------------------------------------------------------------------------------- /lib/resty/qless/luascript.lua: -------------------------------------------------------------------------------- 1 | local ngx_log = ngx.log 2 | local ngx_DEBUG = ngx.DEBUG 3 | local ngx_ERR = ngx.ERR 4 | local ngx_sha1_bin = ngx.sha1_bin 5 | local str_gsub = string.gsub 6 | local str_format = string.format 7 | local str_byte = string.byte 8 | local str_len = string.len 9 | local str_sub = string.sub 10 | local debug_getinfo = debug.getinfo 11 | local io_open = io.open 12 | 13 | 14 | local _M = { 15 | _VERSION = '0.12', 16 | } 17 | 18 | local mt = { __index = _M } 19 | 20 | -- Couldn't find a better way to determine the current script path... 21 | local current_path = str_sub(debug_getinfo(1).source, 2, str_len("/luascript.lua") * -1) 22 | 23 | -- Load the qless scripts and generate the sha1 digest. 24 | local f = assert(io_open(current_path .. "../../qless.lua", "r")) 25 | local qless_script = f:read("*all") 26 | local qless_script_sha1 = ngx_sha1_bin(qless_script) 27 | local qless_script_sha1_sum = str_gsub(qless_script_sha1, "(.)", 28 | function (c) 29 | return str_format("%02x%s", str_byte(c), "") 30 | end) 31 | 32 | 33 | function _M.new(name, redis) 34 | return setmetatable({ 35 | name = name, 36 | redis = redis, 37 | sha = qless_script_sha1_sum, 38 | }, mt) 39 | end 40 | 41 | 42 | function _M.reload(self) 43 | self.sha = self.redis:script("load", qless_script) 44 | end 45 | 46 | 47 | function _M.call(self, ...) 48 | local res, err = self.redis:evalsha(self.sha, 0, select(1, ...)) 49 | if not res and err == "NOSCRIPT No matching script. Please use EVAL." 
then
        self:reload()
        res, err = self.redis:evalsha(self.sha, 0, select(1, ...))
    end
    return res, err
end


return _M
-------------------------------------------------------------------------------- /lib/resty/qless/queue.lua: --------------------------------------------------------------------------------
local qless_job = require "resty.qless.job"
local cjson = require "cjson"

local ngx_log = ngx.log
local ngx_DEBUG = ngx.DEBUG
local ngx_ERR = ngx.ERR
local ngx_now = ngx.now
local cjson_encode = cjson.encode
local cjson_decode = cjson.decode



-- Object for interacting with jobs in different states in the queue. Not meant to be
-- instantiated directly, it's accessed via queue.jobs.
local _queue_jobs = {}
local _queue_jobs_mt = { __index = _queue_jobs }


function _queue_jobs._new(name, client)
    return setmetatable({ name = name, client = client }, _queue_jobs_mt)
end


function _queue_jobs.running(self, start, count)
    return self.client:call("jobs", "running", self.name, start or 0, count or 25)
end


function _queue_jobs.stalled(self, start, count)
    return self.client:call("jobs", "stalled", self.name, start or 0, count or 25)
end


function _queue_jobs.scheduled(self, start, count)
    return self.client:call("jobs", "scheduled", self.name, start or 0, count or 25)
end


function _queue_jobs.depends(self, start, count)
    return self.client:call("jobs", "depends", self.name, start or 0, count or 25)
end


function _queue_jobs.recurring(self, start, count)
    return self.client:call("jobs", "recurring", self.name, start or 0, count or 25)
end



local _M = {
    _VERSION = '0.12',
}


-- `heartbeat` and `max_concurrency` are virtual attributes backed by queue
-- config keys in Redis; reads and writes round-trip via config_get/config_set.
local mt = {
    __index = function(t, k)
        if k == "heartbeat" then
            -- BUGFIX: was `_M.get_config(k)` — no such function exists (the
            -- method is `config_get`) and it was called without `self`,
            -- which raised "attempt to call field 'get_config'".
            return t:config_get(k)
        elseif k == "max_concurrency" then
            -- BUGFIX: was `_M.get_config("max-concurrency")` — same problem
            -- as above.
            return tonumber(t:config_get("max-concurrency"))
        else
            return _M[k]
        end
    end,

    __newindex = function(t, k, v)
        if k == "heartbeat" then
            -- BUGFIX: was `_M.set_config(k, v)` — no such function exists
            -- (the method is `config_set`) and `self` was missing.
            return t:config_set(k, v)
        elseif k == "max_concurrency" then
            return t:config_set("max-concurrency", v)
        end
    end,
}


function _M.new(name, client)
    local self = setmetatable({
        name = name,
        client = client,
        worker_name = client.worker_name,
        jobs = _queue_jobs._new(name, client),
    }, mt)

    return self
end


-- Sets a queue-scoped config value ("<queue>-<key>").
function _M.config_set(self, k, v)
    return self.client:call("config.set", self.name .. "-" .. k, v)
end


-- Gets a queue-scoped config value ("<queue>-<key>").
function _M.config_get(self, k)
    return self.client:call("config.get", self.name .. "-" .. k)
end


function _M.counts(self)
    local counts = self.client:call("queues", self.name)
    return cjson_decode(counts)
end


function _M.paused(self)
    return self:counts().paused or false
end


-- Pauses the queue; with options.stop_jobs, also times out running jobs.
function _M.pause(self, options)
    if not options then options = {} end

    local client = self.client
    local res, err
    res, err = client:call("pause", self.name)

    if options.stop_jobs then
        res, err = client:call("timeout", self.jobs:running(0, -1))
    end

    return res, err
end


function _M.unpause(self)
    return self.client:call("unpause", self.name)
end


-- Puts a job of class `klass` with `data` onto this queue. Options:
-- jid, delay, priority, tags, retries, depends.
function _M.put(self, klass, data, options)
    if not options then options = {} end
    return self.client:call(
        "put",
        self.worker_name,
        self.name,
        options.jid or self.client:generate_jid(),
        klass,
        cjson_encode(data or {}),
        options.delay or 0,
        "priority", options.priority or 0,
        "tags", cjson_encode(options.tags or {}),
        "retries", options.retries or 5,
        "depends",
cjson_encode(options.depends or {}) 143 | ) 144 | end 145 | 146 | 147 | function _M.recur(self, klass, data, interval, options) 148 | if not options then options = {} end 149 | return self.client:call( 150 | "recur", 151 | self.name, 152 | options.jid or self.client:generate_jid(), 153 | klass, 154 | cjson_encode(data or {}), 155 | "interval", interval, options.offset or 0, 156 | "priority", options.priority or 0, 157 | "tags", cjson_encode(options.tags or {}), 158 | "retries", options.retries or 5, 159 | "backlog", options.backlog or 0 160 | ) 161 | end 162 | 163 | 164 | function _M.pop(self, count) 165 | local res = self.client:call("pop", self.name, self.worker_name, count or 1) 166 | if not res then return nil end 167 | res = cjson_decode(res) 168 | 169 | local jobs = {} 170 | for i, job in ipairs(res) do 171 | jobs[i] = qless_job.new(self.client, job) 172 | end 173 | 174 | if not count then 175 | return jobs[1] 176 | else 177 | return jobs 178 | end 179 | end 180 | 181 | 182 | function _M.peek(self, count) 183 | local res = self.client:call("peek", self.name, count or 1) 184 | res = cjson_decode(res) 185 | local jobs = {} 186 | for i, job in ipairs(res) do 187 | jobs[i] = qless_job.new(self.client, job) 188 | end 189 | if not count then 190 | return jobs[1] 191 | else 192 | return jobs 193 | end 194 | end 195 | 196 | 197 | function _M.stats(self, time) 198 | local stats = self.client:call("stats", self.name, time or ngx_now()) 199 | return cjson_decode(stats) 200 | end 201 | 202 | 203 | function _M.length(self) 204 | local redis = self.client.redis 205 | redis:multi() 206 | redis:zcard("ql:q:"..self.name.."-locks") 207 | redis:zcard("ql:q:"..self.name.."-work") 208 | redis:zcard("ql:q:"..self.name.."-scheduled") 209 | local res, err = redis:exec() 210 | 211 | local len = 0 212 | for _, v in ipairs(res) do 213 | len = len + v 214 | end 215 | 216 | return len 217 | end 218 | 219 | 220 | return _M 221 | 
-------------------------------------------------------------------------------- /lib/resty/qless/recurring_job.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson" 2 | 3 | local ngx_log = ngx.log 4 | local ngx_DEBUG = ngx.DEBUG 5 | local ngx_ERR = ngx.ERR 6 | local ngx_INFO = ngx.INFO 7 | local ngx_now = ngx.now 8 | local cjson_encode = cjson.encode 9 | local cjson_decode = cjson.decode 10 | 11 | 12 | local _M = { 13 | _VERSION = '0.12', 14 | } 15 | 16 | local mt = { 17 | -- We hide the real properties with __, and access them via the "update" 18 | -- setter method, to match the Ruby client syntax. 19 | __index = function (t, k) 20 | local private = rawget(t, "__" .. k) 21 | if private then 22 | return private 23 | else 24 | return _M[k] 25 | end 26 | end, 27 | 28 | __newindex = function(t, k, v) 29 | if t["__" .. k] then 30 | return t.update(t, k, v) 31 | end 32 | end, 33 | } 34 | 35 | 36 | function _M.new(client, atts) 37 | return setmetatable({ 38 | client = client, 39 | jid = atts.jid, 40 | tags = atts.tags, 41 | count = atts.count, 42 | 43 | -- Accessed via metatable setter/getter for 44 | -- compatability with the Ruby bindings. 45 | __priority = atts.priority, 46 | __retries = atts.retries, 47 | __interval = atts.interval, 48 | __data = cjson_decode(atts.data or "{}"), 49 | __klass = atts.klass, 50 | __backlog = atts.backlog, 51 | 52 | klass_name = atts.klass, 53 | queue_name = atts.queue, 54 | }, mt) 55 | end 56 | 57 | 58 | function _M.update(self, property, value) 59 | if property == "data" and value then value = cjson_encode(value) end 60 | 61 | self.client:call("recur.update", self.jid, property, value) 62 | self["__" .. 
property] = value 63 | end 64 | 65 | 66 | function _M.move(self, queue) 67 | self.client:call("recur.update", self.jid, "queue", queue) 68 | self.queue_name = queue 69 | end 70 | _M.requeue = _M.move -- for API parity with normal jobs 71 | 72 | 73 | function _M.cancel(self) 74 | self.client:call("unrecur", self.jid) 75 | end 76 | 77 | 78 | function _M.tag(self, ...) 79 | self.client:call("recur.tag", self.jid, ...) 80 | end 81 | 82 | 83 | function _M.untag(self, ...) 84 | self.client:call("recur.untag", self.jid, ...) 85 | end 86 | 87 | 88 | return _M 89 | -------------------------------------------------------------------------------- /lib/resty/qless/reserver/ordered.lua: -------------------------------------------------------------------------------- 1 | local ngx_log = ngx.log 2 | local ngx_DEBUG = ngx.DEBUG 3 | local ngx_ERR = ngx.ERR 4 | local ngx_INFO = ngx.INFO 5 | 6 | local _M = { 7 | _VERSION = '0.12', 8 | } 9 | 10 | local mt = { __index = _M } 11 | 12 | 13 | function _M.new(queues) 14 | return setmetatable({ 15 | queues = queues, 16 | }, mt) 17 | end 18 | 19 | 20 | function _M.reserve(self) 21 | for _, q in ipairs(self.queues) do 22 | local job = q:pop() 23 | if job then return job end 24 | end 25 | end 26 | 27 | 28 | return _M 29 | -------------------------------------------------------------------------------- /lib/resty/qless/reserver/round_robin.lua: -------------------------------------------------------------------------------- 1 | local ngx_log = ngx.log 2 | local ngx_DEBUG = ngx.DEBUG 3 | local ngx_ERR = ngx.ERR 4 | local ngx_INFO = ngx.INFO 5 | 6 | local _M = { 7 | _VERSION = '0.12', 8 | } 9 | 10 | local mt = { __index = _M } 11 | 12 | 13 | function _M.new(queues) 14 | return setmetatable({ 15 | queues = queues, 16 | num_queues = #queues, 17 | last_queue_index = 0, 18 | }, mt) 19 | end 20 | 21 | 22 | function _M.reserve(self) 23 | for i = 1, self.num_queues do 24 | local job = self:next_queue():pop() 25 | if job then return job end 26 | end 27 | 
end 28 | 29 | 30 | function _M.next_queue(self) 31 | self.last_queue_index = self.last_queue_index + 1 32 | self.last_queue_index = ((self.last_queue_index - 1) % (self.num_queues)) + 1 33 | return self.queues[self.last_queue_index] 34 | end 35 | 36 | 37 | return _M 38 | -------------------------------------------------------------------------------- /lib/resty/qless/reserver/shuffled_round_robin.lua: -------------------------------------------------------------------------------- 1 | local round_robin = require "resty.qless.reserver.round_robin" 2 | local tbl_insert = table.insert 3 | local tbl_remove = table.remove 4 | local math_random = math.random 5 | local math_randomseed = math.randomseed 6 | 7 | local _M = { 8 | _VERSION = '0.12', 9 | } 10 | 11 | local mt = { __index = _M } 12 | 13 | 14 | function _M.new(queues) 15 | math_randomseed(ngx.now()) 16 | return setmetatable({ 17 | queues = queues, 18 | num_queues = #queues, 19 | last_queue_index = 0, 20 | }, mt) 21 | end 22 | 23 | 24 | function _M.reserve(self) 25 | self:shuffle() 26 | return round_robin.reserve(self) 27 | end 28 | 29 | 30 | function _M.shuffle(self) 31 | local queues = {}; 32 | while #self.queues > 0 do 33 | tbl_insert(queues, tbl_remove(self.queues, math_random(#self.queues))) 34 | end 35 | self.queues = queues 36 | end 37 | 38 | -- import from round robin 39 | _M.next_queue = round_robin.next_queue 40 | 41 | 42 | return _M 43 | -------------------------------------------------------------------------------- /lib/resty/qless/worker.lua: -------------------------------------------------------------------------------- 1 | local cjson = require "cjson" 2 | local qless = require "resty.qless" 3 | 4 | local ngx_log = ngx.log 5 | local ngx_DEBUG = ngx.DEBUG 6 | local ngx_ERR = ngx.ERR 7 | local ngx_INFO = ngx.INFO 8 | local ngx_now = ngx.now 9 | local ngx_timer_at = ngx.timer.at 10 | local cjson_encode = cjson.encode 11 | local cjson_decode = cjson.decode 12 | local tbl_insert = table.insert 13 | 
local co_create = coroutine.create
local co_status = coroutine.status
local co_resume = coroutine.resume
local co_yield = coroutine.yield


local _M = {
    _VERSION = '0.12',
}

local mt = { __index = _M }

local DEFAULT_OPTIONS = {
    concurrency = 1,
    interval = 10,
    reserver = "ordered",
    queues = {},
}


function _M.new(params)
    return setmetatable({
        params = params,
    }, mt)
end


-- Starts `options.concurrency` worker timers, each of which connects to
-- Redis, reserves jobs from `options.queues` via the configured reserver,
-- performs them, and reschedules itself after `options.interval` seconds.
function _M.start(self, options)
    local options = setmetatable(options, { __index = DEFAULT_OPTIONS })

    local function worker(premature)
        if not premature then
            local q, err = qless.new(self.params)
            if not q then
                ngx_log(ngx_ERR, "qless could not connect to Redis: ", err)

                -- Try again at interval
                local ok, err = ngx_timer_at(options.interval, worker)
                if not ok then
                    ngx_log(ngx_ERR, "failed to run worker: ", err)
                end

                -- BUGFIX: always stop here. Previously, when the retry timer
                -- could not be created, execution fell through with q == nil
                -- and crashed below at q.queues.
                return ok
            end

            local ok, reserver_type =
                pcall(require, "resty.qless.reserver." .. options.reserver)

            if not ok then
                ngx_log(ngx_ERR,
                    "No such reserver: ", options.reserver, " - ", reserver_type)

                return nil
            end

            local queues = {}
            for i,v in ipairs(options.queues) do
                tbl_insert(queues, q.queues[v])
            end

            local reserver = reserver_type.new(queues)

            repeat
                local job = reserver:reserve()
                if job then
                    local ok, err_type, err = self:perform(job)
                    if not ok and err_type then
                        -- err_type, err indicates the job "raised an exception"
                        job:fail(err_type, err)

                        ngx_log(ngx_ERR,
                            "Got ", err_type, " failure from ",
                            job:description(), " \n", err)
                    else
                        -- Complete the job, unless its status has been changed
                        -- already
                        if not job.state_changed then
                            job:complete()
                        end
                    end
                end
                co_yield() -- The scheduler will resume us.
95 | until not job 96 | 97 | q:deregister_workers({ q.worker_name }) 98 | q:set_keepalive() 99 | 100 | local ok, err = ngx_timer_at(options.interval, worker) 101 | if not ok then 102 | ngx_log(ngx_ERR, "failed to run worker: ", err) 103 | end 104 | end 105 | end 106 | 107 | for i = 1,(options.concurrency) do 108 | local ok, err = ngx_timer_at(i, worker) 109 | if not ok then 110 | ngx_log(ngx_ERR, "failed to start worker: ", err) 111 | end 112 | end 113 | 114 | return true 115 | end 116 | 117 | 118 | function _M.perform(self, job) 119 | local res, err_type, err 120 | if self.middleware and type(self.middleware) == "function" then 121 | local mw = co_create(self.middleware) 122 | 123 | res, err_type, err = job:perform(select(1, co_resume(mw, job))) 124 | 125 | if co_status(mw) == "suspended" then 126 | co_resume(mw) 127 | end 128 | else 129 | res, err_type, err = job:perform() 130 | end 131 | 132 | return res, err_type, err 133 | end 134 | 135 | 136 | return _M 137 | -------------------------------------------------------------------------------- /lua-resty-qless-0.12-0.rockspec: -------------------------------------------------------------------------------- 1 | package = "lua-resty-qless" 2 | version = "0.12-0" 3 | source = { 4 | url = "git://github.com/pintsized/lua-resty-qless", 5 | tag = "v0.12" 6 | } 7 | description = { 8 | summary = "Lua binding to Qless (Queue / Pipeline management) for OpenResty", 9 | detailed = [[ 10 | lua-resty-qless is a binding to qless-core from Moz - a powerful Redis 11 | based job queueing system inspired by resque, but instead implemented as 12 | a collection of Lua scripts for Redis. 13 | This binding provides a full implementation of Qless via Lua script running 14 | in OpenResty / lua-nginx-module, including workers which can be started 15 | during the init_worker_by_lua phase. 
        Essentially, with this module and a modern Redis instance, you can turn
        your OpenResty server into a quite sophisticated yet lightweight job
        queuing system, which is also compatible with the reference Ruby
        implementation, Qless.
        Note: This module is not designed to work in a pure Lua environment.
    ]],
    homepage = "https://github.com/pintsized/lua-resty-qless",
    license = "2-clause BSD",
    maintainer = "James Hurst ",
}

dependencies = {
    "lua >= 5.1",
    "lua-resty-redis-connector >= 0.05",
}

-- Map each dotted module name to its source path under lib/ for the
-- "builtin" build type (e.g. "resty.qless.job" -> "lib/resty/qless/job.lua").
local modules = {
    "qless",
    "qless-lib",
    "resty.qless",
    "resty.qless.job",
    "resty.qless.luascript",
    "resty.qless.queue",
    "resty.qless.recurring_job",
    "resty.qless.worker",
    "resty.qless.reserver.ordered",
    "resty.qless.reserver.round_robin",
    "resty.qless.reserver.shuffled_round_robin",
}
local files = {}
for i = 1, #modules do
    local module = modules [i]
    files [module] = "lib/" .. module:gsub ("%.", "/") .. ".lua"
end

build = {
    type = "builtin",
    modules = files,
}
--------------------------------------------------------------------------------
/t/01-sanity.t:
--------------------------------------------------------------------------------
use Test::Nginx::Socket 'no_plan';
use Cwd qw(cwd);

my $pwd = cwd();

# Default Redis connection details; the Makefile's `test` target overrides
# these via TEST_REDIS_PORT / TEST_REDIS_DATABASE.
$ENV{TEST_REDIS_PORT} ||= 6379;
$ENV{TEST_REDIS_DATABASE} ||= 1;

# Shared nginx http{} config: sets the Lua package path to the local lib/
# tree plus a sibling lua-resty-redis-connector checkout, and exposes
# `cjson` and `redis_params` as globals to every test block.
our $HttpConfig = qq{
    lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;";
    error_log logs/error.log debug;
    init_by_lua_block {
        require("luacov.runner").init()

        cjson = require "cjson"
        redis_params = {
            host = "127.0.0.1",
            port = $ENV{TEST_REDIS_PORT},
            db = $ENV{TEST_REDIS_DATABASE},
        }
    }
};

no_long_string();
#no_diff();

run_tests();

__DATA__
=== TEST 1: Prove we can load the module and call a script.
--- http_config eval: $::HttpConfig
--- config
location = /1 {
    content_by_lua_block {
        local q, err = assert(require("resty.qless").new(redis_params),
            "new should return positively")

        ngx.say(cjson.encode(q.queues:counts()))
    }
}
--- request
GET /1
--- response_body
{}
--- no_error_log
[error]
[warn]


=== TEST 2: Load using externally connected redis.
51 | --- http_config eval: $::HttpConfig 52 | --- config 53 | location = /1 { 54 | content_by_lua_block { 55 | local function get_redis_client() 56 | return require("resty.redis.connector").new({ 57 | port = redis_params.port, 58 | db = redis_params.db 59 | }):connect() 60 | end 61 | 62 | local function broken_get_redis_client() 63 | return nil, "error connecting to redis" 64 | end 65 | 66 | local qless = require("resty.qless") 67 | 68 | local q = assert(qless.new({ redis_client = get_redis_client() }), 69 | "qless.new with redis_client should return positively") 70 | ngx.say(cjson.encode(q.queues:counts())) 71 | 72 | local q = assert(qless.new({ get_redis_client = get_redis_client }), 73 | "qless.new with get_redis_client should return positively") 74 | ngx.say(cjson.encode(q.queues:counts())) 75 | 76 | local q, err = qless.new({ get_redis_client = broken_get_redis_client }) 77 | assert(not q and err == "error connecting to redis", 78 | "qless.new should fail with bad_get_redis_client") 79 | } 80 | } 81 | --- request 82 | GET /1 83 | --- response_body 84 | {} 85 | {} 86 | --- no_error_log 87 | [error] 88 | [warn] 89 | 90 | 91 | === TEST 3: Set / get / clear config. 
92 | --- http_config eval: $::HttpConfig 93 | --- config 94 | location = /1 { 95 | content_by_lua_block { 96 | local qless = require "resty.qless" 97 | local q = qless.new(redis_params) 98 | 99 | local all = q:config_get_all() 100 | 101 | -- We can get options from all 102 | ngx.say(all["heartbeat"]) 103 | ngx.say(all["grace-period"]) 104 | 105 | -- They match individual calls to get 106 | ngx.say(q:config_get("heartbeat") == all["heartbeat"]) 107 | 108 | -- We can change them 109 | q:config_set("heartbeat", 30) 110 | local heartbeat = q:config_get("heartbeat") 111 | ngx.say(heartbeat) 112 | 113 | -- We can reset them to defaults 114 | q:config_clear("heartbeat") 115 | ngx.say(q:config_get("heartbeat") == all["heartbeat"]) 116 | } 117 | } 118 | --- request 119 | GET /1 120 | --- response_body_like 121 | \d+ 122 | \d+ 123 | true 124 | 30 125 | true 126 | --- no_error_log 127 | [error] 128 | [warn] 129 | 130 | 131 | === TEST 4: Connection methods 132 | --- http_config eval: $::HttpConfig 133 | --- config 134 | location = /1 { 135 | lua_socket_log_errors Off; 136 | content_by_lua_block { 137 | -- bad connection params 138 | local params = { 139 | port = 1233, 140 | } 141 | 142 | local qless, err = require("resty.qless").new(params) 143 | assert(not qless and err == "connection refused", 144 | "connection should be refused") 145 | 146 | local function get_connection() 147 | ngx.say("using connection callback") 148 | return require("resty.redis.connector").new({ 149 | port = redis_params.port 150 | }):connect() 151 | end 152 | 153 | local function close_connection(redis) 154 | ngx.say("using close connection callback") 155 | return redis:set_keepalive() 156 | end 157 | 158 | -- get_redis_client should override bad config 159 | params.get_redis_client = get_connection 160 | params.close_redis_client = close_connection 161 | 162 | local qless, err = require("resty.qless").new(params) 163 | assert(qless and not err, 164 | "callback should be used") 165 | 166 | local ok, 
err = qless:set_keepalive() 167 | assert(ok and not err, 168 | "connection should be added to the keepalive pool without error") 169 | 170 | -- direct already established client should overrid callback 171 | params.redis_client = require("resty.redis.connector").new({ 172 | port = redis_params.port 173 | }):connect() 174 | params.close_redis_client = nil 175 | 176 | local qless, err = require("resty.qless").new(params) 177 | assert(qless and not err, 178 | "existing client should be used") 179 | 180 | local ok, err = qless:set_keepalive() 181 | assert(ok and not err, 182 | "connection should be added to the keepalive pool without error") 183 | 184 | 185 | -- use redis connector to create new connection 186 | params = { 187 | port = redis_params.port 188 | } 189 | 190 | local qless, err = require("resty.qless").new(params) 191 | assert(qless and not err, 192 | "a new connection should be made") 193 | 194 | local ok, err = qless:set_keepalive() 195 | assert(ok and not err, 196 | "connection should be added to the keepalive pool without error") 197 | 198 | 199 | local qless, err = require("resty.qless").new(params) 200 | assert(qless and not err, 201 | "a new connection should be made") 202 | 203 | -- manual keepalive config 204 | local ok, err = qless:set_keepalive(30000, 100) 205 | assert(ok and not err, 206 | "connection should be added to the keepalive pool without error") 207 | 208 | } 209 | } 210 | --- request 211 | GET /1 212 | --- response_body 213 | using connection callback 214 | using close connection callback 215 | --- no_error_log 216 | [error] 217 | [warn] 218 | -------------------------------------------------------------------------------- /t/02-queue.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path 
"$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | require("luacov.runner").init() 14 | cjson = require "cjson" 15 | redis_params = { 16 | host = "127.0.0.1", 17 | port = $ENV{TEST_REDIS_PORT}, 18 | db = $ENV{TEST_REDIS_DATABASE} 19 | } 20 | } 21 | }; 22 | 23 | no_long_string(); 24 | #no_diff(); 25 | 26 | run_tests(); 27 | 28 | __DATA__ 29 | === TEST 1: Empty queue 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location = /1 { 33 | content_by_lua_block { 34 | local qless = require "resty.qless" 35 | local q = qless.new(redis_params) 36 | local counts = q.queues["queue_1"]:counts() 37 | ngx.say(counts["paused"]) 38 | ngx.say(counts["running"]) 39 | ngx.say(counts["name"]) 40 | ngx.say(counts["waiting"]) 41 | ngx.say(counts["recurring"]) 42 | ngx.say(counts["depends"]) 43 | ngx.say(counts["stalled"]) 44 | ngx.say(counts["scheduled"]) 45 | 46 | ngx.say(q.queues["queue_1"]:paused()) 47 | } 48 | } 49 | --- request 50 | GET /1 51 | --- response_body 52 | false 53 | 0 54 | queue_1 55 | 0 56 | 0 57 | 0 58 | 0 59 | 0 60 | false 61 | --- no_error_log 62 | [error] 63 | [warn] 64 | 65 | 66 | === TEST 2: Schedule some jobs, with and without data / options. 67 | Two will be "waiting", one "scheduled" with a delay, and one depending on the 68 | scheduled one. None will be running. 
69 | --- http_config eval: $::HttpConfig 70 | --- config 71 | location = /1 { 72 | content_by_lua_block { 73 | local qless = require "resty.qless" 74 | local q = qless.new(redis_params) 75 | 76 | q.queues["queue_1"]:put("job_klass_1") 77 | q.queues["queue_1"]:put("job_klass_2", { a = 1, b = 2}) 78 | local delayed_jid = q.queues["queue_1"]:put("job_klass_3", 79 | { a = 1 }, { delay = 1}) 80 | q.queues["queue_1"]:put("job_klass_4", {}, { depends = { delayed_jid }}) 81 | 82 | local counts = q.queues["queue_1"]:counts() 83 | ngx.say(counts["paused"]) 84 | ngx.say(counts["running"]) 85 | ngx.say(counts["name"]) 86 | ngx.say(counts["waiting"]) 87 | ngx.say(counts["recurring"]) 88 | ngx.say(counts["depends"]) 89 | ngx.say(counts["stalled"]) 90 | ngx.say(counts["scheduled"]) 91 | 92 | ngx.say(q.queues["queue_1"]:paused()) 93 | } 94 | } 95 | --- request 96 | GET /1 97 | --- response_body 98 | false 99 | 0 100 | queue_1 101 | 2 102 | 0 103 | 1 104 | 0 105 | 1 106 | false 107 | --- no_error_log 108 | [error] 109 | [warn] 110 | 111 | 112 | === TEST 3: Pause and unpause the queue. 
113 | --- http_config eval: $::HttpConfig 114 | --- config 115 | location = /1 { 116 | content_by_lua_block { 117 | local qless = require "resty.qless" 118 | local q = qless.new(redis_params) 119 | 120 | local queue = q.queues["queue_1"] 121 | 122 | ngx.say(queue:paused()) 123 | ngx.say(queue:counts()["paused"]) 124 | 125 | queue:pause() 126 | 127 | ngx.say(queue:counts()["paused"]) 128 | ngx.say(queue:paused()) 129 | 130 | queue:unpause() 131 | 132 | ngx.say(queue:counts()["paused"]) 133 | ngx.say(queue:paused()) 134 | } 135 | } 136 | --- request 137 | GET /1 138 | --- response_body 139 | false 140 | false 141 | true 142 | true 143 | false 144 | false 145 | --- no_error_log 146 | [error] 147 | [warn] 148 | 149 | 150 | === TEST 4: Peek at some jobs 151 | --- http_config eval: $::HttpConfig 152 | --- config 153 | location = /1 { 154 | content_by_lua_block { 155 | ngx.sleep(1) -- Wait for our delayed job to become available 156 | 157 | local qless = require "resty.qless" 158 | local q = qless.new(redis_params) 159 | 160 | local queue = q.queues["queue_1"] 161 | 162 | local job1 = queue:peek() 163 | ngx.say("single:", job1.klass) 164 | 165 | local jobs23 = queue:peek(3) 166 | for _,v in ipairs(jobs23) do 167 | ngx.say("multiple:", v.klass) 168 | end 169 | } 170 | } 171 | --- request 172 | GET /1 173 | --- response_body_like 174 | single:job_klass_\d 175 | multiple:job_klass_\d 176 | multiple:job_klass_\d 177 | multiple:job_klass_\d 178 | --- no_error_log 179 | [error] 180 | [warn] 181 | 182 | 183 | === TEST 5: Pop some jobs 184 | --- http_config eval: $::HttpConfig 185 | --- config 186 | location = /1 { 187 | content_by_lua_block { 188 | local qless = require "resty.qless" 189 | local q = qless.new(redis_params) 190 | 191 | local queue = q.queues["queue_1"] 192 | 193 | local job1 = queue:pop() 194 | 195 | local counts = queue:counts() 196 | ngx.say("running:", counts["running"]) 197 | ngx.say("waiting:", counts["waiting"]) 198 | ngx.say("scheduled:", 
counts["scheduled"]) 199 | 200 | local jobs23 = queue:pop(2) 201 | 202 | local counts = queue:counts() 203 | ngx.say("running:", counts["running"]) 204 | ngx.say("waiting:", counts["waiting"]) 205 | ngx.say("scheduled:", counts["scheduled"]) 206 | } 207 | } 208 | --- request 209 | GET /1 210 | --- response_body 211 | running:1 212 | waiting:2 213 | scheduled:0 214 | running:3 215 | waiting:0 216 | scheduled:0 217 | --- no_error_log 218 | [error] 219 | [warn] 220 | 221 | 222 | === TEST 6: Check the stats 223 | --- http_config eval: $::HttpConfig 224 | --- config 225 | location = /1 { 226 | content_by_lua_block { 227 | local qless = require "resty.qless" 228 | local q = qless.new(redis_params) 229 | 230 | local queue = q.queues["queue_1"] 231 | 232 | local stats = queue:stats() 233 | ngx.say(stats.wait.count) 234 | 235 | ngx.say(queue:length()) 236 | } 237 | } 238 | --- request 239 | GET /1 240 | --- response_body 241 | 3 242 | 3 243 | --- no_error_log 244 | [error] 245 | [warn] 246 | -------------------------------------------------------------------------------- /t/03-job.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | require("luacov.runner").init() 14 | cjson = require "cjson" 15 | redis_params = { 16 | host = "127.0.0.1", 17 | port = $ENV{TEST_REDIS_PORT}, 18 | db = $ENV{TEST_REDIS_DATABASE} 19 | } 20 | } 21 | }; 22 | 23 | no_long_string(); 24 | #no_diff(); 25 | 26 | run_tests(); 27 | 28 | __DATA__ 29 | === TEST 1: Simple job attributes 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location = /1 { 33 | content_by_lua_block { 34 | local qless = require "resty.qless" 35 | 
local q = qless.new(redis_params) 36 | 37 | local jid = q.queues["queue_2"]:put("job_klass_1", { a = 1, b = 2}) 38 | local job = q.queues["queue_2"]:pop() 39 | 40 | ngx.say("jid_match:", jid == job.jid) 41 | ngx.say("data_a:", job.data.a) 42 | ngx.say("data_b:", job.data.b) 43 | 44 | ngx.say("tags:", type(job.tags), ":", #job.tags) 45 | ngx.say("state:", job.state) 46 | ngx.say("tracked:", job.tracked) 47 | ngx.say("failure:", type(job.failure), ":", #job.failure) 48 | ngx.say("dependencies:", type(job.dependencies), ":", #job.dependencies) 49 | ngx.say("dependents:", type(job.dependents), ":", #job.dependents) 50 | ngx.say("spawned_from_jid:", job.spawned_from_jid) 51 | 52 | ngx.say("priority:", job.priority) 53 | job.priority = 10 54 | ngx.say("priority:", job.priority) 55 | 56 | ngx.say("expires_at:", job.expires_at) 57 | ngx.say("worker_name_match:", q.worker_name == job.worker_name) 58 | ngx.say("klass:", job.klass) 59 | ngx.say("queue_name:", job.queue_name) 60 | ngx.say("original_retries:", job.retries) 61 | ngx.say("retries_left:", job.retries_left) 62 | ngx.say("raw_queue_history_1_q:", job.raw_queue_history[1].q) 63 | 64 | ngx.say("description:", job:description()) 65 | ngx.say("ttl:", math.ceil(job:ttl())) 66 | ngx.say("spawned_from:", job:spawned_from()) 67 | } 68 | } 69 | --- request 70 | GET /1 71 | --- response_body_like 72 | jid_match:true 73 | data_a:1 74 | data_b:2 75 | tags:table:0 76 | state:running 77 | tracked:false 78 | failure:table:0 79 | dependencies:table:0 80 | dependents:table:0 81 | spawned_from_jid:false 82 | priority:0 83 | priority:10 84 | expires_at:[\d\.]+ 85 | worker_name_match:true 86 | klass:job_klass_1 87 | queue_name:queue_2 88 | original_retries:nil 89 | retries_left:5 90 | raw_queue_history_1_q:queue_2 91 | description:job_klass_1 \([a-z0-9]+ / queue_2 / running\) 92 | ttl:60 93 | spawned_from:nil 94 | --- no_error_log 95 | [error] 96 | [warn] 97 | 98 | 99 | === TEST 2: Move job to a different queue 100 | --- http_config 
eval: $::HttpConfig 101 | --- config 102 | location = /1 { 103 | content_by_lua_block { 104 | local qless = require "resty.qless" 105 | local q = qless.new(redis_params) 106 | 107 | local jid = q.queues["queue_3"]:put("job_klass_1", 108 | { a = 1 }, 109 | { priority = 5, tags = { "hello"} } 110 | ) 111 | 112 | local job = q.queues["queue_3"]:pop() 113 | 114 | local before_triggered = false 115 | job.before_requeue = function() 116 | before_triggered = true 117 | end 118 | 119 | local after_triggered = false 120 | job.after_requeue = function() 121 | after_triggered = true 122 | end 123 | 124 | job:move("queue_4") 125 | job = q.queues["queue_4"]:pop() 126 | 127 | ngx.say("jid_match:", jid == job.jid) 128 | ngx.say("data_a:", job.data.a) 129 | ngx.say("priority:", job.priority) 130 | ngx.say("tag_1:", job.tags[1]) 131 | ngx.say("before_triggered:", before_triggered) 132 | ngx.say("after_triggered:", after_triggered) 133 | } 134 | } 135 | --- request 136 | GET /1 137 | --- response_body 138 | jid_match:true 139 | data_a:1 140 | priority:5 141 | tag_1:hello 142 | before_triggered:true 143 | after_triggered:true 144 | --- no_error_log 145 | [error] 146 | [warn] 147 | 148 | 149 | === TEST 3: Fail a job 150 | --- http_config eval: $::HttpConfig 151 | --- config 152 | location = /1 { 153 | content_by_lua_block { 154 | local qless = require "resty.qless" 155 | local q = qless.new(redis_params) 156 | 157 | local queue = q.queues["queue_4"] 158 | 159 | local failed = q.jobs:failed() 160 | ngx.say("failed:", failed["failed-jobs"]) 161 | 162 | local jid = queue:put("job_klass_1") 163 | local job = queue:pop() 164 | 165 | local before_triggered = false 166 | job.before_fail = function() 167 | before_triggered = true 168 | end 169 | 170 | local after_triggered = false 171 | job.after_fail = function() 172 | after_triggered = true 173 | end 174 | 175 | job:fail("failed-jobs", "testing") 176 | 177 | local failed = q.jobs:failed() 178 | ngx.say("failed:", failed["failed-jobs"]) 179 
| 180 | ngx.say("before_triggered:", before_triggered) 181 | ngx.say("after_triggered:", after_triggered) 182 | } 183 | } 184 | --- request 185 | GET /1 186 | --- response_body 187 | failed:nil 188 | failed:1 189 | before_triggered:true 190 | after_triggered:true 191 | --- no_error_log 192 | [error] 193 | [warn] 194 | 195 | 196 | === TEST 4: Heartbeat 197 | --- http_config eval: $::HttpConfig 198 | --- config 199 | location = /1 { 200 | content_by_lua_block { 201 | local qless = require "resty.qless" 202 | local q = qless.new(redis_params) 203 | 204 | local queue = q.queues["queue_5"] 205 | local jid = queue:put("job_klass_1") 206 | 207 | 208 | local job = queue:pop() 209 | ngx.say("ttl:", math.ceil(job:ttl())) 210 | 211 | ngx.sleep(1) 212 | local expires = job:heartbeat() 213 | 214 | ngx.say("ttl:", math.ceil(job:ttl())) 215 | } 216 | } 217 | --- request 218 | GET /1 219 | --- response_body 220 | ttl:60 221 | ttl:60 222 | --- no_error_log 223 | [error] 224 | [warn] 225 | 226 | 227 | === TEST 5: Complete, complete-and-move, then cancel a job 228 | --- http_config eval: $::HttpConfig 229 | --- config 230 | location = /1 { 231 | content_by_lua_block { 232 | local qless = require "resty.qless" 233 | local q = qless.new(redis_params) 234 | 235 | local queue = q.queues["queue_6"] 236 | local jid = queue:put("job_klass_1") 237 | 238 | local counts = queue:counts() 239 | ngx.say("waiting:", counts.waiting) 240 | ngx.say("running:", counts.running) 241 | 242 | local job = queue:pop() 243 | 244 | local before_complete_triggered = false 245 | job.before_complete = function() 246 | before_complete_triggered = true 247 | end 248 | 249 | local after_complete_triggered = false 250 | job.after_complete = function() 251 | after_complete_triggered = true 252 | end 253 | 254 | local counts = queue:counts() 255 | ngx.say("waiting:", counts.waiting) 256 | ngx.say("running:", counts.running) 257 | 258 | job:complete() 259 | 260 | local counts = queue:counts() 261 | ngx.say("waiting:", 
counts.waiting) 262 | ngx.say("running:", counts.running) 263 | 264 | ngx.say("before_complete_triggered:", before_complete_triggered) 265 | ngx.say("after_complete_triggered:", after_complete_triggered) 266 | 267 | -- Now do it again, but move completed job 268 | -- to the next queue, and include some options (delay). 269 | local jid = queue:put("job_klass_2") 270 | 271 | local queue2 = q.queues["queue_7"] 272 | local counts2 = queue2:counts() 273 | ngx.say("waiting:", counts2.waiting) 274 | ngx.say("scheduled:", counts2.scheduled) 275 | ngx.say("running:", counts2.running) 276 | 277 | local job = queue:pop() 278 | job:complete("queue_7", { delay = 1 }) 279 | 280 | local counts2 = queue2:counts() 281 | ngx.say("waiting:", counts2.waiting) 282 | ngx.say("scheduled:", counts2.scheduled) 283 | ngx.say("running:", counts2.running) 284 | 285 | ngx.sleep(1) 286 | 287 | local counts2 = queue2:counts() 288 | ngx.say("waiting:", counts2.waiting) 289 | ngx.say("scheduled:", counts2.scheduled) 290 | ngx.say("running:", counts2.running) 291 | 292 | local job = queue2:pop() 293 | 294 | local before_cancel_triggered = false 295 | job.before_cancel = function() 296 | before_cancel_triggered = true 297 | end 298 | 299 | local after_cancel_triggered = false 300 | job.after_cancel = function() 301 | after_cancel_triggered = true 302 | end 303 | 304 | job:cancel() 305 | 306 | local counts2 = queue2:counts() 307 | ngx.say("waiting:", counts2.waiting) 308 | ngx.say("scheduled:", counts2.scheduled) 309 | ngx.say("running:", counts2.running) 310 | 311 | ngx.say("before_cancel_triggered:", before_cancel_triggered) 312 | ngx.say("after_cancel_triggered:", after_cancel_triggered) 313 | } 314 | } 315 | --- request 316 | GET /1 317 | --- response_body 318 | waiting:1 319 | running:0 320 | waiting:0 321 | running:1 322 | waiting:0 323 | running:0 324 | before_complete_triggered:true 325 | after_complete_triggered:true 326 | waiting:0 327 | scheduled:0 328 | running:0 329 | waiting:0 330 | 
scheduled:1 331 | running:0 332 | waiting:1 333 | scheduled:0 334 | running:0 335 | waiting:0 336 | scheduled:0 337 | running:0 338 | before_cancel_triggered:true 339 | after_cancel_triggered:true 340 | --- no_error_log 341 | [error] 342 | [warn] 343 | 344 | 345 | === TEST 6: Track and untrack jobs 346 | --- http_config eval: $::HttpConfig 347 | --- config 348 | location = /1 { 349 | content_by_lua_block { 350 | local qless = require "resty.qless" 351 | local q = qless.new(redis_params) 352 | 353 | local queue = q.queues["queue_8"] 354 | local jid = queue:put("job_klass_1") 355 | 356 | local tracked = q.jobs:tracked() 357 | ngx.say("expired:", table.getn(tracked.expired)) 358 | ngx.say("jobs:", table.getn(tracked.jobs)) 359 | 360 | local job = queue:pop() 361 | job:track() 362 | 363 | local tracked = q.jobs:tracked() 364 | ngx.say("expired:", table.getn(tracked.expired)) 365 | ngx.say("jobs:", table.getn(tracked.jobs)) 366 | 367 | ngx.say("jid_match:", tracked.jobs[1].jid == jid) 368 | 369 | job:untrack() 370 | 371 | local tracked = q.jobs:tracked() 372 | ngx.say("expired:", table.getn(tracked.expired)) 373 | ngx.say("jobs:", table.getn(tracked.jobs)) 374 | } 375 | } 376 | --- request 377 | GET /1 378 | --- response_body 379 | expired:0 380 | jobs:0 381 | expired:0 382 | jobs:1 383 | jid_match:true 384 | expired:0 385 | jobs:0 386 | --- no_error_log 387 | [error] 388 | [warn] 389 | 390 | 391 | === TEST 7: Tag and untag jobs 392 | --- http_config eval: $::HttpConfig 393 | --- config 394 | location = /1 { 395 | content_by_lua_block { 396 | local qless = require "resty.qless" 397 | local q = qless.new(redis_params) 398 | 399 | local queue = q.queues["queue_9"] 400 | local jid = queue:put("job_klass_1") 401 | 402 | local tagged = q.jobs:tagged("testtag") 403 | ngx.say("total:", tagged.total) 404 | 405 | local job = queue:pop() 406 | job:tag("testtag", "testtag2") 407 | 408 | local tagged = q.jobs:tagged("testtag") 409 | ngx.say("total:", tagged.total) 410 | 411 | local 
tagged = q.jobs:tagged("testtag2") 412 | ngx.say("total:", tagged.total) 413 | 414 | job:untag("testtag2") 415 | 416 | local tagged = q.jobs:tagged("testtag2") 417 | ngx.say("total:", tagged.total) 418 | 419 | job:untag("testtag") 420 | local tagged = q.jobs:tagged("testtag") 421 | ngx.say("total:", tagged.total) 422 | 423 | -- Add tags during put 424 | 425 | local jid = queue:put("job_klass_2", {}, 426 | { tags = { "testtag3", "testtag4" }}) 427 | 428 | local tagged = q.jobs:tagged("testtag3") 429 | ngx.say("total:", tagged.total) 430 | 431 | 432 | -- Test offset and count 433 | 434 | local jid = queue:put("job_klass_2", {}, { tags = { "testtag5" }}) 435 | local jid = queue:put("job_klass_2", {}, { tags = { "testtag5" }}) 436 | local jid = queue:put("job_klass_2", {}, { tags = { "testtag5" }}) 437 | local jid = queue:put("job_klass_2", {}, { tags = { "testtag5" }}) 438 | 439 | local tagged = q.jobs:tagged("testtag5", 0, 2) 440 | ngx.say("total:", table.getn(tagged.jobs)) 441 | 442 | local tagged = q.jobs:tagged("testtag5", 3, 2) 443 | ngx.say("total:", table.getn(tagged.jobs)) 444 | } 445 | } 446 | --- request 447 | GET /1 448 | --- response_body 449 | total:0 450 | total:1 451 | total:1 452 | total:0 453 | total:0 454 | total:1 455 | total:2 456 | total:1 457 | --- no_error_log 458 | [error] 459 | [warn] 460 | 461 | 462 | === TEST 8: Depend and undepend jobs 463 | --- http_config eval: $::HttpConfig 464 | --- config 465 | location = /1 { 466 | content_by_lua_block { 467 | local qless = require "resty.qless" 468 | local q = qless.new(redis_params) 469 | 470 | local queue = q.queues["queue_10"] 471 | local jid1 = queue:put("job_klass_1") 472 | 473 | local jid2 = queue:put("job_klass_2", {}, { depends = { jid1 }}) 474 | 475 | local job1, job2 = unpack(q.jobs:multiget(jid1, jid2)) 476 | 477 | ngx.say("job2_depends_job1:", job2.dependencies[1] == jid1) 478 | ngx.say("job1_dependent_of_job2:", job1.dependents[1] == jid2) 479 | 480 | -- Add dependencies post creation 
481 | 482 | local jid3 = queue:put("job_klass_3") 483 | 484 | -- You cant add dependencies to a job not in the "depends" state 485 | -- (i.e. already depending on something). Bit odd bit thems the rules. 486 | 487 | job2:depend(jid3) 488 | job2:undepend(jid1) 489 | local job2 = q.jobs:get(jid2) 490 | 491 | ngx.say("job2_depends_job3:", (job2.dependencies[1] == jid3)) 492 | ngx.say("job2_depends_count:", table.getn(job2.dependencies)) 493 | 494 | } 495 | } 496 | --- request 497 | GET /1 498 | --- response_body 499 | job2_depends_job1:true 500 | job1_dependent_of_job2:true 501 | job2_depends_job3:true 502 | job2_depends_count:1 503 | --- no_error_log 504 | [error] 505 | [warn] 506 | 507 | 508 | === TEST 9: Log to the job history 509 | --- http_config eval: $::HttpConfig 510 | --- config 511 | location = /1 { 512 | content_by_lua_block { 513 | local qless = require "resty.qless" 514 | local q = qless.new(redis_params) 515 | 516 | local queue = q.queues["queue_11"] 517 | local jid1 = queue:put("job_klass_1") 518 | 519 | local job = q.jobs:get(jid1) 520 | 521 | ngx.say("1_what:", job.raw_queue_history[1].what) 522 | 523 | job:log("captainslog") 524 | local job = q.jobs:get(jid1) 525 | 526 | ngx.say("2_what:", job.raw_queue_history[2].what) 527 | 528 | } 529 | } 530 | --- request 531 | GET /1 532 | --- response_body 533 | 1_what:put 534 | 2_what:captainslog 535 | --- no_error_log 536 | [error] 537 | [warn] 538 | -------------------------------------------------------------------------------- /t/04-recurring_job.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | require("luacov.runner").init() 14 | 
cjson = require "cjson" 15 | redis_params = { 16 | host = "127.0.0.1", 17 | port = $ENV{TEST_REDIS_PORT}, 18 | db = $ENV{TEST_REDIS_DATABASE} 19 | } 20 | } 21 | }; 22 | 23 | no_long_string(); 24 | #no_diff(); 25 | 26 | run_tests(); 27 | 28 | __DATA__ 29 | === TEST 1: Enqueue a recurring job and check attributes 30 | --- http_config eval: $::HttpConfig 31 | --- config 32 | location = /1 { 33 | content_by_lua_block { 34 | local qless = require "resty.qless" 35 | local q = qless.new(redis_params) 36 | 37 | local jid = q.queues["queue_12"]:recur("job_klass_1", 38 | { a = 1, b = 2 }, 39 | 10, 40 | { 41 | priority = 2, 42 | tags = { 43 | "recurringtag1", 44 | }, 45 | } 46 | ) 47 | local job = q.jobs:get(jid) 48 | 49 | ngx.say("jid_match:", jid == job.jid) 50 | ngx.say("klass_name:", job.klass_name) 51 | ngx.say("data_a:", job.data.a) 52 | ngx.say("data_b:", job.data.b) 53 | ngx.say("interval:", job.interval) 54 | ngx.say("priority:", job.priority) 55 | 56 | -- Move 57 | local counts = q.queues["queue_13"]:counts() 58 | ngx.say("queue_13_count:", counts.recurring) 59 | 60 | job:move("queue_13") 61 | 62 | local counts = q.queues["queue_13"]:counts() 63 | ngx.say("queue_13_count:", counts.recurring) 64 | 65 | -- Tag 66 | ngx.say("tag1:", job.tags[1]) 67 | 68 | job:tag("recurringtag2") 69 | local job = q.jobs:get(jid) 70 | 71 | ngx.say("tag2:", job.tags[2]) 72 | 73 | job:cancel() 74 | 75 | local job = q.jobs:get(jid) 76 | ngx.say("job_cancelled:", job == nil) 77 | } 78 | } 79 | --- request 80 | GET /1 81 | --- response_body 82 | jid_match:true 83 | klass_name:job_klass_1 84 | data_a:1 85 | data_b:2 86 | interval:10 87 | priority:2 88 | queue_13_count:0 89 | queue_13_count:1 90 | tag1:recurringtag1 91 | tag2:recurringtag2 92 | job_cancelled:true 93 | 94 | --- no_error_log 95 | [error] 96 | [warn] 97 | -------------------------------------------------------------------------------- /t/05-worker.t: -------------------------------------------------------------------------------- 
1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | require("luacov.runner").init() 14 | cjson = require "cjson" 15 | redis_params = { 16 | host = "127.0.0.1", 17 | port = $ENV{TEST_REDIS_PORT}, 18 | db = $ENV{TEST_REDIS_DATABASE} 19 | } 20 | 21 | -- Test task module, just sums numbers and logs the result. 22 | local sum = {} 23 | 24 | function sum.perform(job) 25 | local data = job.data 26 | 27 | if data.cancel then 28 | job:cancel() 29 | return 30 | end 31 | 32 | if not data or not data.numbers or #data.numbers == 0 then 33 | return nil, "job-error", "no data provided" 34 | end 35 | 36 | local sum = 0 37 | for _,v in ipairs(data.numbers) do 38 | sum = sum + v 39 | end 40 | 41 | ngx.log(ngx.NOTICE, "Sum: ", sum) 42 | 43 | if data.autocomplete then 44 | job:complete() 45 | end 46 | end 47 | 48 | package.loaded["testtasks.sum"] = sum 49 | } 50 | 51 | 52 | init_worker_by_lua_block { 53 | local Qless_Worker = require "resty.qless.worker" 54 | 55 | local worker = Qless_Worker.new(redis_params) 56 | 57 | worker:start({ 58 | interval = 1, 59 | concurrency = 4, 60 | reserver = "ordered", 61 | queues = { "queue_14" }, 62 | }) 63 | 64 | 65 | local worker_mw = Qless_Worker.new(redis_params) 66 | 67 | worker_mw.middleware = function() 68 | ngx.log(ngx.NOTICE, "Middleware start") 69 | coroutine.yield() 70 | ngx.log(ngx.NOTICE, "Middleware stop") 71 | end 72 | 73 | worker_mw:start({ 74 | queues = { "queue_15" }, 75 | }) 76 | } 77 | }; 78 | 79 | no_long_string(); 80 | #no_diff(); 81 | 82 | run_tests(); 83 | 84 | __DATA__ 85 | === TEST 1: Test a job runs and gets completed. 
86 | --- http_config eval: $::HttpConfig 87 | --- config 88 | location = /1 { 89 | content_by_lua_block { 90 | local qless = require "resty.qless" 91 | local q = qless.new(redis_params) 92 | 93 | local jid = q.queues["queue_14"]:put("testtasks.sum", { numbers = { 1, 2, 3, 4 } }) 94 | ngx.sleep(1) 95 | 96 | local job = q.jobs:get(jid) 97 | ngx.say(job.state) 98 | } 99 | } 100 | --- request 101 | GET /1 102 | --- response_body 103 | complete 104 | --- error_log eval 105 | [qr/Sum: 10/] 106 | 107 | 108 | === TEST 2: Test middleware runs before and after job 109 | --- http_config eval: $::HttpConfig 110 | --- config 111 | location = /1 { 112 | content_by_lua_block { 113 | local qless = require "resty.qless" 114 | local q = qless.new(redis_params) 115 | 116 | local jid = q.queues["queue_15"]:put("testtasks.sum", { numbers = { 1, 2, 3, 4 } }) 117 | ngx.sleep(1) 118 | 119 | local job = q.jobs:get(jid) 120 | ngx.say(job.state) 121 | } 122 | } 123 | --- request 124 | GET /1 125 | --- response_body 126 | complete 127 | --- error_log eval 128 | [qr/Sum: 10/, 129 | qr/Middleware stop/, 130 | qr/Middleware start/] 131 | 132 | 133 | === TEST 3: Test a job can cancel itself 134 | --- http_config eval: $::HttpConfig 135 | --- config 136 | location = /1 { 137 | content_by_lua_block { 138 | local qless = require "resty.qless" 139 | local q = qless.new(redis_params) 140 | 141 | local jid = q.queues["queue_14"]:put("testtasks.sum", { 142 | cancel = true 143 | }) 144 | ngx.sleep(1) 145 | 146 | local job = q.jobs:get(jid) 147 | if job then 148 | ngx.say(job.state) 149 | else 150 | ngx.say("canceled") 151 | end 152 | } 153 | } 154 | --- request 155 | GET /1 156 | --- response_body 157 | canceled 158 | --- no_error_log 159 | [error] 160 | 161 | 162 | === TEST 3b: Test a job is failed and logs the error if data is bad 163 | --- http_config eval: $::HttpConfig 164 | --- config 165 | location = /1 { 166 | content_by_lua_block { 167 | local qless = require "resty.qless" 168 | local q = 
qless.new(redis_params) 169 | 170 | local jid = q.queues["queue_14"]:put("testtasks.sum") 171 | ngx.sleep(1) 172 | 173 | local job = q.jobs:get(jid) 174 | if job then 175 | ngx.say(job.state) 176 | else 177 | ngx.say("canceled") 178 | end 179 | } 180 | } 181 | --- request 182 | GET /1 183 | --- response_body 184 | failed 185 | --- error_log eval 186 | [qr/Got job-error failure from testtasks\.sum \([a-f0-9]{32} \/ queue_14 \/ running\)/] 187 | 188 | 189 | === TEST 4: Test a job can complete itself without tripping up the worker 190 | --- http_config eval: $::HttpConfig 191 | --- config 192 | location = /1 { 193 | content_by_lua_block { 194 | local qless = require "resty.qless" 195 | local q = qless.new(redis_params) 196 | 197 | local jid = q.queues["queue_14"]:put("testtasks.sum", { 198 | numbers = { 1, 2, 3, 4}, 199 | autocomplete = true 200 | }) 201 | ngx.sleep(1) 202 | 203 | local job = q.jobs:get(jid) 204 | if job then 205 | ngx.say(job.state) 206 | end 207 | } 208 | } 209 | --- request 210 | GET /1 211 | --- response_body 212 | complete 213 | --- no_error_log 214 | [error] 215 | -------------------------------------------------------------------------------- /t/06-reserver.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | require("luacov.runner").init() 14 | cjson = require "cjson" 15 | redis_params = { 16 | host = "127.0.0.1", 17 | port = $ENV{TEST_REDIS_PORT}, 18 | db = $ENV{TEST_REDIS_DATABASE} 19 | } 20 | } 21 | }; 22 | 23 | no_long_string(); 24 | #no_diff(); 25 | 26 | run_tests(); 27 | 28 | __DATA__ 29 | === TEST 1: Test jobs are reserved in queue order 30 | --- http_config eval: 
$::HttpConfig 31 | --- config 32 | location = /1 { 33 | content_by_lua_block { 34 | local qless = require "resty.qless" 35 | local q = qless.new(redis_params) 36 | 37 | local jid1 = q.queues["queue_16"]:put("testtask", { 1 }, { priority = 2 }) 38 | local jid2 = q.queues["queue_16"]:put("testtask", { 1 }, { priority = 1 }) 39 | local jid3 = q.queues["queue_15"]:put("testtask", { 1 }) 40 | 41 | local ordered = require "resty.qless.reserver.ordered" 42 | local reserver = ordered.new({ q.queues["queue_15"], q.queues["queue_16"] }) 43 | 44 | ngx.say("jid3_match:", reserver:reserve().jid == jid3) 45 | ngx.say("jid1_match:", reserver:reserve().jid == jid1) 46 | ngx.say("jid2_match:", reserver:reserve().jid == jid2) 47 | } 48 | } 49 | --- request 50 | GET /1 51 | --- response_body 52 | jid3_match:true 53 | jid1_match:true 54 | jid2_match:true 55 | --- no_error_log 56 | [error] 57 | [warn] 58 | 59 | 60 | === TEST 2: Test jobs are reserved in round robin order 61 | --- http_config eval: $::HttpConfig 62 | --- config 63 | location = /1 { 64 | content_by_lua_block { 65 | local qless = require "resty.qless" 66 | local q = qless.new(redis_params) 67 | 68 | local jid1 = q.queues["queue_17"]:put("testtask", { 1 }, { priority = 2 }) 69 | local jid2 = q.queues["queue_17"]:put("testtask", { 1 }, { priority = 1 }) 70 | local jid3 = q.queues["queue_18"]:put("testtask", { 1 }) 71 | 72 | local ordered = require "resty.qless.reserver.round_robin" 73 | local reserver = ordered.new({ q.queues["queue_17"], q.queues["queue_18"] }) 74 | 75 | ngx.say("jid1_match:", reserver:reserve().jid == jid1) 76 | ngx.say("jid3_match:", reserver:reserve().jid == jid3) 77 | ngx.say("jid2_match:", reserver:reserve().jid == jid2) 78 | } 79 | } 80 | --- request 81 | GET /1 82 | --- response_body 83 | jid1_match:true 84 | jid3_match:true 85 | jid2_match:true 86 | --- no_error_log 87 | [error] 88 | [warn] 89 | 90 | 91 | === TEST 3: Test jobs are reserved in shuffled round robin order.
92 | Can't test for randomness, but we test that the jobs turn up without errors. 93 | --- http_config eval: $::HttpConfig 94 | --- config 95 | location = /1 { 96 | content_by_lua_block { 97 | local qless = require "resty.qless" 98 | local q = qless.new(redis_params) 99 | 100 | local jid1 = q.queues["queue_17"]:put("testtask", { 1 }, { priority = 2 }) 101 | local jid2 = q.queues["queue_17"]:put("testtask", { 1 }, { priority = 1 }) 102 | local jid3 = q.queues["queue_18"]:put("testtask", { 1 }) 103 | 104 | local shuffled = require "resty.qless.reserver.shuffled_round_robin" 105 | local reserver = shuffled.new({ q.queues["queue_17"], q.queues["queue_18"] }) 106 | 107 | ngx.log(ngx.INFO, reserver:reserve().queue_name) 108 | ngx.log(ngx.INFO, reserver:reserve().queue_name) 109 | ngx.log(ngx.INFO, reserver:reserve().queue_name) 110 | } 111 | } 112 | --- request 113 | GET /1 114 | --- response_body 115 | --- no_error_log 116 | [error] 117 | [warn] 118 | --- error_log eval 119 | ["queue_17","queue_17","queue_18"] 120 | -------------------------------------------------------------------------------- /t/07-events.t: -------------------------------------------------------------------------------- 1 | use Test::Nginx::Socket 'no_plan'; 2 | use Cwd qw(cwd); 3 | 4 | my $pwd = cwd(); 5 | 6 | $ENV{TEST_REDIS_PORT} ||= 6379; 7 | $ENV{TEST_REDIS_DATABASE} ||= 1; 8 | 9 | our $HttpConfig = qq{ 10 | lua_package_path "$pwd/../lua-resty-redis-connector/lib/?.lua;$pwd/lib/?.lua;;"; 11 | error_log logs/error.log debug; 12 | init_by_lua_block { 13 | cjson = require "cjson" 14 | redis_params = { 15 | host = "127.0.0.1", 16 | port = $ENV{TEST_REDIS_PORT}, 17 | db = $ENV{TEST_REDIS_DATABASE}, 18 | } 19 | } 20 | 21 | init_worker_by_lua_block { 22 | local subscribe = function(premature) 23 | if not premature then 24 | require("luacov.runner").init() 25 | 26 | local qless = require "resty.qless" 27 | local events = qless.events(redis_params) 28 | 29 | events:listen({ "log", "canceled" },
function(channel, message) 30 | if channel == "log" then 31 | message = cjson.decode(message) 32 | ngx.log(ngx.DEBUG, channel, " ", message.event) 33 | else 34 | ngx.log(ngx.DEBUG, channel, " ", message) 35 | end 36 | end) 37 | 38 | local ok, err = ngx.timer.at(0, subscribe) 39 | if not ok then ngx.log(ngx.ERR, err) end 40 | end 41 | end 42 | 43 | local ok, err = ngx.timer.at(0, subscribe) 44 | if not ok then ngx.log(ngx.ERR, err) end 45 | } 46 | }; 47 | 48 | no_long_string(); 49 | #no_diff(); 50 | 51 | run_tests(); 52 | 53 | __DATA__ 54 | === TEST 1: Listen for events 55 | --- http_config eval: $::HttpConfig 56 | --- config 57 | location = /1 { 58 | content_by_lua_block { 59 | local qless = require "resty.qless" 60 | local q = qless.new(redis_params) 61 | 62 | local jid = q.queues["queue_19"]:put("testjob") 63 | 64 | q.jobs:get(jid):track() 65 | q.jobs:get(jid):cancel() 66 | } 67 | } 68 | --- request 69 | GET /1 70 | --- response_body 71 | --- no_error_log 72 | [warn] 73 | [error] 74 | --- error_log eval 75 | ["log put", 76 | "log canceled", 77 | "canceled"] 78 | -------------------------------------------------------------------------------- /util/lua-releng: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | use strict; 4 | use warnings; 5 | 6 | sub file_contains ($$); 7 | 8 | my $version; 9 | for my $file (map glob, qw{ lib/*/*.lua lib/*/*/*.lua lib/*/*/*/*.lua lib/*/*/*/*/*.lua }) { 10 | # Check the sanity of each .lua file 11 | open my $in, $file or 12 | die "ERROR: Can't open $file for reading: $!\n"; 13 | my $found_ver; 14 | while (<$in>) { 15 | my ($ver, $skipping); 16 | if (/(?x) (?:_VERSION) \s* = .*? ([\d\.]*\d+) (.*? 
SKIP)?/) { 17 | my $orig_ver = $ver = $1; 18 | $found_ver = 1; 19 | # $skipping = $2; 20 | $ver =~ s{^(\d+)\.(\d{3})(\d{3})$}{join '.', int($1), int($2), int($3)}e; 21 | warn "$file: $orig_ver ($ver)\n"; 22 | 23 | } elsif (/(?x) (?:_VERSION) \s* = \s* ([a-zA-Z_]\S*)/) { 24 | warn "$file: $1\n"; 25 | $found_ver = 1; 26 | last; 27 | } 28 | 29 | if ($ver and $version and !$skipping) { 30 | if ($version ne $ver) { 31 | # die "$file: $ver != $version\n"; 32 | } 33 | } elsif ($ver and !$version) { 34 | $version = $ver; 35 | } 36 | } 37 | if (!$found_ver) { 38 | warn "WARNING: No \"_VERSION\" or \"version\" field found in `$file`.\n"; 39 | } 40 | close $in; 41 | 42 | #print "Checking use of Lua global variables in file $file ...\n"; 43 | system("luac -p -l $file | grep ETGLOBAL | grep -vE '(require|type|tostring|error|ngx|ndk|jit|setmetatable|getmetatable|string|table|io|os|print|tonumber|math|pcall|xpcall|unpack|pairs|ipairs|assert|module|package|coroutine|[gs]etfenv|next|select|rawset|rawget|debug)\$'"); 44 | #file_contains($file, "attempt to write to undeclared variable"); 45 | system("grep -H -n -E --color '.{120}' $file"); 46 | } 47 | 48 | sub file_contains ($$) { 49 | my ($file, $regex) = @_; 50 | open my $in, $file 51 | or die "Cannot open $file for reading: $!\n"; 52 | my $content = do { local $/; <$in> }; 53 | close $in; 54 | #print "$content"; 55 | return scalar ($content =~ /$regex/); 56 | } 57 | 58 | if (-d 't') { 59 | for my $file (map glob, qw{ t/*.t t/*/*.t t/*/*/*.t }) { 60 | system(qq{grep -H -n --color -E '\\--- ?(ONLY|LAST)' $file}); 61 | } 62 | } 63 | 64 | --------------------------------------------------------------------------------