`.
24 |
25 | "Error importing XXX" when starting consumer
26 | This error occurs when the module containing the specified configuration
27 | cannot be loaded (not on the PYTHONPATH, mistyped, etc.). One quick way to
28 | check is to open a Python shell and try to import the configuration module
29 | yourself.
30 |
31 | Example syntax: ``huey_consumer.py main_module.huey``
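For example, assuming the instance above lives in ``main_module``, a quick sanity check from a Python shell:

.. code-block:: python

    # Run from the same directory / virtualenv used to start the consumer.
    from main_module import huey  # An ImportError here explains the consumer error.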
32 |
33 | Tasks not returning results
34 | Ensure that you have not accidentally specified ``results=False`` when
35 | instantiating your :py:class:`Huey` object.
36 |
37 | Additionally note that, by default, Huey does not store ``None`` in the
38 | result-store. So if your task returns ``None``, Huey will discard the
39 | result. If you need to block or detect whether a task has finished, it is
40 | recommended that you return a non-``None`` value or in extreme
41 | circumstances you can initialize Huey with ``store_none=True`` (though this
42 | can quickly fill up your result store and is only recommended for users who
43 | are very familiar with Huey).
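A minimal sketch of an instance configured to keep all results, including ``None`` (the keyword arguments are the standard :py:class:`Huey` options):

.. code-block:: python

    from huey import RedisHuey

    # results=True is the default; store_none=True also keeps None return values.
    huey = RedisHuey('my-app', results=True, store_none=True)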
44 |
45 | Scheduled tasks are not being run at the correct time
46 | Check the clock on the machine running the consumer - if it differs from the
47 | producer's clock, tasks may run at unexpected times. Huey uses UTC internally
48 | by default, and naive datetimes will be converted from local time to UTC
49 | (when local time is not already UTC).
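For example, given some task ``add`` (the task name is illustrative), a relative delay sidesteps clock and timezone mismatches, while a naive ``eta`` is interpreted as local time:

.. code-block:: python

    from datetime import datetime, timedelta

    # Run roughly one minute from now, regardless of server timezone.
    res = add.schedule(args=(1, 2), delay=60)

    # A naive datetime is treated as local time and converted to UTC by default.
    res = add.schedule(args=(1, 2), eta=datetime.now() + timedelta(minutes=5))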
50 |
51 | Cronjobs are not being run
52 | The consumer and scheduler run in UTC by default, so crontab schedules are
evaluated against UTC rather than the server's local time.
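For example, assuming ``huey`` is your application's Huey instance:

.. code-block:: python

    from huey import crontab

    @huey.periodic_task(crontab(hour='8', minute='0'))
    def nightly_report():
        # With the default UTC behavior this runs at 08:00 UTC, which may be
        # a different hour in the server's local timezone.
        ...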
53 |
54 | Greenlet workers seem stuck
55 | If you wish to use the Greenlet worker type, you need to be sure to
56 | monkeypatch in your application's entrypoint. At the top of your ``main``
57 | module, you can add the following code: ``from gevent import monkey; monkey.patch_all()``.
58 | Furthermore, if your tasks are CPU-bound, ``gevent`` can appear to lock up
59 | because it only supports cooperative multi-tasking (as opposed to
60 | pre-emptive multi-tasking when using threads). For Django, it is necessary
61 | to apply the patch inside the ``manage.py`` script. See the Django docs
62 | section for the code.
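A minimal sketch of the ordering, mirroring ``examples/simple/main.py`` (module names are illustrative):

.. code-block:: python

    # main.py -- the gevent patch must run before anything else imports
    # socket, ssl or threading.
    from gevent import monkey; monkey.patch_all()

    from config import huey
    import tasks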
63 |
64 | Testing projects using Huey
65 | Use ``immediate=True``:
66 |
67 | .. code-block:: python
68 |
69 | test_mode = bool(os.environ.get('TEST_MODE'))
70 |
71 | # When immediate=True, Huey will default to using an in-memory
72 | # storage layer.
73 | huey = RedisHuey(immediate=test_mode)
74 |
75 | # Alternatively, you can set the `immediate` attribute:
76 | huey.immediate = test_mode
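In immediate mode, tasks execute synchronously in the caller and result handles resolve right away, so no consumer is needed in tests. A minimal sketch (the task and test names are illustrative):

.. code-block:: python

    from huey import RedisHuey

    huey = RedisHuey('test-app', immediate=True)  # Uses in-memory storage.

    @huey.task()
    def add(a, b):
        return a + b

    def test_add():
        result = add(1, 2)    # Executes inline, no consumer required.
        assert result() == 3  # The result handle resolves immediately.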
77 |
--------------------------------------------------------------------------------
/examples/django_ex/README:
--------------------------------------------------------------------------------
1 | In one terminal, run:
2 |
3 | ./manage.py run_huey
4 |
5 | In another terminal:
6 |
7 | ./manage.py shell
8 |
9 | Commands to try out:
10 |
11 | from djangoex.test_app.tasks import *
12 | res = add(1, 2)
13 | print(res.get(blocking=True)) # Wait for result, then print.
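A few more commands to try, using other tasks defined in test_app/tasks.py:

res = slow(5)                  # db_task that sleeps for 5 seconds in the consumer.
print(res.get(blocking=True))  # Prints 5 once the task finishes.

res = mul(4, 5)
print(res.get(blocking=True))  # Prints 20.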
14 |
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/examples/django_ex/djangoex/__init__.py
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/settings.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | INSTALLED_APPS = [
4 | 'huey.contrib.djhuey',
5 | 'djangoex.test_app',
6 | ]
7 |
8 | HUEY = {
9 | 'name': 'test-django',
10 | 'consumer': {
11 | 'blocking': True, # Use blocking list pop instead of polling Redis.
12 | 'loglevel': logging.DEBUG,
13 | 'workers': 4,
14 | 'scheduler_interval': 1,
15 | 'simple_log': True,
16 | },
17 | }
18 |
19 | DATABASES = {'default': {
20 | 'NAME': ':memory:',
21 | 'ENGINE': 'django.db.backends.sqlite3'}}
22 |
23 | SECRET_KEY = 'foo'
24 |
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/test_app/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/examples/django_ex/djangoex/test_app/__init__.py
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/test_app/models.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/examples/django_ex/djangoex/test_app/models.py
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/test_app/tasks.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from huey import crontab
4 | from huey.contrib.djhuey import task, periodic_task, db_task, on_commit_task
5 |
6 |
7 | def tprint(s, c=32):
8 | # Helper to print messages from within tasks using color, to make them
9 | # stand out in examples.
10 | print('\x1b[1;%sm%s\x1b[0m' % (c, s))
11 |
12 |
13 | # Tasks used in examples.
14 |
15 | @task()
16 | def add(a, b):
17 | return a + b
18 |
19 |
20 | @task()
21 | def mul(a, b):
22 | return a * b
23 |
24 |
25 | @db_task() # Opens DB connection for duration of task.
26 | def slow(n):
27 | tprint('going to sleep for %s seconds' % n)
28 | time.sleep(n)
29 | tprint('finished sleeping for %s seconds' % n)
30 | return n
31 |
32 |
33 | @task(retries=1, retry_delay=5, context=True)
34 | def flaky_task(task=None):
35 | if task is not None and task.retries == 0:
36 | tprint('flaky task succeeded on retry.')
37 | return 'succeeded on retry.'
38 | tprint('flaky task is about to raise an exception.', 31)
39 | raise Exception('flaky task failed!')
40 |
41 |
42 | # Periodic tasks.
43 |
44 | @periodic_task(crontab(minute='*/2'))
45 | def every_other_minute():
46 | tprint('This task runs every 2 minutes.', 35)
47 |
48 |
49 | @periodic_task(crontab(minute='*/5'))
50 | def every_five_mins():
51 | tprint('This task runs every 5 minutes.', 34)
52 |
53 |
54 | # When this task is called, it will not be enqueued until the active
55 | # transaction commits. If no transaction is active it will enqueue immediately.
56 | # Example:
57 | # with transaction.atomic():
58 | # rh = after_commit('hello!')
59 | # time.sleep(5) # Still not enqueued....
60 | #
61 | # # Now the task is enqueued.
62 | # print(rh.get(True)) # prints "6".
63 | @on_commit_task()
64 | def after_commit(msg):
65 | tprint(msg, 33)
66 | return len(msg)
67 |
--------------------------------------------------------------------------------
/examples/django_ex/djangoex/urls.py:
--------------------------------------------------------------------------------
1 | # django.conf.urls.patterns() was removed in Django 1.10; a plain list of URL
2 | # patterns is sufficient (this example does not define any URLs).
3 | urlpatterns = [
4 | ]
5 |
--------------------------------------------------------------------------------
/examples/django_ex/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 |
5 | if __name__ == "__main__":
6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoex.settings")
7 |
8 | from django.core.management import execute_from_command_line
9 |
10 | execute_from_command_line(sys.argv)
11 |
--------------------------------------------------------------------------------
/examples/flask_ex/README.md:
--------------------------------------------------------------------------------
1 | ## Flask example
2 |
3 | Minimal example of using Huey with Flask. Displays a form that accepts user
4 | input and then enqueues a task with the form value when the form is submitted.
5 |
6 | To try out the example:
7 |
8 | * Run ``./run_webapp.sh`` then browse to http://localhost:5000/
9 | * In a second terminal, run ``./run_huey.sh`` to start the consumer.
10 |
11 | **Important**: note that the tasks and views are imported in `main.py`,
12 | which serves as the application entry point. This is because any functions
13 | decorated with `@huey.task()` need to be imported to be registered with the
14 | huey instance. Similarly, we need to import the views so that our view function
15 | is registered with the Flask application.
16 |
--------------------------------------------------------------------------------
/examples/flask_ex/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/examples/flask_ex/__init__.py
--------------------------------------------------------------------------------
/examples/flask_ex/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from huey import RedisHuey
3 |
4 |
5 | DEBUG = True
6 | SECRET_KEY = 'shhh, secret'
7 |
8 | app = Flask(__name__)
9 | app.config.from_object(__name__)
10 |
11 | huey = RedisHuey()
12 |
--------------------------------------------------------------------------------
/examples/flask_ex/main.py:
--------------------------------------------------------------------------------
1 | from app import app
2 | from app import huey
3 | import tasks # Import tasks so they are registered with Huey instance.
4 | import views # Import views so they are registered with Flask app.
5 |
6 |
7 | if __name__ == '__main__':
8 | app.run()
9 |
--------------------------------------------------------------------------------
/examples/flask_ex/run_huey.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # Ensure that the huey package is on the python-path, in the event it hasn't
4 | # been installed using pip.
5 | export PYTHONPATH="../../:$PYTHONPATH"
6 |
7 | # Run the consumer with 2 worker threads.
8 | python ../../huey/bin/huey_consumer.py main.huey -w2
9 |
--------------------------------------------------------------------------------
/examples/flask_ex/run_webapp.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | python main.py
4 |
--------------------------------------------------------------------------------
/examples/flask_ex/tasks.py:
--------------------------------------------------------------------------------
1 | from huey import crontab
2 |
3 | from app import huey
4 |
5 |
6 | @huey.task()
7 | def example_task(n):
8 | # Example task -- prints the following line to the stdout of the
9 | # consumer process and returns the argument that was passed in (n).
10 | print('-- RUNNING EXAMPLE TASK: CALLED WITH n=%s --' % n)
11 | return n
12 |
13 |
14 | @huey.periodic_task(crontab(minute='*/5'))
15 | def print_every5_minutes():
16 | # Example periodic task -- this runs every 5 minutes and prints the
17 | # following line to the stdout of the consumer process.
18 | print('-- PERIODIC TASK -- THIS RUNS EVERY 5 MINUTES --')
19 |
--------------------------------------------------------------------------------
/examples/flask_ex/templates/home.html:
--------------------------------------------------------------------------------
1 | <!doctype html>
2 | <html>
3 | <head>
4 | <title>Flask example</title>
5 | </head>
6 | <body>
7 |
8 | <h2>Flask example</h2>
9 | {% if message %}
10 | <p>{{ message }}</p>
11 | {% endif %}
12 |
13 | <p>Submitting the form will cause an example task to be enqueued and executed
14 | by the consumer.</p>
15 |
16 | <!-- The form posts a single value, "n", which is handled in views.py. -->
17 | <form method="post">
18 | <input name="n">
19 | <button type="submit">Submit</button>
20 | </form>
21 |
22 | <h3>Links</h3>
23 |
24 | </body>
25 | </html>
--------------------------------------------------------------------------------
/examples/flask_ex/views.py:
--------------------------------------------------------------------------------
1 | from flask import render_template
2 | from flask import request
3 |
4 | from app import app
5 | from tasks import example_task
6 |
7 |
8 | @app.route('/', methods=['GET', 'POST'])
9 | def home():
10 | if request.method == 'POST' and request.form.get('n'):
11 | n = request.form['n']
12 |
13 | # Enqueue our task, the consumer will pick it up and run it.
14 | example_task(n)
15 | message = 'Enqueued example_task(%s) - see consumer output' % n
16 | else:
17 | message = None
18 |
19 | return render_template('home.html', message=message)
20 |
--------------------------------------------------------------------------------
/examples/mini/mini.py:
--------------------------------------------------------------------------------
1 | from gevent import monkey; monkey.patch_all()
2 | import gevent
3 |
4 | from huey.contrib.mini import MiniHuey
5 |
6 |
7 | huey = MiniHuey()
8 |
9 | # If we want to support scheduling tasks for execution in the future, or for
10 | # periodic execution (e.g. cron), then we need to call `huey.start()` which
11 | # starts a scheduler thread.
12 | # starts a scheduler greenlet.
13 |
14 |
15 | @huey.task()
16 | def add(a, b):
17 | return a + b
18 |
19 | res = add(1, 2)
20 | print(res())  # Result is computed in a separate greenlet.
21 |
22 | print('Scheduling task for execution in 2 seconds.')
23 | res = add.schedule(args=(10, 20), delay=2)
24 | print(res())
25 |
26 | # Stop the scheduler. Not strictly necessary, but a good idea.
27 | huey.stop()
28 |
--------------------------------------------------------------------------------
/examples/simple/README:
--------------------------------------------------------------------------------
1 | In one terminal, run:
2 |
3 | ./cons.sh
4 |
5 | In another terminal:
6 |
7 | python main.py
8 |
9 | To try out the various worker classes, you can run:
10 |
11 | * ./cons.sh thread
12 | * ./cons.sh greenlet
13 | * ./cons.sh process
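Once the consumer is running, you can also experiment from a Python shell with
the tasks defined in tasks.py, for example:

>>> from tasks import add, add_pipeline
>>> add(3, 4).get(blocking=True)
7
>>> add_pipeline(1, 2, 3, 4).get(blocking=True)
[3, 6, 10]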
14 |
--------------------------------------------------------------------------------
/examples/simple/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/examples/simple/__init__.py
--------------------------------------------------------------------------------
/examples/simple/amain.py:
--------------------------------------------------------------------------------
1 | """
2 | Example script showing how you can use asyncio to read results.
3 | """
4 | import asyncio
5 | import time
6 |
7 | from huey.contrib.asyncio import aget_result
8 | from huey.contrib.asyncio import aget_result_group
9 |
10 | from tasks import *
11 |
12 |
13 | async def main():
14 | s = time.time()
15 | r1, r2, r3 = [slow(2) for _ in range(3)]
16 | results = await asyncio.gather(
17 | aget_result(r1),
18 | aget_result(r2),
19 | aget_result(r3))
20 | print(results)
21 | print(round(time.time() - s, 2))
22 |
23 | # Using result group.
24 | s = time.time()
25 | results = await aget_result_group(slow.map([2, 2, 2]))
26 | print(results)
27 | print(round(time.time() - s, 2))
28 |
29 |
30 | if __name__ == '__main__':
31 | asyncio.run(main())
32 |
--------------------------------------------------------------------------------
/examples/simple/config.py:
--------------------------------------------------------------------------------
1 | from huey import RedisHuey
2 |
3 | huey = RedisHuey('simple.test', blocking=True)
4 |
--------------------------------------------------------------------------------
/examples/simple/cons.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | echo "HUEY CONSUMER"
3 | echo "-------------"
4 | echo "In another terminal, run 'python main.py'"
5 | echo "Stop the consumer using Ctrl+C"
6 | PYTHONPATH=".:$PYTHONPATH"
7 | export PYTHONPATH
8 | WORKER_CLASS=${1:-thread}
9 | export WORKER_CLASS
10 | python ../../huey/bin/huey_consumer.py main.huey --workers=4 -k $WORKER_CLASS -S
11 |
--------------------------------------------------------------------------------
/examples/simple/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | if os.environ.get('WORKER_CLASS') in ('greenlet', 'gevent'):
3 | print('Monkey-patching for gevent.')
4 | from gevent import monkey; monkey.patch_all()
5 | import sys
6 |
7 | from config import huey
8 | from tasks import add
9 |
10 |
11 | if __name__ == '__main__':
12 | if sys.version_info[0] == 2:
13 | input = raw_input
14 |
15 | print('Huey Demo -- adds two numbers.')
16 | a = int(input('a = '))
17 | b = int(input('b = '))
18 | result = add(a, b)
19 | print('Result:')
20 | print(result.get(True))
21 |
--------------------------------------------------------------------------------
/examples/simple/tasks.py:
--------------------------------------------------------------------------------
1 | import os
2 | import threading
3 | import time
4 | from huey import crontab
5 | from huey.signals import *
6 |
7 | from config import huey
8 |
9 |
10 | def tprint(s, c=32):
11 | # Helper to print messages from within tasks using color, to make them
12 | # stand out in examples.
13 | print('\x1b[1;%sm%s\x1b[0m' % (c, s))
14 |
15 |
16 | # Tasks used in examples.
17 |
18 | @huey.task()
19 | def add(a, b):
20 | return a + b
21 |
22 | @huey.task()
23 | def mul(a, b):
24 | return a * b
25 |
26 |
27 | @huey.task()
28 | def slow(n):
29 | tprint('going to sleep for %s seconds' % n)
30 | time.sleep(n)
31 | tprint('finished sleeping for %s seconds' % n)
32 | return n
33 |
34 |
35 | # Example task that will fail on its first invocation, but succeed when
36 | # retried. Also shows how to use the `context` parameter, which passes the task
37 | # instance into the decorated function.
38 |
39 | @huey.task(retries=1, retry_delay=5, context=True)
40 | def flaky_task(task=None):
41 | if task is not None and task.retries == 0:
42 | tprint('flaky task succeeded on retry.')
43 | return 'succeeded on retry.'
44 | tprint('flaky task is about to raise an exception.', 31)
45 | raise Exception('flaky task failed!')
46 |
47 |
48 | # Pipeline example.
49 |
50 | @huey.task()
51 | def add_pipeline(a, b, *nums):
52 | # Example task that spawns a pipeline of sub-tasks.
53 | # In an interactive shell, you would call this like:
54 | # results = add_pipeline(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
55 | # print(results.get(blocking=True))
56 | # [3, 6, 10, 15, 21, 28, 36, 45, 55]
57 | task = add.s(a, b)
58 | for num in nums:
59 | task = task.then(add, num)
60 | result_group = huey.enqueue(task)
61 | tprint('enqueued pipeline of add() tasks.')
62 | return result_group.get(blocking=True)
63 |
64 |
65 | # Periodic tasks.
66 |
67 | @huey.periodic_task(crontab(minute='*/2'))
68 | def every_other_minute():
69 | tprint('This task runs every 2 minutes.', 35)
70 |
71 |
72 | @huey.periodic_task(crontab(minute='*/5'))
73 | def every_five_mins():
74 | tprint('This task runs every 5 minutes.', 34)
75 |
76 |
77 | # Example of using hooks.
78 |
79 | @huey.on_startup()
80 | def startup_hook():
81 | pid = os.getpid()
82 | tid = threading.get_ident()
83 | tprint('process %s, thread %s - startup hook' % (pid, tid))
84 |
85 |
86 | @huey.on_shutdown()
87 | def shutdown_hook():
88 | pid = os.getpid()
89 | tid = threading.get_ident()
90 | tprint('process %s, thread %s - shutdown hook' % (pid, tid))
91 |
92 |
93 | # Example of using a signal.
94 |
95 | @huey.signal(SIGNAL_COMPLETE)
96 | def on_complete(signal, task, exc=None):
97 | tprint('received signal [%s] for task [%s]' % (signal, task))
98 |
99 | @huey.signal(SIGNAL_INTERRUPTED)
100 | def on_interrupted(signal, task, exc=None):
101 | tprint('received interrupted task signal for task: %s' % task)
102 |
103 |
104 | # Example of retrying a task if it is *currently* running.
105 |
106 | from huey.constants import EmptyData
107 | from huey.exceptions import RetryTask
108 | @huey.task(context=True)
109 | def hold_on(a, task=None):
110 | if task is not None and huey.storage.peek_data('hold_on') is not EmptyData:
111 | print('appears to be running...will retry in 60s')
112 | raise RetryTask(delay=60)
113 |
114 | huey.storage.put_data('hold_on', '1')
115 | try:
116 | print('in task, sleeping for %s' % a)
117 | time.sleep(a)
118 | finally:
119 | huey.storage.pop_data('hold_on')
120 | return True
121 |
122 | # Example of limiting the time a task can run for (10s).
123 |
124 | @huey.task()
125 | def limit_time(n):
126 | s = time.time()
127 | evt = threading.Event()
128 | def run_computation():
129 | for i in range(n):
130 | # Here we would do some kind of computation, checking our event
131 | # along the way.
132 | print('.', end='', flush=True)
133 | if evt.wait(1):
134 | print('CANCELED')
135 | return
136 |
137 | evt.set()
138 |
139 | t = threading.Thread(target=run_computation)
140 | t.start()
141 |
142 | # Attempt to wait for the thread to finish for a total of 10s.
143 | for i in range(10):
144 | t.join(1)
145 |
146 | if not evt.is_set():
147 | # The thread still hasn't finished -- flag it that it must stop now.
148 | evt.set()
149 | t.join()
150 |
151 | print('limit_time() completed in %0.2f' % (time.time() - s))
152 |
153 | # Task that blocks CPU, used for testing.
154 |
155 | @huey.task()
156 | def slow_cpu():
157 | for i in range(1000000000):
158 | j = i % 13331
159 |
--------------------------------------------------------------------------------
/huey/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Charles Leifer'
2 | __license__ = 'MIT'
3 | __version__ = '2.5.3'
4 |
5 | from huey.api import BlackHoleHuey
6 | from huey.api import Huey
7 | from huey.api import FileHuey
8 | from huey.api import MemoryHuey
9 | from huey.api import PriorityRedisExpireHuey
10 | from huey.api import PriorityRedisHuey
11 | from huey.api import RedisExpireHuey
12 | from huey.api import RedisHuey
13 | from huey.api import SqliteHuey
14 | from huey.api import crontab
15 | from huey.exceptions import CancelExecution
16 | from huey.exceptions import RetryTask
17 |
--------------------------------------------------------------------------------
/huey/bin/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/huey/bin/__init__.py
--------------------------------------------------------------------------------
/huey/bin/huey_consumer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import logging
4 | import os
5 | import sys
6 |
7 | from huey.constants import WORKER_PROCESS
8 | from huey.consumer import Consumer
9 | from huey.consumer_options import ConsumerConfig
10 | from huey.consumer_options import OptionParserHandler
11 | from huey.utils import load_class
12 |
13 |
14 | def err(s):
15 | sys.stderr.write('\033[91m%s\033[0m\n' % s)
16 |
17 |
18 | def load_huey(path):
19 | try:
20 | return load_class(path)
21 | except:
22 | cur_dir = os.getcwd()
23 | if cur_dir not in sys.path:
24 | sys.path.insert(0, cur_dir)
25 | return load_huey(path)
26 | err('Error importing %s' % path)
27 | raise
28 |
29 |
30 | def consumer_main():
31 | parser_handler = OptionParserHandler()
32 | parser = parser_handler.get_option_parser()
33 | options, args = parser.parse_args()
34 |
35 | if len(args) == 0:
36 | err('Error: missing import path to `Huey` instance')
37 | err('Example: huey_consumer.py app.queue.huey_instance')
38 | sys.exit(1)
39 |
40 | options = {k: v for k, v in options.__dict__.items()
41 | if v is not None}
42 | config = ConsumerConfig(**options)
43 | config.validate()
44 |
45 | if sys.platform == 'win32' and config.worker_type == WORKER_PROCESS:
46 | err('Error: huey cannot be run in "process"-mode on Windows.')
47 | sys.exit(1)
48 |
49 | huey_instance = load_huey(args[0])
50 |
51 | # Set up logging for the "huey" namespace.
52 | logger = logging.getLogger('huey')
53 | config.setup_logger(logger)
54 |
55 | consumer = huey_instance.create_consumer(**config.values)
56 | consumer.run()
57 |
58 |
59 | if __name__ == '__main__':
60 | if sys.version_info >= (3, 8) and sys.platform == 'darwin':
61 | import multiprocessing
62 | try:
63 | multiprocessing.set_start_method('fork')
64 | except RuntimeError:
65 | pass
66 | consumer_main()
67 |
--------------------------------------------------------------------------------
/huey/constants.py:
--------------------------------------------------------------------------------
1 | WORKER_THREAD = 'thread'
2 | WORKER_GREENLET = 'greenlet'
3 | WORKER_PROCESS = 'process'
4 | WORKER_TYPES = (WORKER_THREAD, WORKER_GREENLET, WORKER_PROCESS)
5 |
6 |
7 | class EmptyData(object):
8 | pass
9 |
--------------------------------------------------------------------------------
/huey/consumer_options.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import optparse
3 | from collections import namedtuple
4 | from logging import FileHandler
5 |
6 | from huey.constants import WORKER_THREAD
7 | from huey.constants import WORKER_TYPES
8 |
9 |
10 | config_defaults = (
11 | ('workers', 1),
12 | ('worker_type', WORKER_THREAD),
13 | ('initial_delay', 0.1),
14 | ('backoff', 1.15),
15 | ('max_delay', 10.0),
16 | ('check_worker_health', True),
17 | ('health_check_interval', 10),
18 | ('scheduler_interval', 1),
19 | ('periodic', True),
20 | ('logfile', None),
21 | ('verbose', None),
22 | ('simple_log', None),
23 | ('flush_locks', False),
24 | ('extra_locks', None),
25 | )
26 | config_keys = [param for param, _ in config_defaults]
27 |
28 |
29 | def option(name, **options):
30 | if isinstance(name, tuple):
31 | letter, opt_name = name
32 | else:
33 | opt_name = name.replace('_', '-')
34 | letter = name[0]
35 | options.setdefault('dest', name)
36 | return ('-' + letter, '--' + opt_name, options)
37 |
38 |
39 | class OptionParserHandler(object):
40 | def get_worker_options(self):
41 | return (
42 | # -w, -k, -d, -m, -b, -c, -C, -f
43 | option('workers', type='int',
44 | help='number of worker threads/processes (default=1)'),
45 | option(('k', 'worker-type'), choices=WORKER_TYPES,
46 | dest='worker_type',
47 | help=('worker execution model (thread, greenlet, '
48 | 'process). Use process for CPU-intensive workloads, '
49 | 'and greenlet for IO-heavy workloads. When in doubt, '
50 | 'thread is the safest choice.')),
51 | option('delay', dest='initial_delay',
52 | help='minimum time to wait when polling queue (default=.1)',
53 | metavar='SECONDS', type='float'),
54 | option('max_delay', metavar='SECONDS',
55 | help='maximum time to wait when polling queue (default=10)',
56 | type='float'),
57 | option('backoff', metavar='SECONDS',
58 | help=('factor used to back-off polling interval when queue '
59 | 'is empty (default=1.15, must be >= 1)'),
60 | type='float'),
61 | option(('c', 'health-check-interval'), type='float',
62 | dest='health_check_interval', metavar='SECONDS',
63 | help=('minimum time to wait between worker health checks '
64 | '(default=10)')),
65 | option(('C', 'disable-health-check'), action='store_false',
66 | dest='check_worker_health',
67 | help=('disable health check that monitors worker health, '
68 | 'restarting any worker that crashes unexpectedly.')),
69 | option('flush_locks', action='store_true', dest='flush_locks',
70 | help=('flush all locks when starting consumer.')),
71 | option(('L', 'extra-locks'), dest='extra_locks',
72 | help=('additional locks to flush, separated by comma.')),
73 | )
74 |
75 | def get_scheduler_options(self):
76 | return (
77 | # -s, -n
78 | option('scheduler_interval', type='int',
79 | help='Granularity of scheduler in seconds.'),
80 | option('no_periodic', action='store_false',
81 | dest='periodic', help='do NOT enqueue periodic tasks'),
82 | )
83 |
84 | def get_logging_options(self):
85 | return (
86 | # -l, -v, -q, -S
87 | option('logfile', metavar='FILE'),
88 | option('verbose', action='store_true',
89 | help='verbose logging (includes DEBUG statements)'),
90 | option('quiet', action='store_false', dest='verbose',
91 | help='minimal logging'),
92 | option(('S', 'simple'), action='store_true', dest='simple_log',
93 | help='simple logging format (time message)'),
94 | )
95 |
96 | def get_option_parser(self):
97 | parser = optparse.OptionParser('Usage: %prog [options] '
98 | 'path.to.huey_instance')
99 |
100 | def add_group(name, description, options):
101 | group = parser.add_option_group(name, description)
102 | for abbrev, name, kwargs in options:
103 | group.add_option(abbrev, name, **kwargs)
104 |
105 | add_group('Logging', 'The following options pertain to logging.',
106 | self.get_logging_options())
107 |
108 | add_group('Workers', (
109 | 'By default huey uses a single worker thread. To specify a '
110 | 'different number of workers, or a different execution model (such'
111 | ' as multiple processes or greenlets), use the options below.'),
112 | self.get_worker_options())
113 |
114 | add_group('Scheduler', (
115 | 'By default Huey will run the scheduler once every second to check'
116 | ' for tasks scheduled in the future, or tasks set to run at '
117 | 'specific intervals (periodic tasks). Use the options below to '
118 | 'configure the scheduler or to disable periodic task scheduling.'),
119 | self.get_scheduler_options())
120 |
121 | return parser
122 |
123 |
124 | class ConsumerConfig(namedtuple('_ConsumerConfig', config_keys)):
125 | def __new__(cls, **kwargs):
126 | config = dict(config_defaults)
127 | config.update(kwargs)
128 | args = [config[key] for key in config_keys]
129 | return super(ConsumerConfig, cls).__new__(cls, *args)
130 |
131 | def validate(self):
132 | if self.backoff < 1:
133 | raise ValueError('The backoff must be greater than or equal to 1.')
134 | if not (0 < self.scheduler_interval <= 60):
135 | raise ValueError('The scheduler must run at least once per '
136 | 'minute, and at most once per second (1-60).')
137 | if 60 % self.scheduler_interval != 0:
138 | raise ValueError('The scheduler interval must be a factor of 60: '
139 | '1, 2, 3, 4, 5, 6, 10, 12, 15, 20, 30, or 60')
140 |
141 | @property
142 | def loglevel(self):
143 | if self.verbose is None:
144 | return logging.INFO
145 | return logging.DEBUG if self.verbose else logging.WARNING
146 |
147 | def setup_logger(self, logger=None):
148 | if self.worker_type == 'process':
149 | worker = '%(process)d'
150 | else:
151 | worker = '%(threadName)s'
152 |
153 | if self.simple_log:
154 | datefmt = '%H:%M:%S'
155 | logformat = '%(asctime)s %(message)s'
156 | else:
157 | datefmt = None # Use default
158 | logformat = ('[%(asctime)s] %(levelname)s:%(name)s:' + worker +
159 | ':%(message)s')
160 | if logger is None:
161 | logger = logging.getLogger()
162 |
163 | if self.logfile:
164 | handler = logging.FileHandler(self.logfile)
165 | else:
166 | handler = logging.StreamHandler()
167 |
168 | handler.setFormatter(logging.Formatter(logformat, datefmt))
169 | logger.addHandler(handler)
170 | logger.setLevel(self.loglevel)
171 |
172 | @property
173 | def values(self):
174 | return dict((key, getattr(self, key)) for key in config_keys
175 | if key not in ('logfile', 'verbose', 'simple_log'))
176 |
--------------------------------------------------------------------------------
/huey/contrib/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/huey/contrib/__init__.py
--------------------------------------------------------------------------------
/huey/contrib/asyncio.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from huey.constants import EmptyData
4 |
5 |
6 | async def aget_result(res, backoff=1.15, max_delay=1.0, preserve=False):
7 | """
8 | Await a task result.
9 |
10 | Example usage:
11 |
12 | @huey.task()
13 | def sleep(n):
14 | time.sleep(n)
15 | return n
16 |
17 | # Call the task and get the normal result-handle.
18 | rh = sleep(2)
19 |
20 | # Asynchronously await the result of the task.
21 | result = await aget_result(rh)
22 |
23 | More advanced example of waiting for multiple results concurrently:
24 |
25 | r1 = sleep(1)
26 | r2 = sleep(2)
27 | r3 = sleep(3)
28 |
29 | # Asynchronously await the results of all 3 tasks. Will take
30 | # ~3 seconds.
31 | results = await asyncio.gather(
32 | aget_result(r1),
33 | aget_result(r2),
34 | aget_result(r3))
35 |
36 | NOTE: the Redis operation will be a normal blocking socket read, but in
37 | practice these will be super fast. The slow part is the necessity to wait
38 | between polling intervals (since the Redis command to read the result does
39 | not block).
40 | """
41 | delay = 0.1
42 | while res._result is EmptyData:
43 | delay = min(delay, max_delay)
44 | if res._get(preserve) is EmptyData:
45 | await asyncio.sleep(delay)
46 | delay *= backoff
47 | return res._result
48 |
49 |
50 | async def aget_result_group(rg, *args, **kwargs):
51 | """
52 | Await the results of a ResultGroup.
53 |
54 | Example usage:
55 |
56 | @huey.task()
57 | def sleep(n):
58 | time.sleep(n)
59 | return n
60 |
61 | rg = sleep.map([2, 2, 2])
62 |
63 | # This should take ~2 seconds.
64 | results = await aget_result_group(rg)
65 | """
66 | return await asyncio.gather(*[
67 | aget_result(r, *args, **kwargs)
68 | for r in rg])
69 |
--------------------------------------------------------------------------------
/huey/contrib/djhuey/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | from importlib import import_module
3 | import sys
4 | import traceback
5 |
6 | from django.conf import settings
7 | from django.db import close_old_connections
8 | from django.db import transaction
9 |
10 |
11 | configuration_message = """
12 | Configuring Huey for use with Django
13 | ====================================
14 |
15 | Huey was designed to be simple to configure in the general case. For that
16 | reason, huey will "just work" with no configuration at all provided you have
17 | Redis installed and running locally.
18 |
19 | On the other hand, you can configure huey manually using the following
20 | setting structure.
21 |
22 | The following example uses Redis on localhost, and will run four worker
23 | processes:
24 |
25 | HUEY = {
26 | 'name': 'my-app',
27 | 'connection': {'host': 'localhost', 'port': 6379},
28 | 'consumer': {
29 | 'workers': 4,
30 | 'worker_type': 'process', # "thread" or "greenlet" are other options
31 | },
32 | }
33 |
34 | If you would like to configure Huey's logger using Django's integrated logging
35 | settings, note that the logger used by the consumer is named "huey".
36 |
37 | Alternatively you can simply assign `settings.HUEY` to an actual `Huey`
38 | object instance:
39 |
40 | from huey import RedisHuey
41 | HUEY = RedisHuey('my-app')
42 | """
43 |
44 |
45 | default_backend_path = 'huey.RedisHuey'
46 |
47 | def default_queue_name():
48 | try:
49 | return settings.DATABASE_NAME
50 | except AttributeError:
51 | try:
52 | return str(settings.DATABASES['default']['NAME'])
53 | except KeyError:
54 | return 'huey'
55 |
56 |
57 | def get_backend(import_path=default_backend_path):
58 | module_path, class_name = import_path.rsplit('.', 1)
59 | module = import_module(module_path)
60 | return getattr(module, class_name)
61 |
62 |
63 | def config_error(msg):
64 | print(configuration_message)
65 | print('\n\n')
66 | print(msg)
67 | sys.exit(1)
68 |
69 |
70 | HUEY = getattr(settings, 'HUEY', None)
71 | if HUEY is None:
72 | try:
73 | RedisHuey = get_backend(default_backend_path)
74 | except ImportError:
75 | config_error('Error: Huey could not import the redis backend. '
76 | 'Install `redis-py`.')
77 | else:
78 | HUEY = RedisHuey(default_queue_name())
79 |
80 | if isinstance(HUEY, dict):
81 | huey_config = HUEY.copy() # Operate on a copy.
82 | name = huey_config.pop('name', default_queue_name())
83 | if 'backend_class' in huey_config:
84 | huey_config['huey_class'] = huey_config.pop('backend_class')
85 | backend_path = huey_config.pop('huey_class', default_backend_path)
86 | conn_kwargs = huey_config.pop('connection', {})
87 | try:
88 | del huey_config['consumer'] # Don't need consumer opts here.
89 | except KeyError:
90 | pass
91 | if 'immediate' not in huey_config:
92 | huey_config['immediate'] = settings.DEBUG
93 | huey_config.update(conn_kwargs)
94 |
95 | try:
96 | backend_cls = get_backend(backend_path)
97 | except (ValueError, ImportError, AttributeError):
98 | config_error('Error: could not import Huey backend:\n%s'
99 | % traceback.format_exc())
100 |
101 | HUEY = backend_cls(name, **huey_config)
102 |
103 | # Function decorators.
104 | task = HUEY.task
105 | periodic_task = HUEY.periodic_task
106 | lock_task = HUEY.lock_task
107 |
108 | # Task management.
109 | enqueue = HUEY.enqueue
110 | restore = HUEY.restore
111 | restore_all = HUEY.restore_all
112 | restore_by_id = HUEY.restore_by_id
113 | revoke = HUEY.revoke
114 | revoke_all = HUEY.revoke_all
115 | revoke_by_id = HUEY.revoke_by_id
116 | is_revoked = HUEY.is_revoked
117 | result = HUEY.result
118 | scheduled = HUEY.scheduled
119 |
120 | # Hooks.
121 | on_startup = HUEY.on_startup
122 | on_shutdown = HUEY.on_shutdown
123 | pre_execute = HUEY.pre_execute
124 | post_execute = HUEY.post_execute
125 | signal = HUEY.signal
126 | disconnect_signal = HUEY.disconnect_signal
127 |
128 |
129 | def close_db(fn):
130 | """Decorator to be used with tasks that may operate on the database."""
131 | @wraps(fn)
132 | def inner(*args, **kwargs):
133 | if not HUEY.immediate:
134 | close_old_connections()
135 | try:
136 | return fn(*args, **kwargs)
137 | finally:
138 | if not HUEY.immediate:
139 | close_old_connections()
140 | return inner
141 |
142 |
143 | def db_task(*args, **kwargs):
144 | def decorator(fn):
145 | ret = task(*args, **kwargs)(close_db(fn))
146 | ret.call_local = fn
147 | return ret
148 | return decorator
149 |
150 |
151 | def db_periodic_task(*args, **kwargs):
152 | def decorator(fn):
153 | ret = periodic_task(*args, **kwargs)(close_db(fn))
154 | ret.call_local = fn
155 | return ret
156 | return decorator
157 |
158 |
159 | def on_commit_task(*args, **kwargs):
160 | """
161 | Calling this task registers a post-commit callback that enqueues the
162 | task. A result handle is still returned immediately, even though the
163 | task may never be enqueued, depending on whether the enclosing
164 | transaction actually commits.
165 |
166 | Because we have to set up the callback within the code that performs the
167 | actual enqueueing, we cannot expose the full functionality of the
168 | TaskWrapper. If you anticipate wanting those methods, you are probably
169 | best off decorating the same function twice, e.g.:
170 |
171 | def update_data(pk):
172 | # Do some database operation.
173 | pass
174 |
175 | my_task = task()(update_data)
176 | my_on_commit_task = on_commit_task()(update_data)
177 | """
178 | def decorator(fn):
179 | task_wrapper = task(*args, **kwargs)(close_db(fn))
180 |
181 | @wraps(fn)
182 | def inner(*a, **k):
183 | task = task_wrapper.s(*a, **k)
184 | def enqueue_on_commit():
185 | task_wrapper.huey.enqueue(task)
186 | transaction.on_commit(enqueue_on_commit)
187 | return HUEY._result_handle(task)
188 | return inner
189 | return decorator
190 |
--------------------------------------------------------------------------------
/huey/contrib/djhuey/management/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/huey/contrib/djhuey/management/__init__.py
--------------------------------------------------------------------------------
/huey/contrib/djhuey/management/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/huey/contrib/djhuey/management/commands/__init__.py
--------------------------------------------------------------------------------
/huey/contrib/djhuey/management/commands/run_huey.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 |
4 | from django.conf import settings
5 | from django.core.management.base import BaseCommand
6 | from django.utils.module_loading import autodiscover_modules
7 |
8 | from huey.consumer_options import ConsumerConfig
9 | from huey.consumer_options import OptionParserHandler
10 |
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class Command(BaseCommand):
16 | """
17 | Queue consumer. Example usage::
18 |
19 | To start the consumer (note you must export the settings module):
20 |
21 | django-admin.py run_huey
22 | """
23 | help = "Run the queue consumer"
24 | _type_map = {'int': int, 'float': float}
25 |
26 | def add_arguments(self, parser):
27 | option_handler = OptionParserHandler()
28 | groups = (
29 | option_handler.get_logging_options(),
30 | option_handler.get_worker_options(),
31 | option_handler.get_scheduler_options(),
32 | )
33 | for option_list in groups:
34 | for short, full, kwargs in option_list:
35 | if short == '-v':
36 | full = '--huey-verbose'
37 | short = '-V'
38 | if 'type' in kwargs:
39 | kwargs['type'] = self._type_map[kwargs['type']]
40 | kwargs.setdefault('default', None)
41 | parser.add_argument(full, short, **kwargs)
42 |
43 | parser.add_argument('-A', '--disable-autoload', action='store_true',
44 | dest='disable_autoload',
45 | help='Do not autoload "tasks.py"')
46 |
47 | def handle(self, *args, **options):
48 | from huey.contrib.djhuey import HUEY
49 |
50 | # Python 3.8+ on MacOS uses an incompatible multiprocess model. In this
51 | # case we must explicitly configure mp to use fork().
52 | if sys.version_info >= (3, 8) and sys.platform == 'darwin':
53 | # Apparently this was causing a "context has already been set"
54 | # error for some user. We'll just pass and hope for the best.
55 | # They're apple users so presumably nothing important will be lost.
56 | import multiprocessing
57 | try:
58 | multiprocessing.set_start_method('fork')
59 | except RuntimeError:
60 | pass
61 |
62 | consumer_options = {}
63 | try:
64 | if isinstance(settings.HUEY, dict):
65 | consumer_options.update(settings.HUEY.get('consumer', {}))
66 | except AttributeError:
67 | pass
68 |
69 | for key, value in options.items():
70 | if value is not None:
71 | consumer_options[key] = value
72 |
73 | consumer_options.setdefault('verbose',
74 | consumer_options.pop('huey_verbose', None))
75 |
76 | if not options.get('disable_autoload'):
77 | autodiscover_modules("tasks")
78 |
79 | logger = logging.getLogger('huey')
80 |
81 | config = ConsumerConfig(**consumer_options)
82 | config.validate()
83 |
84 | # Only configure the "huey" logger if it has no handlers. For example,
85 | # some users may configure the huey logger via the Django global
86 | # logging config. This prevents duplicating log messages:
87 | if not logger.handlers:
88 | config.setup_logger(logger)
89 |
90 | consumer = HUEY.create_consumer(**config.values)
91 | consumer.run()
92 |
--------------------------------------------------------------------------------
/huey/contrib/djhuey/models.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/coleifer/huey/302fb8ed8c4894ee7b419f91a22ec53b236fe50c/huey/contrib/djhuey/models.py
--------------------------------------------------------------------------------
/huey/contrib/helpers.py:
--------------------------------------------------------------------------------
1 | from functools import wraps
2 | import time
3 | import uuid
4 |
5 | from huey import RedisHuey
6 | from huey.exceptions import TaskLockedException
7 |
8 |
9 | class RedisSemaphore(object):
10 | """
11 | Extremely basic semaphore for use with Redis.
12 | """
13 | def __init__(self, huey, name, value=1, timeout=None):
14 | if not isinstance(huey, RedisHuey):
15 | raise ValueError('Semaphore is only supported for Redis.')
16 | self.huey = huey
17 | self.key = '%s.lock.%s' % (huey.name, name)
18 | self.value = value
19 | self.timeout = timeout or 86400 # Set a max age for lock holders.
20 |
21 | self.huey._locks.add(self.key)
22 | self._conn = self.huey.storage.conn
23 |
24 | def acquire(self, name=None):
25 | name = name or str(uuid.uuid4())
26 | ts = time.time()
27 | pipeline = self._conn.pipeline(True)
28 | pipeline.zremrangebyscore(self.key, '-inf', ts - self.timeout)
29 | pipeline.zadd(self.key, {name: ts})
30 | pipeline.zrank(self.key, name) # See whether we acquired.
31 | if pipeline.execute()[-1] < self.value:
32 | return name
33 | self._conn.zrem(self.key, name)
34 | return
35 |
36 | def release(self, name):
37 | return self._conn.zrem(self.key, name)
38 |
39 |
40 | def lock_task_semaphore(huey, lock_name, value=1, timeout=None):
41 | """
42 | Lock which can be acquired multiple times (default = 1).
43 |
44 | NOTE: no provisions are made for blocking, waiting, or notifying. This is
45 | just a lock which can be acquired a configurable number of times.
46 |
47 | Example:
48 |
49 | # Allow up to 3 workers to run this task concurrently. If the task is
50 | # locked, retry up to 2 times with a delay of 60s.
51 | @huey.task(retries=2, retry_delay=60)
52 | @lock_task_semaphore(huey, 'my-lock', 3)
53 | def my_task():
54 | ...
55 | """
56 | sem = RedisSemaphore(huey, lock_name, value, timeout)
57 | def decorator(fn):
58 | @wraps(fn)
59 | def inner(*args, **kwargs):
60 | tid = sem.acquire()
61 | if tid is None:
62 | raise TaskLockedException('unable to acquire lock %s' %
63 | lock_name)
64 | try:
65 | return fn(*args, **kwargs)
66 | finally:
67 | sem.release(tid)
68 | return inner
69 | return decorator
70 |
--------------------------------------------------------------------------------
/huey/contrib/kyototycoon.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 | import time
3 |
4 | from ukt import KT_NONE
5 | from ukt import KyotoTycoon
6 |
7 | from huey.api import Huey
8 | from huey.constants import EmptyData
9 | from huey.storage import BaseStorage
10 | from huey.utils import decode
11 |
12 |
13 | class KyotoTycoonStorage(BaseStorage):
14 | priority = True
15 |
16 | def __init__(self, name='huey', host='127.0.0.1', port=1978, db=None,
17 | timeout=None, max_age=3600, queue_db=None, client=None,
18 | blocking=False, result_expire_time=None):
19 | super(KyotoTycoonStorage, self).__init__(name)
20 | if client is None:
21 | client = KyotoTycoon(host, port, timeout, db, serializer=KT_NONE,
22 | max_age=max_age)
23 |
24 | self.blocking = blocking
25 | self.expire_time = result_expire_time
26 |
27 | self.kt = client
28 | self._db = db
29 | self._queue_db = queue_db if queue_db is not None else db
30 |
31 | self.qname = self.name + '.q'
32 | self.sname = self.name + '.s'
33 |
34 | self.q = self.kt.Queue(self.qname, self._queue_db)
35 | self.s = self.kt.Schedule(self.sname, self._queue_db)
36 |
37 | def enqueue(self, data, priority=None):
38 | self.q.add(data, priority)
39 |
40 | def dequeue(self):
41 | if self.blocking:
42 | return self.q.bpop(timeout=30)
43 | else:
44 | return self.q.pop()
45 |
46 | def queue_size(self):
47 | return len(self.q)
48 |
49 | def enqueued_items(self, limit=None):
50 | return self.q.peek(n=limit or -1)
51 |
52 | def flush_queue(self):
53 | return self.q.clear()
54 |
55 | def convert_ts(self, ts):
56 | return int(time.mktime(ts.timetuple()))
57 |
58 | def add_to_schedule(self, data, ts):
59 | self.s.add(data, self.convert_ts(ts))
60 |
61 | def read_schedule(self, ts):
62 | return self.s.read(self.convert_ts(ts))
63 |
64 | def schedule_size(self):
65 | return len(self.s)
66 |
67 | def scheduled_items(self, limit=None):
68 | return self.s.items(limit)
69 |
70 | def flush_schedule(self):
71 | return self.s.clear()
72 |
73 | def prefix_key(self, key):
74 | return '%s.%s' % (self.qname, decode(key))
75 |
76 | def put_data(self, key, value, is_result=False):
77 | xt = self.expire_time if is_result else None
78 | self.kt.set(self.prefix_key(key), value, self._db, expire_time=xt)
79 |
80 | def peek_data(self, key):
81 | result = self.kt.get_bytes(self.prefix_key(key), self._db)
82 | return EmptyData if result is None else result
83 |
84 | def pop_data(self, key):
85 | if self.expire_time is not None:
86 | return self.peek_data(key)
87 |
88 | result = self.kt.seize(self.prefix_key(key), self._db)
89 | return EmptyData if result is None else result
90 |
91 | def delete_data(self, key):
92 | return self.kt.seize(self.prefix_key(key), self._db) is not None
93 |
94 | def has_data_for_key(self, key):
95 | return self.kt.exists(self.prefix_key(key), self._db)
96 |
97 | def put_if_empty(self, key, value):
98 | return self.kt.add(self.prefix_key(key), value, self._db)
99 |
100 | def result_store_size(self):
101 | return len(self.kt.match_prefix(self.prefix_key(''), db=self._db))
102 |
103 | def result_items(self):
104 | prefix = self.prefix_key('')
105 | keys = self.kt.match_prefix(prefix, db=self._db)
106 | result = self.kt.get_bulk(keys, self._db)
107 |
108 | plen = len(prefix)
109 | return {key[plen:]: value for key, value in result.items()}
110 |
111 | def flush_results(self):
112 | prefix = self.prefix_key('')
113 | keys = self.kt.match_prefix(prefix, db=self._db)
114 | return self.kt.remove_bulk(keys, self._db)
115 |
116 | def flush_all(self):
117 | self.flush_queue()
118 | self.flush_schedule()
119 | self.flush_results()
120 |
121 |
122 | class KyotoTycoonHuey(Huey):
123 | storage_class = KyotoTycoonStorage
124 |
--------------------------------------------------------------------------------
/huey/contrib/mini.py:
--------------------------------------------------------------------------------
1 | #
2 | # Minimal huey-like API using gevent and running within the parent process.
3 | #
4 | import datetime
5 | import heapq
6 | import logging
7 | import time
8 | from functools import wraps
9 |
10 | import gevent
11 | from gevent.event import AsyncResult
12 | from gevent.event import Event
13 | from gevent.pool import Pool
14 |
15 | from huey.api import crontab
16 | from huey.utils import time_clock
17 |
18 |
19 | logger = logging.getLogger('huey.mini')
20 |
21 |
22 | class MiniHueyResult(AsyncResult):
23 | __call__ = AsyncResult.get
24 |
25 |
26 | class MiniHuey(object):
27 | def __init__(self, name='huey', interval=1, pool_size=None):
28 | self.name = name
29 | self._interval = interval
30 | self._last_check = datetime.datetime.now()
31 | self._periodic_interval = datetime.timedelta(seconds=60)
32 | self._periodic_tasks = []
33 | self._scheduled_tasks = []
34 | self._shutdown = Event()
35 | self._pool = Pool(pool_size)
36 | self._run_t = None
37 |
38 | def task(self, validate_func=None):
39 | if validate_func is not None:
40 | def periodic_task_wrapper(fn):
41 | self._periodic_tasks.append((validate_func, fn))
42 | return fn
43 | return periodic_task_wrapper
44 |
45 | def decorator(fn):
46 | @wraps(fn)
47 | def _inner(*args, **kwargs):
48 | async_result = MiniHueyResult()
49 | self._enqueue(fn, args, kwargs, async_result)
50 | return async_result
51 |
52 | def _schedule(args=None, kwargs=None, delay=None, eta=None):
53 | if delay is not None:
54 | eta = (datetime.datetime.now() +
55 | datetime.timedelta(seconds=delay))
56 | if eta is None:
57 | raise ValueError('Either a delay (in seconds) or an '
58 | 'eta (datetime) must be specified.')
59 | async_result = MiniHueyResult()
60 | heapq.heappush(self._scheduled_tasks,
61 | (eta, fn, args, kwargs, async_result))
62 | return async_result
63 |
64 | _inner.schedule = _schedule
65 | return _inner
66 |
67 | return decorator
68 |
69 | def periodic_task(self, validate_func):
70 | def decorator(fn):
71 | return self.task(validate_func)(fn)
72 | return decorator
73 |
74 | def start(self):
75 | if self._run_t is not None:
76 | raise Exception('Task runner is already running.')
77 | self._run_t = gevent.spawn(self._run)
78 |
79 | def stop(self):
80 | if self._run_t is None:
81 | raise Exception('Task runner does not appear to have started.')
82 | self._shutdown.set()
83 | logger.info('shutdown requested.')
84 | self._run_t.join()
85 | self._run_t = None
86 |
87 | def _enqueue(self, fn, args=None, kwargs=None, async_result=None):
88 | logger.info('enqueueing %s' % fn.__name__)
89 | self._pool.spawn(self._execute, fn, args, kwargs, async_result)
90 |
91 | def _execute(self, fn, args, kwargs, async_result):
92 | args = args or ()
93 | kwargs = kwargs or {}
94 | start = time_clock()
95 | try:
96 | ret = fn(*args, **kwargs)
97 | except Exception as exc:
98 | logger.exception('task %s failed' % fn.__name__)
99 | async_result.set_exception(exc)
100 | raise
101 | else:
102 | duration = time_clock() - start
103 |
104 | if async_result is not None:
105 | async_result.set(ret)
106 | logger.info('executed %s in %0.3fs', fn.__name__, duration)
107 |
108 | def _run(self):
109 | logger.info('task runner started.')
110 | while not self._shutdown.is_set():
111 | start = time_clock()
112 | now = datetime.datetime.now()
113 | if self._last_check + self._periodic_interval <= now:
114 | logger.debug('checking periodic task schedule')
115 | self._last_check = now
116 | for validate_func, fn in self._periodic_tasks:
117 | if validate_func(now):
118 | self._enqueue(fn)
119 |
120 | if self._scheduled_tasks:
121 | logger.debug('checking scheduled tasks')
122 | # The 0-th item of a heap is always the smallest.
123 | while self._scheduled_tasks and \
124 | self._scheduled_tasks[0][0] <= now:
125 |
126 | eta, fn, args, kwargs, async_result = (
127 | heapq.heappop(self._scheduled_tasks))
128 | self._enqueue(fn, args, kwargs, async_result)
129 |
130 | # Wait out most of the remaining interval before checking again.
131 | remaining = self._interval - (time_clock() - start)
132 | if remaining > 0:
133 | if not self._shutdown.wait(remaining * 0.9):
134 | gevent.sleep(self._interval - (time_clock() - start))
135 | logger.info('exiting task runner')
136 |
--------------------------------------------------------------------------------
/huey/contrib/sql_huey.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 | import operator
3 |
4 | from peewee import *
5 | from playhouse.db_url import connect as db_url_connect
6 |
7 | from huey.api import Huey
8 | from huey.constants import EmptyData
9 | from huey.exceptions import ConfigurationError
10 | from huey.storage import BaseStorage
11 |
12 |
13 | class BytesBlobField(BlobField):
14 | def python_value(self, value):
15 | return value if isinstance(value, bytes) else bytes(value)
16 |
17 |
18 | class SqlStorage(BaseStorage):
19 | def __init__(self, name='huey', database=None, **kwargs):
20 | super(SqlStorage, self).__init__(name)
21 |
22 | if database is None:
23 | raise ConfigurationError('Use of SqlStorage requires a '
24 | 'database= argument, which should be a '
25 | 'peewee database or a connection string.')
26 |
27 | if isinstance(database, Database):
28 | self.database = database
29 | else:
30 | # Treat database argument as a URL connection string.
31 | self.database = db_url_connect(database)
32 |
33 | self.KV, self.Schedule, self.Task = self.create_models()
34 | self.create_tables()
35 |
36 | # Check for FOR UPDATE SKIP LOCKED support.
37 | if isinstance(self.database, PostgresqlDatabase):
38 | self.for_update = 'FOR UPDATE SKIP LOCKED'
39 | elif isinstance(self.database, MySQLDatabase):
40 | self.for_update = 'FOR UPDATE SKIP LOCKED' # Assume support.
41 | # Try to determine if we're using MariaDB or MySQL.
42 | version, = self.database.execute_sql('select version()').fetchone()
43 | if 'mariadb' in str(version).lower():
44 | # MariaDB added support in 10.6.0.
45 | if self.database.server_version < (10, 6):
46 | self.for_update = 'FOR UPDATE'
47 | elif self.database.server_version < (8, 0, 1):
48 | # MySQL added support in 8.0.1.
49 | self.for_update = 'FOR UPDATE'
50 | else:
51 | self.for_update = None
52 |
53 | def create_models(self):
54 | class Base(Model):
55 | class Meta:
56 | database = self.database
57 |
58 | class KV(Base):
59 | queue = CharField()
60 | key = CharField()
61 | value = BytesBlobField()
62 | class Meta:
63 | primary_key = CompositeKey('queue', 'key')
64 |
65 | class Schedule(Base):
66 | queue = CharField()
67 | data = BytesBlobField()
68 | timestamp = TimestampField(resolution=1000)
69 | class Meta:
70 | indexes = ((('queue', 'timestamp'), False),)
71 |
72 | class Task(Base):
73 | queue = CharField()
74 | data = BytesBlobField()
75 | priority = FloatField(default=0.0)
76 |
77 | Task.add_index(Task.priority.desc(), Task.id)
78 |
79 | return (KV, Schedule, Task)
80 |
81 | def create_tables(self):
82 | with self.database:
83 | self.database.create_tables([self.KV, self.Schedule, self.Task])
84 |
85 | def drop_tables(self):
86 | with self.database:
87 | self.database.drop_tables([self.KV, self.Schedule, self.Task])
88 |
89 | def close(self):
90 | return self.database.close()
91 |
92 | def tasks(self, *columns):
93 | return self.Task.select(*columns).where(self.Task.queue == self.name)
94 |
95 | def schedule(self, *columns):
96 | return (self.Schedule.select(*columns)
97 | .where(self.Schedule.queue == self.name))
98 |
99 | def kv(self, *columns):
100 | return self.KV.select(*columns).where(self.KV.queue == self.name)
101 |
102 | def check_conn(self):
103 | if not self.database.is_connection_usable():
104 | self.database.close()
105 | self.database.connect()
106 |
107 | def enqueue(self, data, priority=None):
108 | self.check_conn()
109 | self.Task.create(queue=self.name, data=data, priority=priority or 0)
110 |
111 | def dequeue(self):
112 | self.check_conn()
113 | query = (self.tasks(self.Task.id, self.Task.data)
114 | .order_by(self.Task.priority.desc(), self.Task.id)
115 | .limit(1))
116 | if self.for_update:
117 | query = query.for_update(self.for_update)
118 |
119 | with self.database.atomic():
120 | try:
121 | task = query.get()
122 | except self.Task.DoesNotExist:
123 | return
124 |
125 | nrows = self.Task.delete().where(self.Task.id == task.id).execute()
126 | if nrows == 1:
127 | return task.data
128 |
129 | def queue_size(self):
130 | return self.tasks().count()
131 |
132 | def enqueued_items(self, limit=None):
133 | query = self.tasks(self.Task.data).order_by(self.Task.priority.desc(),
134 | self.Task.id)
135 | if limit is not None:
136 | query = query.limit(limit)
137 | return list(map(operator.itemgetter(0), query.tuples()))
138 |
139 | def flush_queue(self):
140 | self.Task.delete().where(self.Task.queue == self.name).execute()
141 |
142 | def add_to_schedule(self, data, timestamp):
143 | self.check_conn()
144 | self.Schedule.create(queue=self.name, data=data, timestamp=timestamp)
145 |
146 | def read_schedule(self, timestamp):
147 | self.check_conn()
148 | query = (self.schedule(self.Schedule.id, self.Schedule.data)
149 | .where(self.Schedule.timestamp <= timestamp)
150 | .tuples())
151 | if self.for_update:
152 | query = query.for_update(self.for_update)
153 |
154 | with self.database.atomic():
155 | results = list(query)
156 | if not results:
157 | return []
158 |
159 | id_list, data = zip(*results)
160 | (self.Schedule
161 | .delete()
162 | .where(self.Schedule.id.in_(id_list))
163 | .execute())
164 |
165 | return list(data)
166 |
167 | def schedule_size(self):
168 | return self.schedule().count()
169 |
170 | def scheduled_items(self):
171 | tasks = (self.schedule(self.Schedule.data)
172 | .order_by(self.Schedule.timestamp)
173 | .tuples())
174 | return list(map(operator.itemgetter(0), tasks))
175 |
176 | def flush_schedule(self):
177 | (self.Schedule
178 | .delete()
179 | .where(self.Schedule.queue == self.name)
180 | .execute())
181 |
182 | def put_data(self, key, value, is_result=False):
183 | self.check_conn()
184 | if isinstance(self.database, PostgresqlDatabase):
185 | (self.KV
186 | .insert(queue=self.name, key=key, value=value)
187 | .on_conflict(conflict_target=[self.KV.queue, self.KV.key],
188 | preserve=[self.KV.value])
189 | .execute())
190 | else:
191 | self.KV.replace(queue=self.name, key=key, value=value).execute()
192 |
193 | def peek_data(self, key):
194 | self.check_conn()
195 | try:
196 | kv = self.kv(self.KV.value).where(self.KV.key == key).get()
197 | except self.KV.DoesNotExist:
198 | return EmptyData
199 | else:
200 | return kv.value
201 |
202 | def pop_data(self, key):
203 | self.check_conn()
204 | query = self.kv().where(self.KV.key == key)
205 | if self.for_update:
206 | query = query.for_update(self.for_update)
207 |
208 | with self.database.atomic():
209 | try:
210 | kv = query.get()
211 | except self.KV.DoesNotExist:
212 | return EmptyData
213 | else:
214 | dq = self.KV.delete().where(
215 | (self.KV.queue == self.name) &
216 | (self.KV.key == key))
217 | return kv.value if dq.execute() == 1 else EmptyData
218 |
219 | def has_data_for_key(self, key):
220 | self.check_conn()
221 | return self.kv().where(self.KV.key == key).exists()
222 |
223 | def put_if_empty(self, key, value):
224 | self.check_conn()
225 | try:
226 | with self.database.atomic():
227 | self.KV.insert(queue=self.name, key=key, value=value).execute()
228 | except IntegrityError:
229 | return False
230 | else:
231 | return True
232 |
233 | def result_store_size(self):
234 | return self.kv().count()
235 |
236 | def result_items(self):
237 | query = self.kv(self.KV.key, self.KV.value).tuples()
238 | return dict((k, v) for k, v in query.iterator())
239 |
240 | def flush_results(self):
241 | self.KV.delete().where(self.KV.queue == self.name).execute()
242 |
243 |
244 | SqlHuey = partial(Huey, storage_class=SqlStorage)
245 |
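246 | # Illustrative usage (a sketch, not part of the module): SqlHuey is just
247 | # Huey bound to SqlStorage, so any peewee database instance or db_url
248 | # connection string should work. The name and URL below are made-up.
249 | #
250 | #   huey = SqlHuey(name='my-app', database='sqlite:///huey-example.db')
251 | #
252 | #   @huey.task()
253 | #   def add(a, b):
254 | #       return a + b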
--------------------------------------------------------------------------------
/huey/exceptions.py:
--------------------------------------------------------------------------------
1 | class HueyException(Exception): pass
2 | class ConfigurationError(HueyException): pass
3 | class TaskLockedException(HueyException): pass
4 | class ResultTimeout(HueyException): pass
5 |
6 | class CancelExecution(Exception):
7 | def __init__(self, retry=None, *args, **kwargs):
8 | self.retry = retry
9 | super(CancelExecution, self).__init__(*args, **kwargs)
10 | class RetryTask(Exception):
11 | def __init__(self, msg=None, eta=None, delay=None, *args, **kwargs):
12 | self.eta, self.delay = eta, delay
13 | super(RetryTask, self).__init__(msg, *args, **kwargs)
14 | class TaskException(Exception):
15 | def __init__(self, metadata=None, *args):
16 | self.metadata = metadata or {}
17 | super(TaskException, self).__init__(*args)
18 |
19 | def __unicode__(self):
20 | return self.metadata.get('error') or 'unknown error'
21 | __str__ = __unicode__
22 |
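23 | # Illustrative note (a sketch, not part of the module): RetryTask and
24 | # CancelExecution are meant to be raised from inside a running task to
25 | # request a retry or to abort quietly; the delay shown is a made-up value.
26 | #
27 | #   raise RetryTask(delay=60)    # ask the consumer to retry the task later
28 | #   raise CancelExecution()      # stop the task without recording an error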
--------------------------------------------------------------------------------
/huey/registry.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 |
3 | from huey.exceptions import HueyException
4 |
5 |
6 | Message = namedtuple('Message', ('id', 'name', 'eta', 'retries', 'retry_delay',
7 | 'priority', 'args', 'kwargs', 'on_complete',
8 | 'on_error', 'expires', 'expires_resolved'))
9 |
10 | # Automatically set missing parameters to None. This is kind-of a hack, but it
11 | # allows us to add new parameters while continuing to be able to handle
12 | # messages enqueued with a smaller-set of arguments.
13 | Message.__new__.__defaults__ = (None,) * len(Message._fields)
14 |
15 |
16 | class Registry(object):
17 | def __init__(self):
18 | self._registry = {}
19 | self._periodic_tasks = []
20 |
21 | def task_to_string(self, task_class):
22 | return '%s.%s' % (task_class.__module__, task_class.__name__)
23 |
24 | def register(self, task_class):
25 | task_str = self.task_to_string(task_class)
26 | if task_str in self._registry:
27 | raise ValueError('Attempting to register a task with the same '
28 | 'identifier as existing task. Specify a different'
29 | ' name= to register this task. "%s"' % task_str)
30 |
31 | self._registry[task_str] = task_class
32 | if hasattr(task_class, 'validate_datetime'):
33 | self._periodic_tasks.append(task_class)
34 | return True
35 |
36 | def unregister(self, task_class):
37 | task_str = self.task_to_string(task_class)
38 | if task_str not in self._registry:
39 | return False
40 |
41 | del self._registry[task_str]
42 | if hasattr(task_class, 'validate_datetime'):
43 | self._periodic_tasks = [t for t in self._periodic_tasks
44 | if t is not task_class]
45 | return True
46 |
47 | def string_to_task(self, task_str):
48 | if task_str not in self._registry:
49 | raise HueyException('%s not found in TaskRegistry' % task_str)
50 | return self._registry[task_str]
51 |
52 | def create_message(self, task):
53 | task_str = self.task_to_string(type(task))
54 | if task_str not in self._registry:
55 | raise HueyException('%s not found in TaskRegistry' % task_str)
56 |
57 | # Remove the "task" instance from any arguments before serializing.
58 | if task.kwargs and 'task' in task.kwargs:
59 | task.kwargs.pop('task')
60 |
61 | on_complete = None
62 | if task.on_complete is not None:
63 | on_complete = self.create_message(task.on_complete)
64 |
65 | on_error = None
66 | if task.on_error is not None:
67 | on_error = self.create_message(task.on_error)
68 |
69 | return Message(
70 | task.id,
71 | task_str,
72 | task.eta,
73 | task.retries,
74 | task.retry_delay,
75 | task.priority,
76 | task.args,
77 | task.kwargs,
78 | on_complete,
79 | on_error,
80 | task.expires,
81 | task.expires_resolved)
82 |
83 | def create_task(self, message):
84 | # Compatibility with Huey 1.11 message format.
85 | if not isinstance(message, Message) and isinstance(message, tuple):
86 | tid, name, eta, retries, retry_delay, (args, kwargs), oc = message
87 | message = Message(tid, name, eta, retries, retry_delay, None, args,
88 | kwargs, oc, None)
89 |
90 | TaskClass = self.string_to_task(message.name)
91 |
92 | on_complete = None
93 | if message.on_complete is not None:
94 | on_complete = self.create_task(message.on_complete)
95 |
96 | on_error = None
97 | if message.on_error is not None:
98 | on_error = self.create_task(message.on_error)
99 |
100 | return TaskClass(
101 | message.args,
102 | message.kwargs,
103 | message.id,
104 | message.eta,
105 | message.retries,
106 | message.retry_delay,
107 | message.priority,
108 | message.expires,
109 | on_complete,
110 | on_error,
111 | message.expires_resolved)
112 |
113 | @property
114 | def periodic_tasks(self):
115 | return [task_class() for task_class in self._periodic_tasks]
116 |
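117 | # Illustrative round-trip (a sketch, not part of the module): the registry
118 | # serializes task instances to Message tuples and reconstructs them when a
119 | # message is read back off the queue.
120 | #
121 | #   message = registry.create_message(some_task)   # Task -> Message
122 | #   task_copy = registry.create_task(message)      # Message -> Task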
--------------------------------------------------------------------------------
/huey/serializer.py:
--------------------------------------------------------------------------------
1 | try:
2 | import gzip
3 | except ImportError:
4 | gzip = None
5 | try:
6 | import zlib
7 | except ImportError:
8 | zlib = None
9 | import hashlib
10 | import hmac
11 | import logging
12 | import pickle
13 | import sys
14 |
15 | from huey.exceptions import ConfigurationError
16 | from huey.utils import encode
17 |
18 |
19 | logger = logging.getLogger('huey.serializer')
20 |
21 |
22 | if gzip is not None:
23 | if sys.version_info[0] > 2:
24 | gzip_compress = gzip.compress
25 | gzip_decompress = gzip.decompress
26 | else:
27 | from io import BytesIO
28 |
29 | def gzip_compress(data, comp_level):
30 | buf = BytesIO()
31 | fh = gzip.GzipFile(fileobj=buf, mode='wb',
32 | compresslevel=comp_level)
33 | fh.write(data)
34 | fh.close()
35 | return buf.getvalue()
36 |
37 | def gzip_decompress(data):
38 | buf = BytesIO(data)
39 | fh = gzip.GzipFile(fileobj=buf, mode='rb')
40 | try:
41 | return fh.read()
42 | finally:
43 | fh.close()
44 |
45 |
46 | if sys.version_info[0] == 2:
47 | def is_compressed(data):
48 | return data and (data[0] == b'\x1f' or data[0] == b'\x78')
49 | else:
50 | def is_compressed(data):
51 | return data and (data[0] == 0x1f or data[0] == 0x78)
52 |
53 |
54 | class Serializer(object):
55 | def __init__(self, compression=False, compression_level=6, use_zlib=False,
56 | pickle_protocol=pickle.HIGHEST_PROTOCOL):
57 | self.comp = compression
58 | self.comp_level = compression_level
59 | self.use_zlib = use_zlib
60 | self.pickle_protocol = pickle_protocol or pickle.HIGHEST_PROTOCOL
61 | if self.comp:
62 | if self.use_zlib and zlib is None:
63 | raise ConfigurationError('use_zlib specified, but zlib module '
64 | 'not found.')
65 | elif gzip is None:
66 | raise ConfigurationError('gzip module required to enable '
67 | 'compression.')
68 |
69 | def _serialize(self, data):
70 | return pickle.dumps(data, self.pickle_protocol)
71 |
72 | def _deserialize(self, data):
73 | return pickle.loads(data)
74 |
75 | def serialize(self, data):
76 | data = self._serialize(data)
77 | if self.comp:
78 | if self.use_zlib:
79 | data = zlib.compress(data, self.comp_level)
80 | else:
81 | data = gzip_compress(data, self.comp_level)
82 | return data
83 |
84 | def deserialize(self, data):
85 | if self.comp:
86 | if not is_compressed(data):
87 | logger.warning('compression enabled but message data does not '
88 | 'appear to be compressed.')
89 | elif self.use_zlib:
90 | data = zlib.decompress(data)
91 | else:
92 | data = gzip_decompress(data)
93 | return self._deserialize(data)
94 |
95 |
96 | def constant_time_compare(s1, s2):
97 | return hmac.compare_digest(s1, s2)
98 |
99 |
100 | class SignedSerializer(Serializer):
101 | def __init__(self, secret=None, salt='huey', **kwargs):
102 | super(SignedSerializer, self).__init__(**kwargs)
103 | if not secret or not salt:
104 | raise ConfigurationError('The secret and salt parameters are '
105 | 'required by %r' % type(self))
106 | self.secret = encode(secret)
107 | self.salt = encode(salt)
108 | self.separator = b':'
109 | self._key = hashlib.sha1(self.salt + self.secret).digest()
110 |
111 | def _signature(self, message):
112 | signature = hmac.new(self._key, msg=message, digestmod=hashlib.sha1)
113 | return signature.hexdigest().encode('utf8')
114 |
115 | def _sign(self, message):
116 | return message + self.separator + self._signature(message)
117 |
118 | def _unsign(self, signed):
119 | if self.separator not in signed:
120 | raise ValueError('Separator "%s" not found' % self.separator)
121 |
122 | msg, sig = signed.rsplit(self.separator, 1)
123 | if constant_time_compare(sig, self._signature(msg)):
124 | return msg
125 |
126 | raise ValueError('Signature "%s" mismatch!' % sig)
127 |
128 | def _serialize(self, message):
129 | data = super(SignedSerializer, self)._serialize(message)
130 | return self._sign(data)
131 |
132 | def _deserialize(self, data):
133 | return super(SignedSerializer, self)._deserialize(self._unsign(data))
134 |
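135 | # Illustrative usage (a sketch, not part of the module): SignedSerializer
136 | # pickles the payload, appends an HMAC-SHA1 signature, and (optionally)
137 | # compresses the signed message. The secret below is a made-up value.
138 | #
139 | #   s = SignedSerializer(secret='example-secret', salt='huey')
140 | #   payload = s.serialize({'k': 'v'})
141 | #   assert s.deserialize(payload) == {'k': 'v'}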
--------------------------------------------------------------------------------
/huey/signals.py:
--------------------------------------------------------------------------------
1 | import itertools
2 |
3 |
4 | SIGNAL_CANCELED = 'canceled'
5 | SIGNAL_COMPLETE = 'complete'
6 | SIGNAL_ERROR = 'error'
7 | SIGNAL_EXECUTING = 'executing'
8 | SIGNAL_EXPIRED = 'expired'
9 | SIGNAL_LOCKED = 'locked'
10 | SIGNAL_RETRYING = 'retrying'
11 | SIGNAL_REVOKED = 'revoked'
12 | SIGNAL_SCHEDULED = 'scheduled'
13 | SIGNAL_INTERRUPTED = 'interrupted'
14 | SIGNAL_ENQUEUED = 'enqueued'
15 |
16 |
17 | class Signal(object):
18 | __slots__ = ('receivers',)
19 |
20 | def __init__(self):
21 | self.receivers = {'any': []}
22 |
23 | def connect(self, receiver, *signals):
24 | if not signals:
25 | signals = ('any',)
26 | for signal in signals:
27 | self.receivers.setdefault(signal, [])
28 | self.receivers[signal].append(receiver)
29 |
30 | def disconnect(self, receiver, *signals):
31 | if not signals:
32 | signals = list(self.receivers)
33 | for signal in signals:
34 | try:
35 | self.receivers[signal].remove(receiver)
36 | except ValueError:
37 | pass
38 |
39 | def send(self, signal, task, *args, **kwargs):
40 | receivers = itertools.chain(self.receivers.get(signal, ()),
41 | self.receivers['any'])
42 | for receiver in receivers:
43 | receiver(signal, task, *args, **kwargs)
44 |
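45 | # Illustrative usage (a sketch, not part of the module): receivers accept
46 | # the signal name and the task instance; connecting with no signal names
47 | # registers the receiver for every signal.
48 | #
49 | #   def on_complete(signal, task, *args, **kwargs):
50 | #       print(signal, task.id)
51 | #
52 | #   sig = Signal()
53 | #   sig.connect(on_complete, SIGNAL_COMPLETE)
54 | #   sig.send(SIGNAL_COMPLETE, some_task)    # some_task: any task instance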
--------------------------------------------------------------------------------
/huey/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from huey.tests.test_api import *
2 | from huey.tests.test_consumer import *
3 | from huey.tests.test_crontab import *
4 | from huey.tests.test_helpers import *
5 | from huey.tests.test_immediate import *
6 | from huey.tests.test_kt_huey import *
7 | from huey.tests.test_priority import *
8 | from huey.tests.test_registry import *
9 | from huey.tests.test_serializer import *
10 | from huey.tests.test_signals import *
11 | from huey.tests.test_sql_huey import *
12 | from huey.tests.test_storage import *
13 | from huey.tests.test_utils import *
14 | from huey.tests.test_wrappers import *
15 |
--------------------------------------------------------------------------------
/huey/tests/__main__.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import unittest
3 |
4 | from huey.tests import *
5 |
6 |
7 | if __name__ == '__main__':
8 | unittest.main(argv=sys.argv)
9 |
--------------------------------------------------------------------------------
/huey/tests/base.py:
--------------------------------------------------------------------------------
1 | import contextlib
2 | import logging
3 | import os
4 | import unittest
5 |
6 | from huey.api import MemoryHuey
7 | from huey.consumer import Consumer
8 | from huey.exceptions import TaskException
9 |
10 |
11 | class NullHandler(logging.Handler):
12 | def emit(self, record): pass
13 |
14 |
15 | logger = logging.getLogger('huey')
16 | logger.addHandler(NullHandler())
17 |
18 | TRAVIS = bool(os.environ.get('HUEY_TRAVIS'))
19 |
20 |
21 | class BaseTestCase(unittest.TestCase):
22 | consumer_class = Consumer
23 |
24 | def setUp(self):
25 | super(BaseTestCase, self).setUp()
26 | self.huey = self.get_huey()
27 |
28 | def get_huey(self):
29 | return MemoryHuey(utc=False)
30 |
31 | def execute_next(self, timestamp=None):
32 | task = self.huey.dequeue()
33 | self.assertTrue(task is not None)
34 | return self.huey.execute(task, timestamp=timestamp)
35 |
36 | def trap_exception(self, fn, exc_type=TaskException):
37 | try:
38 | fn()
39 | except exc_type as exc_val:
40 | return exc_val
41 | raise AssertionError('trap_exception() failed to catch %s' % exc_type)
42 |
43 | def consumer(self, **params):
44 | params.setdefault('initial_delay', 0.001)
45 | params.setdefault('max_delay', 0.001)
46 | params.setdefault('workers', 2)
47 | params.setdefault('check_worker_health', False)
48 | return self.consumer_class(self.huey, **params)
49 |
50 | @contextlib.contextmanager
51 | def consumer_context(self, **kwargs):
52 | consumer = self.consumer(**kwargs)
53 | consumer.start()
54 | try:
55 | yield
56 | finally:
57 | consumer.stop(graceful=True)
58 |
--------------------------------------------------------------------------------
/huey/tests/test_consumer.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import time
3 |
4 | from huey.api import crontab
5 | from huey.consumer import Consumer
6 | from huey.consumer import Scheduler
7 | from huey.consumer_options import ConsumerConfig
8 | from huey.tests.base import BaseTestCase
9 | from huey.utils import time_clock
10 |
11 |
12 | class TestConsumer(Consumer):
13 | class _Scheduler(Scheduler):
14 | def sleep_for_interval(self, current, interval):
15 | pass
16 | scheduler_class = _Scheduler
17 |
18 |
19 | class TestConsumerIntegration(BaseTestCase):
20 | consumer_class = TestConsumer
21 |
22 | def test_consumer_minimal(self):
23 | @self.huey.task()
24 | def task_a(n):
25 | return n + 1
26 |
27 | with self.consumer_context():
28 | result = task_a(1)
29 | self.assertEqual(result.get(blocking=True, timeout=2), 2)
30 |
31 | def work_on_tasks(self, consumer, n=1, now=None):
32 | worker, _ = consumer.worker_threads[0]
33 | for i in range(n):
34 | self.assertEqual(len(self.huey), n - i)
35 | worker.loop(now)
36 |
37 | def schedule_tasks(self, consumer, now=None):
38 | scheduler = consumer._create_scheduler()
39 | scheduler._next_loop = time_clock() + 60
40 | scheduler._next_periodic = time_clock() - 60
41 | scheduler.loop(now)
42 |
43 | def test_consumer_schedule_task(self):
44 | @self.huey.task()
45 | def task_a(n):
46 | return n + 1
47 |
48 | now = datetime.datetime.now()
49 | eta = now + datetime.timedelta(days=1)
50 | r60 = task_a.schedule((2,), delay=60)
51 | rday = task_a.schedule((3,), eta=eta)
52 |
53 | consumer = self.consumer(workers=1)
54 | self.work_on_tasks(consumer, 2) # Process the two messages.
55 |
56 | self.assertEqual(len(self.huey), 0)
57 | self.assertEqual(self.huey.scheduled_count(), 2)
58 |
59 | self.schedule_tasks(consumer, now)
60 | self.assertEqual(len(self.huey), 0)
61 | self.assertEqual(self.huey.scheduled_count(), 2)
62 |
63 | # Ensure that the task that had a delay of 60s is read from schedule.
64 | later = now + datetime.timedelta(seconds=65)
65 | self.schedule_tasks(consumer, later)
66 | self.assertEqual(len(self.huey), 1)
67 | self.assertEqual(self.huey.scheduled_count(), 1)
68 |
69 | # We can now work on our scheduled task.
70 | self.work_on_tasks(consumer, 1, later)
71 | self.assertEqual(r60.get(), 3)
72 |
73 | # Verify the task was run and that there is only one task remaining to
74 | # be scheduled (in a day).
75 | self.assertEqual(len(self.huey), 0)
76 | self.assertEqual(self.huey.scheduled_count(), 1)
77 |
78 | tomorrow = now + datetime.timedelta(days=1)
79 | self.schedule_tasks(consumer, tomorrow)
80 | self.work_on_tasks(consumer, 1, tomorrow)
81 | self.assertEqual(rday.get(), 4)
82 | self.assertEqual(len(self.huey), 0)
83 | self.assertEqual(self.huey.scheduled_count(), 0)
84 |
85 | def test_consumer_periodic_tasks(self):
86 | state = []
87 |
88 | @self.huey.periodic_task(crontab(minute='*/10'))
89 | def task_p1():
90 | state.append('p1')
91 |
92 | @self.huey.periodic_task(crontab(minute='0', hour='0'))
93 | def task_p2():
94 | state.append('p2')
95 |
96 | consumer = self.consumer(workers=1)
97 | dt = datetime.datetime(2000, 1, 1, 0, 0)
98 | self.schedule_tasks(consumer, dt)
99 | self.assertEqual(len(self.huey), 2)
100 | self.work_on_tasks(consumer, 2)
101 | self.assertEqual(state, ['p1', 'p2'])
102 |
103 | dt = datetime.datetime(2000, 1, 1, 12, 0)
104 | self.schedule_tasks(consumer, dt)
105 | self.assertEqual(len(self.huey), 1)
106 | self.work_on_tasks(consumer, 1)
107 | self.assertEqual(state, ['p1', 'p2', 'p1'])
108 |
109 | task_p1.revoke()
110 | self.schedule_tasks(consumer, dt)
111 | self.assertEqual(len(self.huey), 1) # Enqueued despite being revoked.
112 | self.work_on_tasks(consumer, 1)
113 | self.assertEqual(state, ['p1', 'p2', 'p1']) # No change, not executed.
114 |
115 |
116 | class TestConsumerConfig(BaseTestCase):
117 | def test_default_config(self):
118 | cfg = ConsumerConfig()
119 | cfg.validate()
120 | consumer = self.huey.create_consumer(**cfg.values)
121 | self.assertEqual(consumer.workers, 1)
122 | self.assertEqual(consumer.worker_type, 'thread')
123 | self.assertTrue(consumer.periodic)
124 | self.assertEqual(consumer.default_delay, 0.1)
125 | self.assertEqual(consumer.scheduler_interval, 1)
126 | self.assertTrue(consumer._health_check)
127 |
128 | def test_consumer_config(self):
129 | cfg = ConsumerConfig(workers=3, worker_type='process', initial_delay=1,
130 | backoff=2, max_delay=4, check_worker_health=False,
131 | scheduler_interval=30, periodic=False)
132 | cfg.validate()
133 | consumer = self.huey.create_consumer(**cfg.values)
134 |
135 | self.assertEqual(consumer.workers, 3)
136 | self.assertEqual(consumer.worker_type, 'process')
137 | self.assertFalse(consumer.periodic)
138 | self.assertEqual(consumer.default_delay, 1)
139 | self.assertEqual(consumer.backoff, 2)
140 | self.assertEqual(consumer.max_delay, 4)
141 | self.assertEqual(consumer.scheduler_interval, 30)
142 | self.assertFalse(consumer._health_check)
143 |
144 | def test_invalid_values(self):
145 | def assertInvalid(**kwargs):
146 | cfg = ConsumerConfig(**kwargs)
147 | self.assertRaises(ValueError, cfg.validate)
148 |
149 | assertInvalid(backoff=0.5)
150 | assertInvalid(scheduler_interval=90)
151 | assertInvalid(scheduler_interval=7)
152 | assertInvalid(scheduler_interval=45)
153 |
--------------------------------------------------------------------------------
/huey/tests/test_crontab.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import unittest
3 |
4 | from huey import crontab
5 |
6 |
7 | class TestCrontab(unittest.TestCase):
8 | def test_crontab_month(self):
9 | # validates the following months, 1, 4, 7, 8, 9
10 | valids = [1, 4, 7, 8, 9]
11 | validate_m = crontab(month='1,4,*/6,8-9')
12 |
13 | for x in range(1, 13):
14 | res = validate_m(datetime.datetime(2011, x, 1))
15 | self.assertEqual(res, x in valids)
16 |
17 | def test_crontab_day(self):
18 | # validates the following days
19 | valids = [1, 4, 7, 8, 9, 13, 19, 25, 31]
20 | validate_d = crontab(day='*/6,1,4,8-9')
21 |
22 | for x in range(1, 32):
23 | res = validate_d(datetime.datetime(2011, 1, x))
24 | self.assertEqual(res, x in valids)
25 |
26 | valids = [1, 11, 21, 31]
27 | validate_d = crontab(day='*/10')
28 | for x in range(1, 32):
29 | res = validate_d(datetime.datetime(2011, 1, x))
30 | self.assertEqual(res, x in valids)
31 |
32 | valids.pop() # Remove 31, as feb only has 28 days.
33 | for x in range(1, 29):
34 | res = validate_d(datetime.datetime(2011, 2, x))
35 | self.assertEqual(res, x in valids)
36 |
37 | def test_crontab_hour(self):
38 | # validates the following hours
39 | valids = [0, 1, 4, 6, 8, 9, 12, 18]
40 | validate_h = crontab(hour='8-9,*/6,1,4')
41 |
42 | for x in range(24):
43 | res = validate_h(datetime.datetime(2011, 1, 1, x))
44 | self.assertEqual(res, x in valids)
45 |
46 | edge = crontab(hour=0)
47 | self.assertTrue(edge(datetime.datetime(2011, 1, 1, 0, 0)))
48 | self.assertFalse(edge(datetime.datetime(2011, 1, 1, 12, 0)))
49 |
50 | def test_crontab_minute(self):
51 | # validates the following minutes
52 | valids = [0, 1, 4, 6, 8, 9, 12, 18, 24, 30, 36, 42, 48, 54]
53 | validate_m = crontab(minute='4,8-9,*/6,1')
54 |
55 | for x in range(60):
56 | res = validate_m(datetime.datetime(2011, 1, 1, 1, x))
57 | self.assertEqual(res, x in valids)
58 |
59 | # We don't ensure *every* X minutes, but just on the given intervals.
60 | valids = [0, 16, 32, 48]
61 | validate_m = crontab(minute='*/16')
62 | for x in range(60):
63 | res = validate_m(datetime.datetime(2011, 1, 1, 1, x))
64 | self.assertEqual(res, x in valids)
65 |
66 | def test_crontab_day_of_week(self):
67 | # validates the following days of week
68 | # jan, 1, 2011 is a saturday
69 | valids = [2, 4, 9, 11, 16, 18, 23, 25, 30]
70 | validate_dow = crontab(day_of_week='0,2')
71 |
72 | for x in range(1, 32):
73 | res = validate_dow(datetime.datetime(2011, 1, x))
74 | self.assertEqual(res, x in valids)
75 |
76 | def test_crontab_sunday(self):
77 | for dow in ('0', '7'):
78 | validate = crontab(day_of_week=dow, hour='0', minute='0')
79 | valid = set((2, 9, 16, 23, 30))
80 | for x in range(1, 32):
81 | if x in valid:
82 | self.assertTrue(validate(datetime.datetime(2011, 1, x)))
83 | else:
84 | self.assertFalse(validate(datetime.datetime(2011, 1, x)))
85 |
86 | def test_crontab_all_together(self):
87 | # jan 1, 2011 is a saturday
88 | # may 1, 2011 is a sunday
89 | validate = crontab(
90 | month='1,5',
91 | day='1,4,7',
92 | day_of_week='0,6',
93 | hour='*/4',
94 | minute='1-5,10-15,50'
95 | )
96 |
97 | self.assertTrue(validate(datetime.datetime(2011, 5, 1, 4, 11)))
98 | self.assertTrue(validate(datetime.datetime(2011, 5, 7, 20, 50)))
99 | self.assertTrue(validate(datetime.datetime(2011, 1, 1, 0, 1)))
100 |
101 | # fails validation on month
102 | self.assertFalse(validate(datetime.datetime(2011, 6, 4, 4, 11)))
103 |
104 | # fails validation on day
105 | self.assertFalse(validate(datetime.datetime(2011, 1, 6, 4, 11)))
106 |
107 | # fails validation on day_of_week
108 | self.assertFalse(validate(datetime.datetime(2011, 1, 4, 4, 11)))
109 |
110 | # fails validation on hour
111 | self.assertFalse(validate(datetime.datetime(2011, 1, 1, 1, 11)))
112 |
113 | # fails validation on minute
114 | self.assertFalse(validate(datetime.datetime(2011, 1, 1, 4, 6)))
115 |
116 | def test_invalid_crontabs(self):
117 | # check invalid configurations are detected and reported
118 | self.assertRaises(ValueError, crontab, minute='61')
119 | self.assertRaises(ValueError, crontab, minute='0-61')
120 | self.assertRaises(ValueError, crontab, day_of_week='*/3')
121 |
122 | def test_invalid_crontabs_2(self):
123 | self.assertTrue(crontab(minute='*abc'))
124 | invalid = ('abc', '*abc', 'a-b', '1-c', '0x9')
125 | for i in invalid:
126 | self.assertRaises(ValueError, crontab, minute=i, strict=True)
127 |
--------------------------------------------------------------------------------
/huey/tests/test_helpers.py:
--------------------------------------------------------------------------------
1 | from huey import RedisHuey
2 | from huey.contrib.helpers import RedisSemaphore
3 | from huey.contrib.helpers import lock_task_semaphore
4 | from huey.exceptions import TaskLockedException
5 | from huey.tests.base import BaseTestCase
6 |
7 |
8 | class TestLockTaskSemaphore(BaseTestCase):
9 | def setUp(self):
10 | super(TestLockTaskSemaphore, self).setUp()
11 | self.semaphore = RedisSemaphore(self.huey, 'lock_a', 2)
12 | self.huey.storage.conn.delete(self.semaphore.key)
13 |
14 | def tearDown(self):
15 | self.huey.storage.conn.delete(self.semaphore.key)
16 | super(TestLockTaskSemaphore, self).tearDown()
17 |
18 | def get_huey(self):
19 | return RedisHuey()
20 |
21 | def test_redis_semaphore(self):
22 | s = self.semaphore
23 | aid1 = s.acquire()
24 | self.assertTrue(aid1 is not None)
25 | aid2 = s.acquire()
26 | self.assertTrue(aid2 is not None) # We can acquire it twice.
27 | self.assertTrue(s.acquire() is None) # Cannot acquire 3 times.
28 | self.assertEqual(s.release(aid2), 1) # Release succeeded.
29 | self.assertEqual(s.release(aid2), 0) # Already released.
30 | self.assertEqual(s.acquire(aid2), aid2) # Re-acquired.
31 | self.assertEqual(s.acquire(aid2), aid2) # No-op (still acquired).
32 |
33 | self.assertEqual(s.release(aid2), 1) # Release succeeded.
34 | self.assertEqual(s.release(aid1), 1) # Release succeeded.
35 |
36 | self.assertTrue(s.acquire() is not None) # Acquire twice.
37 | self.assertTrue(s.acquire() is not None)
38 | self.assertTrue(s.acquire() is None) # Cannot acquire 3 times.
39 | self.huey.storage.conn.delete(s.key)
40 |
--------------------------------------------------------------------------------
/huey/tests/test_immediate.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from huey.api import Huey
4 | from huey.api import MemoryHuey
5 | from huey.exceptions import TaskException
6 | from huey.storage import BlackHoleStorage
7 | from huey.tests.base import BaseTestCase
8 |
9 |
10 | class TestImmediate(BaseTestCase):
11 | def get_huey(self):
12 | return MemoryHuey(immediate=True, utc=False)
13 |
14 | def test_immediate(self):
15 | @self.huey.task()
16 | def task_a(n):
17 | return n + 1
18 |
19 | r = task_a(3)
20 |
21 | # Task is not enqueued, but the result *is* stored in the result-store.
22 | self.assertEqual(len(self.huey), 0)
23 | self.assertEqual(self.huey.result_count(), 1)
24 | self.assertEqual(r.get(), 4)
25 |
26 | # After reading, result is removed, as we would expect.
27 | self.assertEqual(self.huey.result_count(), 0)
28 |
29 | # Cannot add 1 to "None"; this produces an error. We get the usual
30 | # TaskException, which wraps the TypeError.
31 | r_err = task_a(None)
32 | self.assertRaises(TaskException, r_err.get)
33 |
34 | def test_immediate_pipeline(self):
35 | @self.huey.task()
36 | def add(a, b):
37 | return a + b
38 |
39 | p = add.s(3, 4).then(add, 5).then(add, 6).then(add, 7)
40 | result_group = self.huey.enqueue(p)
41 | self.assertEqual(result_group(), [7, 12, 18, 25])
42 |
43 | def test_immediate_scheduling(self):
44 | @self.huey.task()
45 | def task_a(n):
46 | return n + 1
47 |
48 | r = task_a.schedule((3,), delay=10)
49 |
50 | # Task is not enqueued, no result is generated, the task is added to
51 | # the schedule, however -- even though the scheduler never runs in
52 | # immediate mode.
53 | self.assertEqual(len(self.huey), 0)
54 | self.assertEqual(self.huey.result_count(), 0)
55 | self.assertEqual(self.huey.scheduled_count(), 1)
56 | self.assertTrue(r.get() is None)
57 |
58 | def test_immediate_reschedule(self):
59 | state = []
60 |
61 | @self.huey.task(context=True)
62 | def task_s(task=None):
63 | state.append(task.id)
64 | return 1
65 |
66 | r = task_s.schedule(delay=60)
67 | self.assertEqual(len(self.huey), 0)
68 | self.assertTrue(r() is None)
69 |
70 | r2 = r.reschedule()
71 | self.assertTrue(r.id != r2.id)
72 | self.assertEqual(state, [r2.id])
73 | self.assertEqual(r2(), 1)
74 | self.assertEqual(len(self.huey), 0)
75 | self.assertTrue(r.is_revoked())
76 |
77 | # Because the scheduler never picks up the original task (r), its
78 | # revocation key sits in the result store and the task is in the
79 | # schedule still.
80 | self.assertEqual(self.huey.result_count(), 1)
81 | self.assertEqual(self.huey.scheduled_count(), 1)
82 |
83 | def test_immediate_revoke_restore(self):
84 | @self.huey.task()
85 | def task_a(n):
86 | return n + 1
87 |
88 | task_a.revoke()
89 | r = task_a(3)
90 | self.assertEqual(len(self.huey), 0)
91 | self.assertTrue(r.get() is None)
92 |
93 | self.assertTrue(task_a.restore())
94 | r = task_a(4)
95 | self.assertEqual(r.get(), 5)
96 |
97 | def test_swap_immediate(self):
98 | @self.huey.task()
99 | def task_a(n):
100 | return n + 1
101 |
102 | r = task_a(1)
103 | self.assertEqual(r.get(), 2)
104 |
105 | self.huey.immediate = False
106 | r = task_a(2)
107 | self.assertEqual(len(self.huey), 1)
108 | self.assertEqual(self.huey.result_count(), 0)
109 | task = self.huey.dequeue()
110 | self.assertEqual(self.huey.execute(task), 3)
111 | self.assertEqual(r.get(), 3)
112 |
113 | self.huey.immediate = True
114 | r = task_a(3)
115 | self.assertEqual(r.get(), 4)
116 | self.assertEqual(len(self.huey), 0)
117 | self.assertEqual(self.huey.result_count(), 0)
118 |
119 | def test_map(self):
120 | @self.huey.task()
121 | def task_a(n):
122 | return n + 1
123 |
124 | result_group = task_a.map(range(8))
125 | self.assertEqual(result_group(), [1, 2, 3, 4, 5, 6, 7, 8])
126 |
127 |
128 | class NoUseException(Exception): pass
129 | class NoUseStorage(BlackHoleStorage):
130 | def enqueue(self, data, priority=None): raise NoUseException()
131 | def dequeue(self): raise NoUseException()
132 | def add_to_schedule(self, data, ts, utc): raise NoUseException()
133 | def read_schedule(self, ts): raise NoUseException()
134 | def put_data(self, key, value): raise NoUseException()
135 | def peek_data(self, key): raise NoUseException()
136 | def pop_data(self, key): raise NoUseException()
137 | def has_data_for_key(self, key): raise NoUseException()
138 | def put_if_empty(self, key, value): raise NoUseException()
139 | class NoUseHuey(Huey):
140 | def get_storage(self, **storage_kwargs):
141 | return NoUseStorage()
142 |
143 |
144 | class TestImmediateMemoryStorage(BaseTestCase):
145 | def get_huey(self):
146 | return NoUseHuey(utc=False)
147 |
148 | def test_immediate_storage(self):
149 | @self.huey.task()
150 | def task_a(n):
151 | return n + 1
152 |
153 | self.huey.immediate = True
154 |
155 | # If any operation happens to touch the "real" storage engine, an
156 | # exception will be raised. These tests validate that immediate mode
157 | # doesn't accidentally interact with the live storage.
158 | res = task_a(2)
159 | self.assertEqual(res(), 3)
160 |
161 | task_a.revoke()
162 | res = task_a(3)
163 | self.assertTrue(res() is None)
164 | self.assertTrue(task_a.restore())
165 |
166 | res = task_a(4)
167 | self.assertEqual(res(), 5)
168 |
169 | eta = datetime.datetime.now() + datetime.timedelta(seconds=60)
170 | res = task_a.schedule((5,), eta=eta)
171 | self.assertTrue(res() is None)
172 |
173 | minus_1 = eta - datetime.timedelta(seconds=1)
174 | self.assertEqual(self.huey.read_schedule(minus_1), [])
175 |
176 | tasks = self.huey.read_schedule(eta)
177 | self.assertEqual([t.id for t in tasks], [res.id])
178 | self.assertTrue(res() is None)
179 |
180 | # Switch back to regular storage / non-immediate mode.
181 | self.huey.immediate = False
182 | self.assertRaises(NoUseException, task_a, 1)
183 |
184 | # Switch back to immediate mode.
185 | self.huey.immediate = True
186 | res = task_a(10)
187 | self.assertEqual(res(), 11)
188 |
189 | def test_immediate_real_storage(self):
190 | self.huey.immediate_use_memory = False
191 |
192 | @self.huey.task()
193 | def task_a(n):
194 | return n + 1
195 |
196 | self.huey.immediate = True
197 | self.assertRaises(NoUseException, task_a, 1)
198 |
199 | self.huey.immediate = False
200 | self.assertRaises(NoUseException, task_a, 2)
201 |
--------------------------------------------------------------------------------
/huey/tests/test_kt_huey.py:
--------------------------------------------------------------------------------
1 | import os
2 | import subprocess as sp
3 | import unittest
4 |
5 | try:
6 | import ukt
7 | except ImportError:
8 | ukt = None
9 |
10 | try:
11 | from huey.contrib.kyototycoon import KyotoTycoonHuey
12 | from huey.contrib.kyototycoon import KyotoTycoonStorage
13 | except ImportError:
14 | if ukt is not None:
15 | raise
16 |
17 | from huey.tests.base import BaseTestCase
18 | from huey.tests.test_storage import StorageTests
19 |
20 | has_ktserver = sp.call(['which', 'ktserver'], stdout=sp.PIPE) == 0
21 |
22 |
23 | @unittest.skipIf(ukt is None, 'requires ukt')
24 | @unittest.skipIf(not has_ktserver, 'kyototycoon server not installed')
25 | class TestKyotoTycoonHuey(StorageTests, BaseTestCase):
26 | @classmethod
27 | def setUpClass(cls):
28 | lua_path = os.path.join(os.path.dirname(__file__), 'scripts/')
29 | lua_script = os.path.join(lua_path, 'kt.lua')
30 | cls._server = ukt.EmbeddedServer(database='%', serializer=ukt.KT_NONE,
31 | server_args=['-scr', lua_script])
32 | cls._server.run()
33 | cls.db = cls._server.client
34 |
35 | @classmethod
36 | def tearDownClass(cls):
37 | if cls._server is not None:
38 | cls._server.stop()
39 | cls.db.close_all()
40 | cls.db = None
41 |
42 | def tearDown(self):
43 | if self.db is not None:
44 | self.db.clear()
45 |
46 | def get_huey(self):
47 | return KyotoTycoonHuey(client=self.db, utc=False)
48 |
49 | def test_expire_results(self):
50 | huey = KyotoTycoonHuey(client=self.db, utc=False,
51 | result_expire_time=3600)
52 | s = huey.storage
53 |
54 | s.put_data(b'k1', b'v1')
55 | s.put_data(b'k2', b'v2', is_result=True)
56 | self.assertEqual(s.pop_data(b'k1'), b'v1')
57 | self.assertEqual(s.pop_data(b'k2'), b'v2')
58 |
59 | self.assertTrue(s.has_data_for_key(b'k2'))
60 | self.assertFalse(s.put_if_empty(b'k2', b'v2-x'))
61 | self.assertFalse(s.has_data_for_key(b'k3'))
62 | self.assertTrue(s.put_if_empty(b'k3', b'v3'))
63 |
64 | self.assertTrue(s.delete_data(b'k2'))
65 | self.assertFalse(s.delete_data(b'k2'))
66 | self.assertEqual(s.result_items(), {'k1': b'v1', 'k3': b'v3'})
67 | self.assertEqual(s.result_store_size(), 2)
68 |
--------------------------------------------------------------------------------
/huey/tests/test_priority.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from huey.api import MemoryHuey
4 | from huey.api import crontab
5 | from huey.exceptions import TaskException
6 | from huey.tests.base import BaseTestCase
7 |
8 |
9 | class TestPriority(BaseTestCase):
10 | def setUp(self):
11 | super(TestPriority, self).setUp()
12 |
13 | self.state = []
14 |
15 | def task(n):
16 | self.state.append(n)
17 | return n
18 |
19 | self.task_1 = self.huey.task(priority=1, name='task_1')(task)
20 | self.task_2 = self.huey.task(priority=2, name='task_2')(task)
21 | self.task_0 = self.huey.task(name='task_0')(task)
22 |
23 | def tearDown(self):
24 | super(TestPriority, self).tearDown()
25 | self.task_1 = self.task_2 = self.task_0 = None
26 |
27 | def test_priority_simple(self):
28 | self.task_0(0)
29 | self.task_1(10)
30 | self.task_2(100)
31 |
32 | self.task_0(2)
33 | self.task_1(12)
34 | self.task_2(120)
35 | self.assertEqual(len(self.huey), 6)
36 |
37 | # First the task_2 invocations, then the task_1, then the task_0.
38 | results = [100, 120, 10, 12, 0, 2]
39 | for result in results:
40 | self.assertEqual(self.execute_next(), result)
41 |
42 | self.assertEqual(len(self.huey), 0)
43 | self.assertEqual(self.state, results)
44 |
45 | def test_priority_override(self):
46 | r0_0 = self.task_0(0)
47 | r1_0 = self.task_1(10)
48 | r2_0 = self.task_2(100)
49 |
50 | r0_1 = self.task_0(1, priority=2)
51 | r1_1 = self.task_1(11, priority=0)
52 | r2_1 = self.task_2(110, priority=1)
53 |
54 | r0_2 = self.task_0(2, priority=1)
55 | r1_2 = self.task_1(12, priority=2)
56 | r2_2 = self.task_2(120, priority=0)
57 |
58 | results = [100, 1, 12, 10, 110, 2, 0, 11, 120]
59 | for result in results:
60 | self.assertEqual(self.execute_next(), result)
61 |
62 | self.assertEqual(len(self.huey), 0)
63 | self.assertEqual(self.state, results)
64 |
65 | r0_3 = self.task_0(3)
66 | r1_3 = self.task_1(13)
67 | r2_3 = self.task_2(130)
68 | rx = self.task_0(9, priority=9)
69 | results.extend((9, 130, 13, 3))
70 | for result in results[-4:]:
71 | self.assertEqual(self.execute_next(), result)
72 |
73 | self.assertEqual(len(self.huey), 0)
74 | self.assertEqual(self.state, results)
75 |
76 | def test_schedule_priority(self):
77 | eta = datetime.datetime.now() + datetime.timedelta(seconds=60)
78 | r0_0 = self.task_0.schedule((0,), eta=eta)
79 | r1_0 = self.task_1.schedule((10,), eta=eta)
80 | r2_0 = self.task_2.schedule((100,), eta=eta)
81 |
82 | r0_1 = self.task_0.schedule((1,), eta=eta, priority=2)
83 | r1_1 = self.task_1.schedule((11,), eta=eta, priority=0)
84 | r2_1 = self.task_2.schedule((110,), eta=eta, priority=1)
85 |
86 | expected = {
87 | r0_0.id: None,
88 | r1_0.id: 1,
89 | r2_0.id: 2,
90 | r0_1.id: 2,
91 | r1_1.id: 0,
92 | r2_1.id: 1}
93 |
94 | for _ in range(6):
95 | self.assertTrue(self.execute_next() is None)
96 |
97 | # Priorities are preserved when added to the schedule.
98 | priorities = dict((t.id, t.priority) for t in self.huey.scheduled())
99 | self.assertEqual(priorities, expected)
100 |
101 | # Priorities are preserved when read from the schedule.
102 | items = self.huey.read_schedule(timestamp=eta)
103 | priorities = dict((t.id, t.priority) for t in items)
104 | self.assertEqual(priorities, expected)
105 |
106 | def test_periodic_priority(self):
107 | @self.huey.periodic_task(crontab(), priority=3, name='ptask')
108 | def task_p():
109 | pass
110 |
111 | self.task_0(0)
112 | self.task_1(10)
113 | self.task_2(100)
114 |
115 | for task in self.huey.read_periodic(datetime.datetime.now()):
116 | self.huey.enqueue(task)
117 |
118 | # Our periodic task has a higher priority than the other tasks in the
119 | # queue, and will be executed first.
120 | self.assertEqual(len(self.huey), 4)
121 | ptask = self.huey.dequeue()
122 | self.assertEqual(ptask.name, 'ptask') # Verify it is our periodic task.
123 | self.assertEqual(ptask.priority, 3) # Priority is preserved.
124 |
125 | def test_priority_retry(self):
126 | @self.huey.task(priority=3, retries=1)
127 | def task_3(n):
128 | raise ValueError('uh-oh')
129 |
130 | self.task_0(0)
131 | self.task_1(10)
132 | r2 = self.task_2(100)
133 | r3 = task_3(3)
134 |
135 | self.assertEqual(len(self.huey), 4)
136 | task = self.huey.dequeue()
137 | self.assertEqual(task.id, r3.id)
138 | self.assertEqual(task.priority, 3)
139 | self.assertEqual(task.retries, 1)
140 | self.assertTrue(self.huey.execute(task) is None)
141 | self.assertRaises(TaskException, r3.get)
142 |
143 | # Task has been re-enqueued for retry. Verify priority is preserved.
144 | self.assertEqual(len(self.huey), 4)
145 | rtask = self.huey.dequeue()
146 | self.assertEqual(rtask.id, r3.id)
147 | self.assertEqual(rtask.priority, 3)
148 | self.assertEqual(rtask.retries, 0)
149 | self.assertTrue(self.huey.execute(rtask) is None)
150 |
151 | # No more retries, now we'll get our task_2.
152 | self.assertEqual(len(self.huey), 3)
153 | task = self.huey.dequeue()
154 | self.assertEqual(task.id, r2.id)
155 |
--------------------------------------------------------------------------------
/huey/tests/test_registry.py:
--------------------------------------------------------------------------------
1 | import pickle
2 |
3 | from huey.exceptions import HueyException
4 | from huey.tests.base import BaseTestCase
5 |
6 |
7 | class TestRegistry(BaseTestCase):
8 | def setUp(self):
9 | super(TestRegistry, self).setUp()
10 | self.registry = self.huey._registry
11 |
12 | def test_register_unique(self):
13 | def task_a(): pass
14 | def task_b(): pass
15 |
16 | ta = self.huey.task()(task_a)
17 | self.assertRaises(ValueError, self.huey.task(), task_a)
18 | self.assertRaises(ValueError, self.huey.task(name='task_a'), task_b)
19 |
20 | # We can register task_b and re-register task_a providing a new name.
21 | tb = self.huey.task()(task_b)
22 | ta2 = self.huey.task(name='task_a2')(task_a)
23 |
24 | t1 = ta.s()
25 | t2 = ta2.s()
26 | self.assertTrue(t1.name != t2.name)
27 |
28 | def test_register_unregister(self):
29 | @self.huey.task()
30 | def task_a():
31 | pass
32 |
33 | self.assertTrue(task_a.unregister())
34 | self.assertFalse(task_a.unregister())
35 |
36 | def test_message_wrapping(self):
37 | @self.huey.task(retries=1)
38 | def task_a(p1, p2, p3=3, p4=None):
39 | pass
40 |
41 | task = task_a.s('v1', 'v2', p4='v4')
42 | message = self.registry.create_message(task)
43 | self.assertEqual(message.id, task.id)
44 | self.assertEqual(message.retries, 1)
45 | self.assertEqual(message.retry_delay, 0)
46 | self.assertEqual(message.args, ('v1', 'v2'))
47 | self.assertEqual(message.kwargs, {'p4': 'v4'})
48 | self.assertTrue(message.on_complete is None)
49 | self.assertTrue(message.on_error is None)
50 | self.assertTrue(message.expires is None)
51 | self.assertTrue(message.expires_resolved is None)
52 |
53 | task2 = self.registry.create_task(message)
54 | self.assertEqual(task2.id, task.id)
55 | self.assertEqual(task2.retries, 1)
56 | self.assertEqual(task2.retry_delay, 0)
57 | self.assertEqual(task2.args, ('v1', 'v2'))
58 | self.assertEqual(task2.kwargs, {'p4': 'v4'})
59 | self.assertTrue(task2.on_complete is None)
60 | self.assertTrue(task2.on_error is None)
61 | self.assertTrue(task2.expires is None)
62 | self.assertTrue(task2.expires_resolved is None)
63 |
64 | def test_missing_task(self):
65 | @self.huey.task()
66 | def task_a():
67 | pass
68 |
69 | # Serialize the task invocation.
70 | task = task_a.s()
71 | message = self.registry.create_message(task)
72 |
73 | # Unregister the task, which will raise an error when we try to
74 | # deserialize the message back into a task instance.
75 | self.assertTrue(task_a.unregister())
76 | self.assertRaises(HueyException, self.registry.create_task, message)
77 |
78 | # Similarly, we can no longer serialize the task to a message.
79 | self.assertRaises(HueyException, self.registry.create_message, task)
80 |
81 | def test_periodic_tasks(self):
82 | def task_fn(): pass
83 | self.huey.task(name='a')(task_fn)
84 | p1 = self.huey.periodic_task(lambda _: False, name='p1')(task_fn)
85 | p2 = self.huey.periodic_task(lambda _: False, name='p2')(task_fn)
86 | self.huey.task(name='b')(task_fn)
87 |
88 | periodic = sorted(t.name for t in self.registry.periodic_tasks)
89 | self.assertEqual(periodic, ['p1', 'p2'])
90 |
91 | self.assertTrue(p1.unregister())
92 | periodic = sorted(t.name for t in self.registry.periodic_tasks)
93 | self.assertEqual(periodic, ['p2'])
94 |
95 | def test_huey1_compat(self):
96 | @self.huey.task()
97 | def task_a(n):
98 | return n + 1
99 |
100 | t = task_a.s(2)
101 |
102 | # Enqueue a message using the old message serialization format.
103 | tc = task_a.task_class
104 | old_message = (t.id, '%s.%s' % (tc.__module__, tc.__name__), None, 0,
105 | 0, ((2,), {}), None)
106 | self.huey.storage.enqueue(pickle.dumps(old_message))
107 |
108 | self.assertEqual(len(self.huey), 1)
109 | self.assertEqual(self.execute_next(), 3)
110 | self.assertEqual(self.huey.result(t.id), 3)
111 |
--------------------------------------------------------------------------------
/huey/tests/test_serializer.py:
--------------------------------------------------------------------------------
1 | try:
2 | import gzip
3 | except ImportError:
4 | gzip = None
5 | import unittest
6 | try:
7 | import zlib
8 | except ImportError:
9 | zlib = None
10 |
11 | from huey.serializer import Serializer
12 | from huey.tests.base import BaseTestCase
13 |
14 |
15 | class TestSerializer(BaseTestCase):
16 | data = [
17 | None,
18 | 0, 1,
19 | b'a' * 1024,
20 | ['k1', 'k2', 'k3'],
21 | {'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}]
22 |
23 | def _test_serializer(self, s):
24 | for item in self.data:
25 | self.assertEqual(s.deserialize(s.serialize(item)), item)
26 |
27 | def test_serializer(self):
28 | self._test_serializer(Serializer())
29 |
30 | @unittest.skipIf(gzip is None, 'gzip module not installed')
31 | def test_serializer_gzip(self):
32 | self._test_serializer(Serializer(compression=True))
33 |
34 | @unittest.skipIf(zlib is None, 'zlib module not installed')
35 | def test_serializer_zlib(self):
36 | self._test_serializer(Serializer(compression=True, use_zlib=True))
37 |
38 | @unittest.skipIf(zlib is None, 'zlib module not installed')
39 | @unittest.skipIf(gzip is None, 'gzip module not installed')
40 | def test_mismatched_compression(self):
41 | for use_zlib in (False, True):
42 | s = Serializer()
43 | scomp = Serializer(compression=True, use_zlib=use_zlib)
44 | for item in self.data:
45 | self.assertEqual(scomp.deserialize(s.serialize(item)), item)
46 |
--------------------------------------------------------------------------------
/huey/tests/test_signals.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from huey.signals import *
4 | from huey.tests.base import BaseTestCase
5 |
6 |
7 | class TestSignals(BaseTestCase):
8 | def setUp(self):
9 | super(TestSignals, self).setUp()
10 | self._state = []
11 |
12 | @self.huey.signal()
13 | def signal_handle(signal, task, *args):
14 | self._state.append((signal, task, args))
15 |
16 | def assertSignals(self, expected):
17 | self.assertEqual([s[0] for s in self._state], expected)
18 | self._state = []
19 |
20 | def test_signals_simple(self):
21 | @self.huey.task()
22 | def task_a(n):
23 | return n + 1
24 |
25 | r = task_a(3)
26 | self.assertSignals([SIGNAL_ENQUEUED])
27 | self.assertEqual(self.execute_next(), 4)
28 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
29 |
30 | r = task_a.schedule((2,), delay=60)
31 | self.assertSignals([SIGNAL_ENQUEUED])
32 | self.assertTrue(self.execute_next() is None)
33 | self.assertSignals([SIGNAL_SCHEDULED])
34 |
35 | r = task_a(None)
36 | self.assertSignals([SIGNAL_ENQUEUED])
37 | self.assertTrue(self.execute_next() is None)
38 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])
39 |
40 | def test_signal_complete_result_ready(self):
41 | @self.huey.task()
42 | def task_a(n):
43 | return n + 1
44 |
45 | results = []
46 |
47 | @self.huey.signal(SIGNAL_COMPLETE)
48 | def on_complete(sig, task, *_):
49 | results.append(self.huey.result(task.id))
50 |
51 | r = task_a(2)
52 | self.assertEqual(self.execute_next(), 3)
53 | self.assertEqual(results, [3])
54 |
55 | def test_signals_on_retry(self):
56 | @self.huey.task(retries=1)
57 | def task_a(n):
58 | return n + 1
59 |
60 | r = task_a(None)
61 | self.assertSignals([SIGNAL_ENQUEUED])
62 | self.assertTrue(self.execute_next() is None)
63 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING,
64 | SIGNAL_ENQUEUED])
65 | self.assertTrue(self.execute_next() is None)
66 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])
67 |
68 | @self.huey.task(retries=1, retry_delay=60)
69 | def task_b(n):
70 | return n + 1
71 |
72 | r = task_b(None)
73 | self.assertSignals([SIGNAL_ENQUEUED])
74 | self.assertTrue(self.execute_next() is None)
75 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING,
76 | SIGNAL_SCHEDULED])
77 |
78 | def test_signals_revoked(self):
79 | @self.huey.task()
80 | def task_a(n):
81 | return n + 1
82 |
83 | task_a.revoke(revoke_once=True)
84 | r = task_a(2)
85 | self.assertSignals([SIGNAL_ENQUEUED])
86 | self.assertTrue(self.execute_next() is None)
87 | self.assertSignals([SIGNAL_REVOKED])
88 |
89 | r = task_a(3)
90 | self.assertEqual(self.execute_next(), 4)
91 | self.assertSignals([SIGNAL_ENQUEUED, SIGNAL_EXECUTING,
92 | SIGNAL_COMPLETE])
93 |
94 | def test_signals_locked(self):
95 | @self.huey.task()
96 | @self.huey.lock_task('lock-a')
97 | def task_a(n):
98 | return n + 1
99 |
100 | r = task_a(1)
101 | self.assertSignals([SIGNAL_ENQUEUED])
102 | self.assertEqual(self.execute_next(), 2)
103 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
104 |
105 | with self.huey.lock_task('lock-a'):
106 | r = task_a(2)
107 | self.assertSignals([SIGNAL_ENQUEUED])
108 | self.assertTrue(self.execute_next() is None)
109 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_LOCKED])
110 |
111 | def test_signal_expired(self):
112 | @self.huey.task(expires=10)
113 | def task_a(n):
114 | return n + 1
115 |
116 | now = datetime.datetime.now()
117 | expires = now + datetime.timedelta(seconds=15)
118 | r = task_a(2)
119 | self.assertSignals([SIGNAL_ENQUEUED])
120 | self.assertTrue(self.execute_next(expires) is None)
121 | self.assertSignals([SIGNAL_EXPIRED])
122 |
123 | r = task_a(3)
124 | self.assertSignals([SIGNAL_ENQUEUED])
125 | self.assertEqual(self.execute_next(), 4)
126 | self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])
127 |
128 | def test_specific_handler(self):
129 | extra_state = []
130 |
131 | @self.huey.signal(SIGNAL_EXECUTING)
132 | def extra_handler(signal, task):
133 | extra_state.append(task.args[0])
134 |
135 | @self.huey.task()
136 | def task_a(n):
137 | return n + 1
138 |
139 | r = task_a(3)
140 | self.assertEqual(extra_state, [])
141 | self.assertEqual(self.execute_next(), 4)
142 | self.assertEqual(extra_state, [3])
143 | self.assertSignals([SIGNAL_ENQUEUED, SIGNAL_EXECUTING,
144 | SIGNAL_COMPLETE])
145 |
146 | r2 = task_a(1)
147 | self.assertEqual(self.execute_next(), 2)
148 | self.assertEqual(extra_state, [3, 1])
149 | self.assertSignals([SIGNAL_ENQUEUED, SIGNAL_EXECUTING,
150 | SIGNAL_COMPLETE])
151 |
152 | self.huey.disconnect_signal(extra_handler, SIGNAL_EXECUTING)
153 | r3 = task_a(2)
154 | self.assertEqual(self.execute_next(), 3)
155 | self.assertEqual(extra_state, [3, 1])
156 | self.assertSignals([SIGNAL_ENQUEUED, SIGNAL_EXECUTING,
157 | SIGNAL_COMPLETE])
158 |
159 | def test_multi_handlers(self):
160 | state1 = []
161 | state2 = []
162 |
163 | @self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
164 | def handler1(signal, task):
165 | state1.append(signal)
166 |
167 | @self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
168 | def handler2(signal, task):
169 | state2.append(signal)
170 |
171 | @self.huey.task()
172 | def task_a(n):
173 | return n + 1
174 |
175 | r = task_a(1)
176 | self.assertEqual(self.execute_next(), 2)
177 | self.assertEqual(state1, ['executing', 'complete'])
178 | self.assertEqual(state2, ['executing', 'complete'])
179 |
180 | self.huey.disconnect_signal(handler1, SIGNAL_COMPLETE)
181 | self.huey.disconnect_signal(handler2)
182 |
183 | r2 = task_a(2)
184 | self.assertEqual(self.execute_next(), 3)
185 | self.assertEqual(state1, ['executing', 'complete', 'executing'])
186 | self.assertEqual(state2, ['executing', 'complete'])
187 |
--------------------------------------------------------------------------------
/huey/tests/test_sql_huey.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 |
4 | try:
5 | import peewee
6 | except ImportError:
7 | peewee = None
8 |
9 | try:
10 | from huey.contrib.sql_huey import SqlHuey
11 | from huey.contrib.sql_huey import SqlStorage
12 | except ImportError:
13 | if peewee is not None:
14 | raise
15 | from huey.tests.base import BaseTestCase
16 | from huey.tests.test_storage import StorageTests
17 |
18 |
19 | SQLHUEY_URL = os.environ.get('SQLHUEY_URL') or 'sqlite:////tmp/huey-sqlite.db'
20 |
21 |
22 | @unittest.skipIf(peewee is None, 'requires peewee')
23 | class TestSqlStorage(StorageTests, BaseTestCase):
24 | db_file = '/tmp/huey-sqlite.db'
25 |
26 | def setUp(self):
27 | if os.path.exists(self.db_file):
28 | os.unlink(self.db_file)
29 | super(TestSqlStorage, self).setUp()
30 |
31 | def tearDown(self):
32 | super(TestSqlStorage, self).tearDown()
33 | self.huey.storage.drop_tables()
34 |
35 | @classmethod
36 | def tearDownClass(cls):
37 | super(TestSqlStorage, cls).tearDownClass()
38 | if os.path.exists(cls.db_file):
39 | os.unlink(cls.db_file)
40 |
41 | def get_huey(self):
42 | return SqlHuey(database=SQLHUEY_URL, utc=False)
43 |
44 | def test_sql_huey_basic(self):
45 | @self.huey.task()
46 | def task_a(n):
47 | return n + 1
48 |
49 | r1 = task_a(1)
50 | r2 = task_a(2)
51 | self.assertEqual(self.execute_next(), 2)
52 | self.assertEqual(len(self.huey), 1)
53 | self.assertEqual(self.huey.result_count(), 1)
54 | r2.revoke()
55 | self.assertEqual(self.huey.result_count(), 2)
56 |
57 | self.assertTrue(self.execute_next() is None)
58 | self.assertEqual(len(self.huey), 0)
59 | self.assertEqual(self.huey.result_count(), 1)
60 |
61 | r3 = task_a.schedule((3,), delay=10)
62 | self.assertEqual(len(self.huey), 1)
63 | self.assertTrue(self.execute_next() is None)
64 | self.assertEqual(self.huey.scheduled_count(), 1)
65 | self.assertEqual(len(self.huey), 0)
66 | self.assertEqual(self.huey.result_count(), 1)
67 |
68 | tasks = self.huey.read_schedule(r3.task.eta)
69 | self.assertEqual(len(tasks), 1)
70 | self.assertEqual(tasks[0].id, r3.id)
71 |
72 | def test_sql_huey_priority(self):
73 | @self.huey.task()
74 | def task_a(n):
75 | return n
76 |
77 | @self.huey.task(priority=1)
78 | def task_b(n):
79 | return n * 10
80 |
81 | task_a(1)
82 | task_b(2)
83 | task_a(3, priority=2)
84 | task_b(4, priority=2)
85 | task_a(5, priority=1)
86 | task_b(6, priority=0)
87 | task_a(7)
88 | task_b(8)
89 |
90 | results = [3, 40, 20, 5, 80, 1, 60, 7]
91 | for result in results:
92 | self.assertEqual(self.execute_next(), result)
93 |
94 | self.assertEqual(len(self.huey), 0)
95 |
--------------------------------------------------------------------------------
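For reference, the wrapper exercised above is configured with a peewee
database URL; a minimal sketch (requires peewee, and the path shown is just
an example):

.. code-block:: python

    from huey.contrib.sql_huey import SqlHuey

    # Extra keyword arguments are forwarded to the SqlStorage backend.
    huey = SqlHuey(database='sqlite:////tmp/huey-example.db')

    @huey.task()
    def add(a, b):
        return a + b

--------------------------------------------------------------------------------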
/huey/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import os
3 | import time
4 | import unittest
5 |
6 | from huey.utils import UTC
7 | from huey.utils import normalize_time
8 | from huey.utils import reraise_as
9 |
10 |
11 | class MyException(Exception): pass
12 |
13 |
14 | class TestReraiseAs(unittest.TestCase):
15 | def test_wrap_exception(self):
16 | def raise_keyerror():
17 | try:
18 | {}['huey']
19 | except KeyError as exc:
20 | reraise_as(MyException)
21 |
22 | self.assertRaises(MyException, raise_keyerror)
23 | try:
24 | raise_keyerror()
25 | except MyException as exc:
26 | self.assertEqual(str(exc), "KeyError: 'huey'")
27 | else:
28 | raise AssertionError('MyException not raised as expected.')
29 |
30 |
31 | class FakePacific(datetime.tzinfo):
32 | def utcoffset(self, dt):
33 | return datetime.timedelta(hours=-8)
34 | def tzname(self, dt):
35 | return 'US/Pacific'
36 | def dst(self, dt):
37 | return datetime.timedelta(0)
38 |
39 |
40 | class TestNormalizeTime(unittest.TestCase):
41 | def setUp(self):
42 | self._orig_tz = os.environ.get('TZ')
43 | os.environ['TZ'] = 'US/Pacific'
44 | time.tzset()
45 |
46 | def tearDown(self):
47 | del os.environ['TZ']
48 | if self._orig_tz:
49 | os.environ['TZ'] = self._orig_tz
50 | time.tzset()
51 |
52 | def test_normalize_time(self):
53 | ts_local = datetime.datetime(2000, 1, 1, 12, 0, 0) # Noon on Jan 1.
54 | ts_utc = ts_local + datetime.timedelta(hours=8) # For fake tz.
55 | ts_inv = ts_local - datetime.timedelta(hours=8)
56 |
57 | # Naive datetime.
58 |
59 | # No conversion is applied, as we treat everything as local time.
60 | self.assertEqual(normalize_time(ts_local, utc=False), ts_local)
61 |
62 | # So we provided a naive timestamp from the localtime (us/pacific),
63 | # which is 8 hours behind UTC in January.
64 | self.assertEqual(normalize_time(ts_local, utc=True), ts_utc)
65 |
66 | # TZ-aware datetime in local timezone (Fake US/Pacific).
67 |
68 | # Here we provide a tz-aware timestamp from the localtime (us/pacific).
69 | ts = datetime.datetime(2000, 1, 1, 12, 0, 0, tzinfo=FakePacific())
70 |
71 | # No conversion, treated as local time.
72 | self.assertEqual(normalize_time(ts, utc=False), ts_local)
73 |
74 | # Converted to UTC according to rules from our fake tzinfo, +8 hours.
75 | self.assertEqual(normalize_time(ts, utc=True), ts_utc)
76 |
77 | # TZ-aware datetime in UTC timezone.
78 |
79 | # Here we provide a tz-aware timestamp using UTC timezone.
80 | ts = datetime.datetime(2000, 1, 1, 12, 0, 0, tzinfo=UTC())
81 |
82 | # Since we're specifying utc=False, we are dealing with localtimes
83 | # internally. The timestamp passed in is a tz-aware timestamp in UTC.
84 | # To convert to a naive localtime, we subtract 8 hours (since UTC is
85 | # 8 hours ahead of our local time).
86 | self.assertEqual(normalize_time(ts, utc=False), ts_inv)
87 |
88 | # When utc=True there's no change, since the timestamp is already UTC.
89 | self.assertEqual(normalize_time(ts, utc=True), ts_local)
90 |
--------------------------------------------------------------------------------
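The behavior exercised above can be summarized with a short sketch (output
depends on the local timezone; ``utc`` defaults to ``True``):

.. code-block:: python

    import datetime

    from huey.utils import normalize_time

    # A naive eta is treated as local time and converted to UTC.
    eta = datetime.datetime(2000, 1, 1, 12, 0, 0)
    print(normalize_time(eta=eta, utc=True))

    # A delay, given in seconds or as a timedelta, is added to "now".
    print(normalize_time(delay=60))

--------------------------------------------------------------------------------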
/huey/tests/test_wrappers.py:
--------------------------------------------------------------------------------
1 | from functools import partial
2 |
3 | from huey import api
4 | from huey import storage
5 | from huey.tests.base import BaseTestCase
6 |
7 |
8 | class TestWrappers(BaseTestCase):
9 | def test_wrappers(self):
10 | wrappers = {
11 | api.BlackHoleHuey: storage.BlackHoleStorage,
12 | api.MemoryHuey: storage.MemoryStorage,
13 | api.RedisExpireHuey: storage.RedisExpireStorage,
14 | api.RedisHuey: storage.RedisStorage,
15 | api.SqliteHuey: storage.SqliteStorage,
16 | }
17 | for huey_wrapper, storage_class in wrappers.items():
18 | h = huey_wrapper('testhuey')
19 | self.assertEqual(h.name, 'testhuey')
20 | self.assertEqual(h.storage.name, 'testhuey')
21 | self.assertTrue(isinstance(h.storage, storage_class))
22 |
23 | def test_fake_wrapper(self):
24 | # This is kind-of a silly test, as we're essentially just testing
25 | # functools.partial(), but let's just make sure that parameters are
26 | # getting passed to the storage correctly - and that the storage is
27 | # initialized correctly.
28 | class BogusStorage(storage.BlackHoleStorage):
29 | def __init__(self, name, host='127.0.0.1', port=None, db=None):
30 | super(BogusStorage, self).__init__(name)
31 | self.host = host
32 | self.port = port
33 | self.db = db
34 |
35 | BH = partial(api.Huey, storage_class=BogusStorage, port=1337)
36 |
37 | bh = BH('test')
38 | self.assertEqual(bh.name, 'test')
39 | self.assertEqual(bh.storage.name, 'test')
40 | self.assertTrue(isinstance(bh.storage, BogusStorage))
41 | self.assertEqual(bh.storage.host, '127.0.0.1')
42 | self.assertEqual(bh.storage.port, 1337)
43 | self.assertTrue(bh.storage.db is None)
44 |
45 | bh2 = BH('test2', host='localhost', port=31337, db=15)
46 | self.assertEqual(bh2.storage.host, 'localhost')
47 | self.assertEqual(bh2.storage.port, 31337)
48 | self.assertEqual(bh2.storage.db, 15)
49 |
--------------------------------------------------------------------------------
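The ``functools.partial`` trick tested above can also be used to build
preconfigured wrappers in application code; a sketch (the host and port
values are illustrative only):

.. code-block:: python

    from functools import partial

    from huey import RedisHuey

    # Keyword arguments not consumed by Huey are passed to the storage.
    MyRedisHuey = partial(RedisHuey, host='127.0.0.1', port=6379)

    huey = MyRedisHuey('my-app')

--------------------------------------------------------------------------------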
/huey/utils.py:
--------------------------------------------------------------------------------
1 | from collections import namedtuple
2 | import calendar
3 | import datetime
4 | import errno
5 | import os
6 | import sys
7 | import time
8 | import warnings
9 | try:
10 | import fcntl
11 | except ImportError:
12 | fcntl = None
13 |
14 | if sys.version_info < (3, 12):
15 | utcnow = datetime.datetime.utcnow
16 | else:
17 | def utcnow():
18 | return (datetime.datetime
19 | .now(datetime.timezone.utc)
20 | .replace(tzinfo=None))
21 |
22 |
23 | Error = namedtuple('Error', ('metadata',))
24 |
25 |
26 | class UTC(datetime.tzinfo):
27 | zero = datetime.timedelta(0)
28 |
29 | def __repr__(self):
30 |         return "<UTC>"
31 | def utcoffset(self, dt):
32 | return self.zero
33 | def tzname(self, dt):
34 | return "UTC"
35 | def dst(self, dt):
36 | return self.zero
37 | _UTC = UTC()
38 |
39 |
40 | def load_class(s):
41 | path, klass = s.rsplit('.', 1)
42 | __import__(path)
43 | mod = sys.modules[path]
44 | return getattr(mod, klass)
45 |
46 |
47 | def reraise_as(new_exc_class):
48 | exc_class, exc, tb = sys.exc_info()
49 | raise new_exc_class('%s: %s' % (exc_class.__name__, exc))
50 |
51 |
52 | def is_naive(dt):
53 | """
54 | Determines if a given datetime.datetime is naive.
55 | The concept is defined in Python's docs:
56 | http://docs.python.org/library/datetime.html#datetime.tzinfo
57 | Assuming value.tzinfo is either None or a proper datetime.tzinfo,
58 | value.utcoffset() implements the appropriate logic.
59 | """
60 | return dt.utcoffset() is None
61 |
62 |
63 | def make_naive(dt):
64 | """
65 |     Converts an aware datetime.datetime to a naive datetime in the local time zone.
66 | """
67 | tt = dt.utctimetuple()
68 | ts = calendar.timegm(tt)
69 | local_tt = time.localtime(ts)
70 | return datetime.datetime(*local_tt[:6])
71 |
72 |
73 | def aware_to_utc(dt):
74 | """
75 |     Converts an aware datetime.datetime to a naive datetime in UTC.
76 | """
77 | return dt.astimezone(_UTC).replace(tzinfo=None)
78 |
79 |
80 | def local_to_utc(dt):
81 | """
82 |     Converts a naive local datetime.datetime to a naive datetime in UTC.
83 | """
84 | return datetime.datetime(*time.gmtime(time.mktime(dt.timetuple()))[:6])
85 |
86 |
87 | def normalize_expire_time(expires, utc=True):
88 | if isinstance(expires, datetime.datetime):
89 | return normalize_time(eta=expires, utc=utc)
90 | return normalize_time(delay=expires, utc=utc)
91 |
92 |
93 | def normalize_time(eta=None, delay=None, utc=True):
94 | if not ((delay is None) ^ (eta is None)):
95 | raise ValueError('Specify either an eta (datetime) or delay (seconds)')
96 | elif delay:
97 | method = (utc and utcnow or
98 | datetime.datetime.now)
99 | if not isinstance(delay, datetime.timedelta):
100 | delay = datetime.timedelta(seconds=delay)
101 | return method() + delay
102 | elif eta:
103 | has_tz = not is_naive(eta)
104 | if utc:
105 | if not has_tz:
106 | eta = local_to_utc(eta)
107 | else:
108 | eta = aware_to_utc(eta)
109 | elif has_tz:
110 | # Convert TZ-aware into naive localtime.
111 | eta = make_naive(eta)
112 | return eta
113 |
114 |
115 | if sys.version_info[0] == 2:
116 | string_type = basestring
117 | text_type = unicode
118 | def to_timestamp(dt):
119 | return time.mktime(dt.timetuple())
120 | else:
121 | string_type = (bytes, str)
122 | text_type = str
123 | def to_timestamp(dt):
124 | return dt.timestamp()
125 |
126 |
127 | def encode(s):
128 | if isinstance(s, bytes):
129 | return s
130 | elif isinstance(s, text_type):
131 | return s.encode('utf8')
132 | elif s is not None:
133 | return text_type(s).encode('utf8')
134 |
135 |
136 | def decode(s):
137 | if isinstance(s, text_type):
138 | return s
139 | elif isinstance(s, bytes):
140 | return s.decode('utf8')
141 | elif s is not None:
142 | return text_type(s)
143 |
144 |
145 | class FileLock(object):
146 | def __init__(self, filename):
147 | if fcntl is None:
148 | warnings.warn('FileLock not supported on this platform. Please '
149 | 'use a different storage implementation.')
150 | self.filename = filename
151 | self.fd = None
152 |
153 | dirname = os.path.dirname(filename)
154 | if not os.path.exists(dirname):
155 | os.makedirs(dirname)
156 | elif os.path.exists(self.filename):
157 | os.unlink(self.filename)
158 |
159 | def acquire(self):
160 | flags = os.O_CREAT | os.O_TRUNC | os.O_RDWR
161 | self.fd = os.open(self.filename, flags)
162 | if fcntl is not None:
163 | fcntl.flock(self.fd, fcntl.LOCK_EX)
164 |
165 | def release(self):
166 | if self.fd is not None:
167 | fd, self.fd = self.fd, None
168 | if fcntl is not None:
169 | fcntl.flock(fd, fcntl.LOCK_UN)
170 | os.close(fd)
171 |
172 | def __enter__(self):
173 | self.acquire()
174 | return self
175 |
176 | def __exit__(self, exc_type, exc_val, exc_tb):
177 | self.release()
178 |
179 |
180 | if sys.version_info[0] < 3:
181 | time_clock = time.time
182 | else:
183 | time_clock = time.monotonic
184 |
--------------------------------------------------------------------------------
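``FileLock`` from the module above is usable as a context manager; a minimal
sketch (POSIX only, since it relies on ``fcntl``, and the path is just an
example):

.. code-block:: python

    from huey.utils import FileLock

    # The lock file's directory is created on demand.
    with FileLock('/tmp/huey-example/lockfile'):
        pass  # critical section guarded by an exclusive flock()

--------------------------------------------------------------------------------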
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["setuptools", "wheel"]
3 |
--------------------------------------------------------------------------------
/runtests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import glob
4 | import optparse
5 | import os
6 | import sys
7 | import unittest
8 |
9 | from huey import tests
10 |
11 |
12 | def collect_tests(args=None):
13 | suite = unittest.TestSuite()
14 |
15 | if not args:
16 | from huey import tests
17 | module_suite = unittest.TestLoader().loadTestsFromModule(tests)
18 | suite.addTest(module_suite)
19 | else:
20 | tmpl = 'huey.tests.test_%s'
21 | cleaned = [tmpl % arg if not arg.startswith('test') else arg
22 | for arg in args]
23 | user_suite = unittest.TestLoader().loadTestsFromNames(cleaned)
24 | suite.addTest(user_suite)
25 | return suite
26 |
27 |
28 | def runtests(suite, verbosity=1, failfast=False):
29 | runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast)
30 | results = runner.run(suite)
31 | return results.failures, results.errors
32 |
33 |
34 | if __name__ == '__main__':
35 | parser = optparse.OptionParser()
36 | parser.add_option('-v', '--verbosity', dest='verbosity', default=1,
37 | type='int', help='Verbosity of output')
38 | parser.add_option('-f', '--failfast', action='store_true', default=False,
39 | help='Stop on first failure or error.')
40 |
41 | options, args = parser.parse_args()
42 | suite = collect_tests(args)
43 | failures, errors = runtests(suite, options.verbosity, options.failfast)
44 | for f in glob.glob('huey*.db*'):
45 | os.unlink(f)
46 |
47 | if errors or failures:
48 | sys.exit(1)
49 |
--------------------------------------------------------------------------------
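As an example of the argument handling above, ``python runtests.py utils
wrappers -v 2`` loads ``huey.tests.test_utils`` and ``huey.tests.test_wrappers``
at verbosity 2, and ``-f`` stops on the first failure or error.

--------------------------------------------------------------------------------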
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | from setuptools import setup, find_packages
3 |
4 |
5 | with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fh:
6 | readme = fh.read()
7 |
8 | extras_require = {
9 | 'backends': ['redis>=3.0.0'],
10 | 'redis': ['redis>=3.0.0'],
11 | }
12 |
13 | setup(
14 | name='huey',
15 | version=__import__('huey').__version__,
16 | description='huey, a little task queue',
17 | long_description=readme,
18 | author='Charles Leifer',
19 | author_email='coleifer@gmail.com',
20 | url='http://github.com/coleifer/huey/',
21 | packages=find_packages(),
22 | extras_require=extras_require,
23 | package_data={
24 | 'huey': [
25 | ],
26 | },
27 | classifiers=[
28 | 'Development Status :: 5 - Production/Stable',
29 | 'Intended Audience :: Developers',
30 | 'License :: OSI Approved :: MIT License',
31 | 'Operating System :: OS Independent',
32 | 'Programming Language :: Python',
33 | 'Programming Language :: Python :: 2.7',
34 | 'Programming Language :: Python :: 3.4',
35 | 'Programming Language :: Python :: 3.5',
36 | 'Programming Language :: Python :: 3.6',
37 | 'Programming Language :: Python :: 3.7',
38 | 'Programming Language :: Python :: 3.8',
39 | 'Programming Language :: Python :: 3.9',
40 | 'Programming Language :: Python :: 3.10',
41 | 'Programming Language :: Python :: 3.11',
42 | 'Programming Language :: Python :: 3.12',
43 | #'Programming Language :: Python :: 3.13',
44 | 'Topic :: Software Development :: Libraries :: Python Modules',
45 | ],
46 | test_suite='runtests.collect_tests',
47 | entry_points={
48 | 'console_scripts': [
49 | 'huey_consumer = huey.bin.huey_consumer:consumer_main'
50 | ]
51 | },
52 | scripts=['huey/bin/huey_consumer.py'],
53 | )
54 |
--------------------------------------------------------------------------------