├── .flake8 ├── .github └── workflows │ └── tests.yml ├── .gitignore ├── CHANGES ├── LICENSE ├── README.rst ├── dev-requirements.txt ├── manage.py ├── pgq ├── __init__.py ├── apps.py ├── commands.py ├── decorators.py ├── exceptions.py ├── migrations │ ├── 0001_initial.py │ └── __init__.py ├── models.py ├── py.typed └── queue.py ├── setup.cfg ├── setup.py ├── testproj ├── __init__.py ├── management │ ├── __init__.py │ └── commands │ │ ├── __init__.py │ │ └── test_worker.py ├── models.py ├── queue.py ├── requirements.txt ├── settings.py ├── test_decorators.py ├── test_queues.py ├── tests.py ├── urls.py └── wsgi.py └── tox.ini /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 80 3 | select = C,E,F,W,B,B950 4 | ignore = W292,W504,E501,W503,E303,W293,E122,E231,E271,E222,E261,E203,E704 5 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Tests 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | tests: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | fail-fast: false 10 | matrix: 11 | python-version: [3.6, 3.7, 3.8, 3.9] 12 | 13 | # Service containers to run with `container-job` 14 | services: 15 | # Label used to access the service container 16 | postgres: 17 | # Docker Hub image 18 | image: postgres 19 | # Provide the password for postgres 20 | env: 21 | POSTGRES_USER: pgq 22 | POSTGRES_PASSWORD: pgq 23 | POSTGRES_DB: pgq_testproj 24 | TZ: UTC 25 | PGTZ: UTC 26 | # Set health checks to wait until postgres has started 27 | options: >- 28 | --health-cmd pg_isready 29 | --health-interval 10s 30 | --health-timeout 5s 31 | --health-retries 5 32 | ports: 33 | # Maps tcp port 5432 on service container to the host 34 | - 5432:5432 35 | 36 | steps: 37 | - uses: actions/checkout@v2 38 | - name: Set up Python ${{ matrix.python-version }} 39 | uses: actions/setup-python@v2 40 | with: 41 | python-version: ${{ matrix.python-version }} 42 | - name: psycopg2 prerequisites 43 | run: sudo apt-get install libpq-dev 44 | - name: Install dependencies 45 | run: | 46 | python -m pip install --upgrade pip tox tox-factor 47 | - name: Run tox targets for ${{ matrix.python-version }} 48 | run: | 49 | PYVERSION=$(python -c "import sys; print(''.join([str(sys.version_info.major), str(sys.version_info.minor)]))") 50 | python -m tox -f "py${PYVERSION}" 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | dist/ 3 | *.egg-info/ 4 | *.swp 5 | .mypy_cache 6 | .tox 7 | -------------------------------------------------------------------------------- /CHANGES: -------------------------------------------------------------------------------- 1 | 0.8.2 (2024-02-01) 2 | ================== 3 | 4 | - Ensures run_once always has a job variable thanks @myers 5 | 6 | 7 | 0.8.1 (2021-07-19) 8 | ================== 9 | 10 | - Fixes worker crash on exception in task transaction on_commit thanks @gavinwahl 11 | 12 | 13 | 0.8.0 (2021-06-18) 14 | ================== 15 | 16 | - Django 3.2 compat thanks @danifus 17 | 18 | 19 | 0.6.1 (2020-04-30) 20 | ================== 21 | 22 | - Abstract job model 23 | - bulk_enqueue thanks @danifus 24 | - mypy fixes 25 | 26 | 0.5.2 (2020-02-28) 27 | ================== 28 | 29 | - new migrations and tables. 
30 | 
31 | 0.5.1 (2020-02-28)
32 | ==================
33 | 
34 | - rename to pgq to avoid clashing namespace.
35 | 
36 | 0.5.0 (2020-02-28)
37 | ==================
38 | 
39 | - fork and rename to django-pg-queue
40 | - queues separated by queue name
41 | - fixes bug where a queue attempts to work on a job that isn't its own.
42 | - black formatting; mypy types; code cleanup
43 | 
44 | 
45 | 0.4.3 (2019-06-12)
46 | ==================
47 | 
48 | - Fix failed task processing to allow progress on other tasks when there is a
49 |   task consistently failing.
50 | 
51 | 
52 | 0.4.2 (2019-05-17)
53 | ==================
54 | 
55 | - Add a postgres application_name to the worker.
56 | 
57 | 
58 | 0.4.1 (2019-05-16)
59 | ==================
60 | 
61 | - Handle exceptions raised outside of job execution
62 | - Don't allow listening without a notify channel
63 | 
64 | 
65 | 0.4.0 (2019-04-19)
66 | ==================
67 | 
68 | - Add dpq_scheduler, a replacement for celery beat.
69 | - Allow priorities to be negative.
70 | 
71 | 
72 | 0.3.0 (2019-03-19)
73 | ==================
74 | 
75 | - Implement warm shutdown on TERM and INT signals.
76 | - Rename management module to dpq.commands.
77 | 
78 | 
79 | 0.2.1 (2017-09-11)
80 | ==================
81 | 
82 | - Fix packaging to include migrations.
83 | 
84 | 
85 | 0.2.0 (2017-09-11)
86 | ==================
87 | 
88 | - Don't force subclassing to create a queue, you can just instantiate one.
89 | 
90 | 
91 | 0.1.0 (2017-09-10)
92 | ==================
93 | 
94 | - First release
95 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2017 Gavin Wahl
2 | Copyright 2020 SweetProcess
3 | 
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | 
7 | 1. Redistributions of source code must retain the above copyright notice, this
8 |    list of conditions and the following disclaimer.
9 | 
10 | 2. Redistributions in binary form must reproduce the above copyright notice,
11 |    this list of conditions and the following disclaimer in the documentation
12 |    and/or other materials provided with the distribution.
13 | 
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | django-pg-queue
2 | =====================
3 | 
4 | django-pg-queue is a task queue system for Django backed by postgres.
5 | 
6 | It was forked from the wonderful and simpler django-postgres-queue (https://github.com/gavinwahl/django-postgres-queue/),
7 | written by Gavin Wahl.
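A condensed sketch of the API, to give a feel for it up front (the full
walkthrough is in the Usage section below; the queue and task names here are
illustrative):

.. code:: python

    from pgq.queue import AtLeastOnceQueue

    def debug_task(queue, job):
        # Plain task functions receive the queue instance and the Job row.
        print(job.args)

    queue = AtLeastOnceQueue(tasks={'debug_task': debug_task}, queue='my-queue')
    queue.enqueue('debug_task', {'some_args': 0})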
8 | 
9 | 
10 | Why postgres?
11 | -------------
12 | 
13 | I thought you were never supposed to use an RDBMS as a queue? Well, postgres
14 | has some features that make it not as bad as you might think, and it has some
15 | compelling advantages.
16 | 
17 | - Transactional behavior and reliability.
18 | 
19 |   Adding tasks is atomic with respect to other database work. There is no need
20 |   to use ``transaction.on_commit`` hooks and there is no risk of a transaction
21 |   being committed but the tasks it queued being lost.
22 | 
23 |   Processing tasks is atomic with respect to other database work. Database work
24 |   done by a task will either be committed, or the task will not be marked as
25 |   processed, no exceptions. If the task only does database work, you achieve
26 |   true exactly-once message processing.
27 | 
28 | - Operational simplicity
29 | 
30 |   By reusing the durable, transactional storage that we're already using
31 |   anyway, there's no need to configure, monitor, and backup another stateful
32 |   service. For small teams and light workloads, this is the right trade-off.
33 | 
34 | - Easy introspection
35 | 
36 |   Since tasks are stored in a database table, it's easy to query and monitor
37 |   the state of the queue.
38 | 
39 | - Safety
40 | 
41 |   By using postgres transactions, there is no possibility of jobs being left in
42 |   a locked or ambiguous state if a worker dies. Tasks immediately become
43 |   available for another worker to pick up. You can even ``kill -9`` a worker
44 |   and be sure your database and queue will be left in a consistent state.
45 | 
46 | - Priority queues
47 | 
48 |   Since ordering is specified explicitly when selecting the next task to work
49 |   on, it's easy to ensure high-priority tasks are processed first.
50 | 
51 | 
52 | - Queues
53 | 
54 |   Implemented simply by filtering on a queue name in the query.
55 | 
56 | 
57 | 
58 | Disadvantages
59 | -------------
60 | 
61 | - Lower throughput than a dedicated queue server.
62 | - Harder to scale a relational database than a dedicated queue server.
63 | - Thundering herd. Postgres will notify all workers that LISTEN on the same channel.
64 | - With at-least-once delivery, a postgres transaction has to be held open for
65 |   the duration of the task. For long running tasks, this can cause table bloat
66 |   and performance problems.
67 | - When a task crashes or raises an exception under at-least-once delivery, it
68 |   immediately becomes eligible to be retried. If you want to implement a retry
69 |   delay, you must catch exceptions and requeue the task with a delay. If your
70 |   task crashes without throwing an exception (e.g. SIGKILL), you could end up in
71 |   an endless retry loop that prevents other tasks from being processed.
72 | 
73 | 
74 | How it works
75 | ------------
76 | 
77 | django-pg-queue is able to claim, process, and remove a task in a single query
78 | (shown here slightly simplified):
79 | 
80 | .. code:: sql
81 | 
82 |     DELETE FROM pgq_job
83 |     WHERE id = (
84 |         SELECT id
85 |         FROM pgq_job
86 |         WHERE execute_at <= now()
87 |         ORDER BY priority DESC, created_at
88 |         FOR UPDATE SKIP LOCKED
89 |         LIMIT 1
90 |     )
91 |     RETURNING *;
92 | 
93 | As soon as this query runs, the task is unable to be claimed by other workers.
94 | When the transaction commits, the task will be deleted. If the transaction
95 | rolls back or the worker crashes, the task will immediately become available
96 | for another worker.
97 | 
98 | To achieve at-least-once delivery, we begin a transaction, process the task,
99 | then commit the transaction. For at-most-once, we claim the task and
100 | immediately commit the transaction, then process the task. For tasks that don't
101 | have any external effects and only do database work, the at-least-once behavior
102 | is actually exactly-once (because both the claiming of the job and the database
103 | work will commit or roll back together).
104 | 
105 | 
106 | Comparison to Celery
107 | --------------------
108 | 
109 | django-pg-queue fills the same role as Celery. Unlike Celery, you must use postgres as
110 | the backend, but in exchange the library is small enough to read and understand in full.
111 | 
112 | 
113 | A note on the use of ``AtLeastOnceQueue`` and Django's ``transaction.on_commit()``
114 | ----------------------------------------------------------------------------------
115 | 
116 | A failure in an ``on_commit()`` callback will not cause that job to be retried
117 | when using an ``AtLeastOnceQueue`` (normally a job in an ``AtLeastOnceQueue``
118 | will remain in the queue if it fails). This is because
119 | ``on_commit()`` callbacks are executed after the transaction has been committed
120 | and, for django-pg-queue, the job is removed from the queue when the transaction
121 | commits.
122 | 
123 | If you require more certainty that the code in an ``on_commit()`` callback is
124 | executed successfully, you may need to ensure it is idempotent and call it from
125 | within the job rather than using ``on_commit()``.
126 | 
127 | Usage
128 | =====
129 | 
130 | Requirements
131 | ------------
132 | 
133 | django-pg-queue requires Python 3, at least postgres 9.5, and at least
134 | Django 2.1.
135 | 
136 | 
137 | Installation
138 | ------------
139 | 
140 | Install with pip::
141 | 
142 |     pip install django-pg-queue
143 | 
144 | Then add ``'pgq'`` to your ``INSTALLED_APPS``. Run ``manage.py migrate`` to
145 | create the jobs table.
146 | 
147 | Instantiate a queue object. This can go wherever you like and be named whatever
148 | you like. For example, ``someapp/queue.py``:
149 | 
150 | .. code:: python
151 | 
152 |     from pgq.queue import AtLeastOnceQueue
153 | 
154 |     queue = AtLeastOnceQueue(
155 |         tasks={
156 |             # ...
157 |         },
158 |         queue='my-queue',
159 |         notify_channel='my-queue',
160 |     )
161 | 
162 | 
163 | You will need to import this queue instance to queue or process tasks. Use
164 | ``AtLeastOnceQueue`` for at-least-once delivery, or ``AtMostOnceQueue`` for
165 | at-most-once delivery.
166 | 
167 | django-pg-queue comes with a management command base class that you can
168 | use to consume your tasks. It can be called whatever you like, for example in a
169 | ``someapp/management/commands/worker.py``:
170 | 
171 | .. code:: python
172 | 
173 |     from pgq.commands import Worker
174 | 
175 |     from someapp.queue import queue
176 | 
177 |     class Command(Worker):
178 |         queue = queue
179 | 
180 | Then you can run ``manage.py worker`` to start your worker.
181 | 
182 | A plain task function takes two arguments: the queue instance in use, and the Job instance for this task.
183 | It can be defined anywhere and called whatever you like. Tasks registered with the ``@task`` decorator are
184 | wrapped with retry handling and receive two extra arguments, the unwrapped task ``args`` and a ``JobMeta``. Here's an example:
185 | 
186 | .. code:: python
187 | 
188 |     from pgq.decorators import task
189 | 
190 |     from .queues import queue
191 | 
192 |     @task(queue)
193 |     def debug_task(queue, job, args, meta):
194 |         print(args)
195 | 
196 | Instead of using the task decorator, you can manually register a plain function taking ``(queue, job)`` as a task.
197 | Add it to your queue instance when it is being created:
198 | 
199 | .. code:: python
200 | 
201 |     queue = AtLeastOnceQueue(tasks={
202 |         'debug_task': debug_task,
203 |     }, queue='my-queue')
204 | 
205 | The key is the task name, used to queue the task. It doesn't have to match the
206 | function name.
207 | 
208 | If you used the task decorator, you can enqueue the task like this:
209 | 
210 | .. code:: python
211 | 
212 |     debug_task.enqueue({'some_args': 0})
213 | 
214 | 
215 | To manually queue the task, use the ``enqueue`` method on your queue instance:
216 | 
217 | .. code:: python
218 | 
219 |     queue.enqueue('debug_task', {'some_args': 0})
220 | 
221 | Assuming you have a worker running for this queue, the task will be run
222 | immediately. The second argument must be a single JSON-serializable value and
223 | will be available to the task as ``job.args``.
224 | 
225 | Tasks registered using the ``@task`` decorator will only be available on the
226 | queue if the file in which the task is defined has been imported. If your
227 | worker doesn't import the file containing the ``@task`` decorators somewhere,
228 | the tasks will not be available for dispatch. Importing files in the
229 | ``apps.py`` ``AppConfig.ready()`` method will ensure that the tasks are always
230 | available on the queue without having to import them in your worker just for
231 | the import side effects.
232 | 
233 | .. code:: python
234 | 
235 |     # Contents of someapp/apps.py
236 |     from django.apps import AppConfig
237 | 
238 |     class SomeAppAppConfig(AppConfig):
239 |         def ready(self):
240 |             # Tasks registered with @task are defined in this import
241 |             import someapp.tasks
242 | 
243 | Multiple Queues
244 | ---------------
245 | 
246 | You may run multiple queues, and workers may each listen to a queue. You can have multiple
247 | workers listening to the same queue too. A queue is implemented as a CharField in the
248 | database; each queue simply filters for jobs matching its own queue name.
249 | 
250 | Bulk Enqueue
251 | ------------
252 | 
253 | Many jobs can be efficiently created using ``bulk_enqueue()``, which accepts one
254 | task name for all the jobs being created and a list of dictionaries, each containing
255 | ``args`` for the task to execute with and, optionally, a ``priority`` and an
256 | ``execute_at`` for that particular job.
257 | 
258 | .. code:: python
259 | 
260 |     queue.bulk_enqueue(
261 |         'debug_task',
262 |         [
263 |             {'args': {'some_args': 0}},
264 |             {
265 |                 'args': {'some_args': 10},
266 |                 'priority': 10,
267 |                 'execute_at': timezone.now() + timedelta(days=1),
268 |             },
269 |         ]
270 |     )
271 | 
272 | 
273 | Monitoring
274 | ----------
275 | 
276 | Tasks are just database rows stored in the ``pgq_job`` table, so you can
277 | monitor the system with SQL.
278 | 
279 | To get a count of current tasks:
280 | 
281 | .. code:: sql
282 | 
283 |     SELECT queue, count(*) FROM pgq_job WHERE execute_at <= now() GROUP BY queue
284 | 
285 | 
286 | This will include both tasks ready to process and tasks currently being
287 | processed. To see tasks currently being processed, we need visibility into
288 | postgres row locks. This can be provided by the `pgrowlocks extension
289 | <https://www.postgresql.org/docs/current/pgrowlocks.html>`_. Once
290 | installed, this query will count currently-running tasks:
291 | 
292 | .. code:: sql
293 | 
294 |     SELECT queue, count(*)
295 |     FROM pgrowlocks('pgq_job')
296 |     WHERE 'For Update' = ANY(modes)
297 |     GROUP BY queue;
298 | 
299 | You could join the results of ``pgrowlocks`` with ``pgq_job`` to get the full
300 | list of tasks in progress if you want.
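The same pending-jobs count can also be taken from Python with the ORM, if you
would rather not run raw SQL. A sketch against the built-in ``Job`` model
(substitute your own model if you subclass ``BaseJob``):

.. code:: python

    from django.db.models import Count
    from django.utils import timezone

    from pgq.models import Job

    # Pending (and currently-processing) jobs per queue, mirroring the
    # first SQL query above.
    counts = (
        Job.objects.filter(execute_at__lte=timezone.now())
        .values('queue')
        .annotate(count=Count('id'))
    )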
301 | 
302 | Logging
303 | -------
304 | 
305 | django-pg-queue logs through Python's logging framework, so it can be
306 | configured with the ``LOGGING`` dict in your Django settings. It will not log
307 | anything under the default config, so be sure to configure some form of
308 | logging. Everything is logged under the ``pgq`` namespace. Here is an example
309 | configuration that will log INFO level messages to stdout:
310 | 
311 | .. code:: python
312 | 
313 |     LOGGING = {
314 |         'version': 1,
315 |         'root': {
316 |             'level': 'DEBUG',
317 |             'handlers': ['console'],
318 |         },
319 |         'formatters': {
320 |             'verbose': {
321 |                 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
322 |             },
323 |         },
324 |         'handlers': {
325 |             'console': {
326 |                 'level': 'INFO',
327 |                 'class': 'logging.StreamHandler',
328 |                 'formatter': 'verbose',
329 |             },
330 |         },
331 |         'loggers': {
332 |             'pgq': {
333 |                 'handlers': ['console'],
334 |                 'level': 'INFO',
335 |                 'propagate': False,
336 |             },
337 |         }
338 |     }
339 | 
340 | It would also be sensible to log WARNING and higher messages to something like
341 | Sentry:
342 | 
343 | .. code:: python
344 | 
345 |     LOGGING = {
346 |         'version': 1,
347 |         'root': {
348 |             'level': 'INFO',
349 |             'handlers': ['sentry', 'console'],
350 |         },
351 |         'formatters': {
352 |             'verbose': {
353 |                 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
354 |             },
355 |         },
356 |         'handlers': {
357 |             'console': {
358 |                 'level': 'INFO',
359 |                 'class': 'logging.StreamHandler',
360 |                 'formatter': 'verbose',
361 |             },
362 |             'sentry': {
363 |                 'level': 'WARNING',
364 |                 'class': 'raven.contrib.django.handlers.SentryHandler',
365 |             },
366 |         },
367 |         'loggers': {
368 |             'pgq': {
369 |                 'level': 'INFO',
370 |                 'handlers': ['console', 'sentry'],
371 |                 'propagate': False,
372 |             },
373 |         },
374 |     }
375 | 
376 | You could also log to a file by using the built-in ``logging.FileHandler``.
377 | 
378 | Useful Recipes
379 | ==============
380 | These recipes aren't officially supported features of `django-pg-queue`. We provide them so that you can mimic some of the common features in other task queues.
381 | 
382 | `CELERY_ALWAYS_EAGER`
383 | ---------------------
384 | Celery uses the `CELERY_ALWAYS_EAGER` setting to run a task immediately, without queueing it for a worker. It could be used during tests, and while debugging in a development environment with any workers turned off.
385 | 
386 | .. code:: python
387 | 
388 |     from django.conf import settings
389 | 
390 |     from pgq.queue import AtLeastOnceQueue
391 | 
392 |     class EagerAtLeastOnceQueue(AtLeastOnceQueue):
393 |         def enqueue(self, *args, **kwargs):
394 |             job = super().enqueue(*args, **kwargs)
395 |             # QUEUE_ALWAYS_EAGER is your own settings flag, not a pgq setting.
396 |             if settings.QUEUE_ALWAYS_EAGER:
397 |                 self.run_job(job)
398 |             return job
399 | 
--------------------------------------------------------------------------------
/dev-requirements.txt:
--------------------------------------------------------------------------------
1 | Django==3.0.3
2 | mypy==0.770
3 | mypy-extensions==0.4.3
4 | django-stubs==1.5.0
5 | psycopg2-binary==2.8.4
6 | typing-extensions==3.7.4.1
--------------------------------------------------------------------------------
/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 | 
5 | if __name__ == "__main__":
6 |     os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproj.settings")
7 |     try:
8 |         from django.core.management import execute_from_command_line
9 |     except ImportError:
10 |         # The above import may fail for some other reason.
Ensure that the 11 | # issue is really that Django is missing to avoid masking other 12 | # exceptions on Python 2. 13 | try: 14 | import django 15 | except ImportError: 16 | raise ImportError( 17 | "Couldn't import Django. Are you sure it's installed and " 18 | "available on your PYTHONPATH environment variable? Did you " 19 | "forget to activate a virtual environment?" 20 | ) 21 | raise 22 | execute_from_command_line(sys.argv) 23 | -------------------------------------------------------------------------------- /pgq/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/pgq/__init__.py -------------------------------------------------------------------------------- /pgq/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | 4 | class PgqConfig(AppConfig): 5 | name = "pgq" 6 | -------------------------------------------------------------------------------- /pgq/commands.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import signal 3 | import time 4 | from typing import Any, Optional, Set 5 | import os 6 | 7 | from django.core.management.base import BaseCommand 8 | from django.db import connection 9 | 10 | from .exceptions import PgqException, PgqNoDefinedQueue 11 | from .queue import Queue 12 | 13 | 14 | class Worker(BaseCommand): 15 | # The queue to process. Subclass and set this. 16 | queue: Optional[Queue] = None 17 | logger = logging.getLogger(__name__) 18 | 19 | def add_arguments(self, parser: Any) -> None: 20 | parser.add_argument( 21 | "--delay", 22 | type=float, 23 | default=1, 24 | help="The number of seconds to wait to check for new tasks.", 25 | ) 26 | parser.add_argument( 27 | "--listen", 28 | action="store_true", 29 | help="Use LISTEN/NOTIFY to wait for events.", 30 | ) 31 | 32 | def handle_shutdown(self, sig: Any, frame: Any) -> None: 33 | if self._in_task: 34 | self.logger.info("Waiting for active tasks to finish...") 35 | self._shutdown = True 36 | else: 37 | raise InterruptedError 38 | 39 | def run_available_tasks(self) -> None: 40 | """ 41 | Runs tasks continuously until there are no more available. 42 | """ 43 | # Prevents tasks that failed from blocking others. 44 | failed_tasks: Set[int] = set() 45 | 46 | if self.queue is None: 47 | raise PgqNoDefinedQueue 48 | 49 | while True: 50 | self._in_task = True 51 | try: 52 | job = self.queue.run_once(exclude_ids=failed_tasks) 53 | if job is None: 54 | # No more jobs 55 | return 56 | except PgqException as e: 57 | if e.job is not None: 58 | # Make sure we do at least one more iteration of the loop 59 | # with the failed task excluded. 
60 |                     failed_job = e.job
61 |                     self.logger.exception(
62 |                         "Error in %r: %r.",
63 |                         failed_job,
64 |                         e,
65 |                         extra={"data": {"job": failed_job.to_json()}},
66 |                     )
67 |                     failed_tasks.add(failed_job.id)
68 |                 else:
69 |                     raise
70 |             self._in_task = False
71 |             if self._shutdown:
72 |                 raise InterruptedError
73 | 
74 |     def handle(self, **options: Any) -> None:  # type: ignore
75 |         self._shutdown = False
76 |         self._in_task = False
77 | 
78 |         self.delay: int = options["delay"]
79 |         self.listen: bool = options["listen"]
80 | 
81 |         if self.queue is None:
82 |             raise PgqNoDefinedQueue
83 | 
84 |         with connection.cursor() as cursor:
85 |             cursor.execute("SET application_name TO %s", ["pgq#{}".format(os.getpid())])
86 | 
87 |         if self.listen:
88 |             self.queue.listen()
89 |         try:
90 |             # Handle the signals for warm shutdown.
91 |             signal.signal(signal.SIGINT, self.handle_shutdown)
92 |             signal.signal(signal.SIGTERM, self.handle_shutdown)
93 | 
94 |             while True:
95 |                 self.run_available_tasks()
96 |                 self.wait()
97 |         except InterruptedError:
98 |             # got shutdown signal
99 |             pass
100 | 
101 |     def wait(self) -> int:
102 |         if self.listen and self.queue is not None:
103 |             count = len(self.queue.wait(self.delay))
104 |             self.logger.debug("Woke up with %s NOTIFYs.", count)
105 |             return count
106 |         else:
107 |             time.sleep(self.delay)
108 |             return 1
109 | 
--------------------------------------------------------------------------------
/pgq/decorators.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import datetime
3 | from dataclasses import dataclass
4 | import functools
5 | import logging
6 | import random
7 | from typing import Any, Callable, Dict, Optional, Type, TYPE_CHECKING
8 | 
9 | from django.db import transaction
10 | 
11 | if TYPE_CHECKING:
12 |     from .queue import Queue
13 |     from .models import BaseJob
14 | 
15 |     DelayFnType = Callable[[int], datetime.timedelta]
16 |     TaskFnType = Callable[[Queue, BaseJob], Any]
17 | else:
18 |     Queue = None
19 |     BaseJob = None
20 |     DelayFnType = None
21 |     TaskFnType = None
22 | 
23 | 
24 | def repeat(delay: datetime.timedelta) -> Callable[..., Any]:
25 |     """
26 |     Endlessly repeats a task, every `delay` (a timedelta).
27 | 
28 |     Under at-least-once delivery, the tasks cannot overlap. The next scheduled
29 |     task only becomes visible once the previous one commits.
30 | 
31 |     @repeat(datetime.timedelta(minutes=5))
32 |     def task(queue, job):
33 |         pass
34 | 
35 |     This will run `task` every 5 minutes. It's up to you to kick off the first
36 |     task, though.
37 | """ 38 | 39 | def decorator(fn: TaskFnType) -> TaskFnType: 40 | def inner(queue: Queue, job: BaseJob) -> Any: 41 | queue.enqueue( 42 | job.task, 43 | job.args, 44 | execute_at=job.execute_at + delay, 45 | priority=job.priority, 46 | ) 47 | return fn(queue, job) 48 | 49 | return inner 50 | 51 | return decorator 52 | 53 | 54 | def exponential_with_jitter(offset: int = 6) -> DelayFnType: 55 | def delayfn(retries: int) -> datetime.timedelta: 56 | jitter = random.randrange(-15, 15) 57 | return datetime.timedelta(seconds=2 ** (retries + offset) + jitter) 58 | 59 | return delayfn 60 | 61 | 62 | def retry( 63 | max_retries: int = 0, 64 | delay_offset_seconds: int = 5, 65 | delayfn: Optional[DelayFnType] = None, 66 | Exc: Type[Exception] = Exception, 67 | on_failure: Optional[ 68 | Callable[[Queue, BaseJob, Any, "JobMeta", Exception], Any] 69 | ] = None, 70 | on_success: Optional[Callable[[BaseJob, Any], Any]] = None, 71 | JobMetaType: Optional[Type["JobMeta"]] = None, 72 | ): 73 | if delayfn is None: 74 | delayfn = exponential_with_jitter(delay_offset_seconds) 75 | if JobMetaType is None: 76 | JobMetaType = JobMeta 77 | 78 | def decorator(fn): 79 | logger = logging.getLogger(__name__) 80 | 81 | @functools.wraps(fn) 82 | def inner(queue, job): 83 | original_job_id = job.args["meta"].setdefault("job_id", job.id) 84 | 85 | try: 86 | args = copy.deepcopy(job.args) 87 | with transaction.atomic(): 88 | result = fn( 89 | queue, job, args["func_args"], JobMetaType(**args["meta"]) 90 | ) 91 | except Exc as e: 92 | retries = job.args["meta"].get("retries", 0) 93 | if retries < max_retries: 94 | job.args["meta"].update( 95 | {"retries": retries + 1, "job_id": original_job_id} 96 | ) 97 | delay = delayfn(retries) 98 | job.execute_at += delay 99 | job.id = None 100 | job.save(force_insert=True) 101 | logger.warning( 102 | "Task %r failed: %s. Retrying in %s.", 103 | job, 104 | e, 105 | delay, 106 | exc_info=True, 107 | ) 108 | else: 109 | if on_failure: 110 | args = copy.deepcopy(job.args) 111 | return on_failure( 112 | queue, 113 | job, 114 | args["func_args"], 115 | JobMetaType(**args["meta"]), 116 | error=e, 117 | ) 118 | logger.exception( 119 | "Task %r exceeded its retry limit: %s.", job, e, exc_info=True 120 | ) 121 | else: 122 | if on_success is not None: 123 | on_success(job, result) 124 | return result 125 | 126 | return inner 127 | 128 | return decorator 129 | 130 | 131 | class AsyncTask: 132 | """ 133 | A useful standin for celery async tasks. 134 | 135 | Represents an async task, can be used like so: 136 | 137 | @task 138 | def increment_followers(...): ... 139 | 140 | increment_followers.enqueue(...) 141 | """ 142 | 143 | def __init__(self, queue: Queue, name: str): 144 | self.queue = queue 145 | self.name = name 146 | 147 | def enqueue( 148 | self, args: Dict[str, Any], meta: Optional[Dict[str, Any]] = None 149 | ) -> BaseJob: 150 | wrapped_args = {"func_args": args, "meta": meta if meta is not None else {}} 151 | return self.queue.enqueue(self.name, wrapped_args) 152 | 153 | def __str__(self) -> str: 154 | return f"AsyncTask({self.queue.notify_channel}, {self.name})" 155 | 156 | 157 | def task( 158 | queue: Queue, 159 | max_retries: int = 0, 160 | delay_offset_seconds: int = 5, 161 | on_failure: Optional[Callable[..., Any]] = None, 162 | JobMetaType: Optional[Type["JobMeta"]] = None, 163 | ) -> Callable[..., Any]: 164 | """ 165 | Decorator to register the task to the queue. 
166 | 167 | @task(queuename, max_retries=5) 168 | 169 | delay_offset_seconds: 170 | 5th retry will take half hour at 5; delay (seconds) = 2 ** (retry + offset) 171 | """ 172 | if JobMetaType is None: 173 | JobMetaType = JobMeta 174 | 175 | def register(fn: Callable[..., Any]) -> AsyncTask: 176 | name = fn.__name__ 177 | assert name not in queue.tasks 178 | queue.tasks[name] = retry( 179 | max_retries=max_retries, 180 | delay_offset_seconds=delay_offset_seconds, 181 | on_failure=on_failure, 182 | JobMetaType=JobMetaType, 183 | )(fn) 184 | return AsyncTask(queue, name) 185 | 186 | return register 187 | 188 | 189 | @dataclass 190 | class JobMeta: 191 | job_id: int 192 | retries: int = 0 193 | -------------------------------------------------------------------------------- /pgq/exceptions.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, TYPE_CHECKING 2 | 3 | if TYPE_CHECKING: 4 | from .models import BaseJob 5 | else: 6 | BaseJob = None 7 | 8 | 9 | class PgqException(Exception): 10 | """Base exception for pgq""" 11 | 12 | job: Optional[BaseJob] = None 13 | 14 | def __init__(self, job: Optional[BaseJob] = None): 15 | self.job = job 16 | 17 | 18 | class PgqIncorrectQueue(PgqException): 19 | """Job placed on incorrect queue.""" 20 | 21 | 22 | class PgqNoDefinedQueue(PgqException): 23 | """There is no queue to work.""" 24 | -------------------------------------------------------------------------------- /pgq/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.0.3 on 2020-02-28 06:27 2 | 3 | import django.contrib.postgres.fields.jsonb 4 | import django.contrib.postgres.functions 5 | from django.db import migrations, models 6 | 7 | 8 | try: 9 | from django.db.models import JSONField 10 | except ImportError: 11 | from django.contrib.postgres.fields import JSONField # type: ignore[misc] 12 | 13 | 14 | class Migration(migrations.Migration): 15 | 16 | initial = True 17 | 18 | dependencies = [] # type: ignore 19 | 20 | operations = [ 21 | migrations.CreateModel( 22 | name="Job", 23 | fields=[ 24 | ("id", models.BigAutoField(primary_key=True, serialize=False)), 25 | ( 26 | "created_at", 27 | models.DateTimeField( 28 | default=django.contrib.postgres.functions.TransactionNow 29 | ), 30 | ), 31 | ( 32 | "execute_at", 33 | models.DateTimeField( 34 | default=django.contrib.postgres.functions.TransactionNow 35 | ), 36 | ), 37 | ( 38 | "priority", 39 | models.IntegerField( 40 | default=0, 41 | help_text="Jobs with higher priority will be processed first.", 42 | ), 43 | ), 44 | ("task", models.CharField(max_length=255)), 45 | ("args", JSONField()), 46 | ( 47 | "queue", 48 | models.CharField( 49 | default="default", 50 | help_text="Use a unique name to represent each queue.", 51 | max_length=32, 52 | ), 53 | ), 54 | ], 55 | ), 56 | migrations.AddIndex( 57 | model_name="job", 58 | index=models.Index( 59 | fields=["-priority", "created_at"], name="pgq_job_priorit_947f45_idx" 60 | ), 61 | ), 62 | migrations.AddIndex( 63 | model_name="job", 64 | index=models.Index(fields=["queue"], name="pgq_job_queue_99c86e_idx"), 65 | ), 66 | ] 67 | -------------------------------------------------------------------------------- /pgq/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/pgq/migrations/__init__.py 
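Taken together, ``retry``, ``task`` and ``JobMeta`` above give celery-style
retries on top of the queue. A short usage sketch, adapted from
``testproj/test_decorators.py`` (the queue name, task body, and payload are
illustrative):

.. code:: python

    from pgq.decorators import task, JobMeta
    from pgq.models import Job
    from pgq.queue import AtLeastOnceQueue, Queue

    queue = AtLeastOnceQueue(tasks={}, queue='demo')

    @task(queue, max_retries=2)
    def flaky_task(queue: Queue, job: Job, args, meta: JobMeta) -> None:
        # Raising makes the retry wrapper re-insert the job with
        # args['meta']['retries'] incremented and a jittered exponential delay.
        raise ValueError(args['payload'])

    flaky_task.enqueue({'payload': 'boom'})
    queue.run_once()  # runs the task; on failure a delayed retry job is created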
-------------------------------------------------------------------------------- /pgq/models.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Iterable, Optional, Sequence, Type, TypeVar 2 | 3 | from django.db import models 4 | from django.db import connection 5 | from django.contrib.postgres.functions import TransactionNow 6 | 7 | try: 8 | from django.db.models import JSONField 9 | except ImportError: 10 | from django.contrib.postgres.fields import JSONField # type: ignore[misc] 11 | 12 | DEFAULT_QUEUE_NAME = "default" 13 | 14 | 15 | _Self = TypeVar("_Self", bound="BaseJob") 16 | 17 | 18 | class BaseJob(models.Model): 19 | id = models.BigAutoField(primary_key=True) 20 | created_at = models.DateTimeField(default=TransactionNow) 21 | execute_at = models.DateTimeField(default=TransactionNow) 22 | priority = models.IntegerField( 23 | default=0, help_text="Jobs with higher priority will be processed first." 24 | ) 25 | task = models.CharField(max_length=255) 26 | args = JSONField() 27 | queue = models.CharField( 28 | max_length=32, 29 | default=DEFAULT_QUEUE_NAME, 30 | help_text="Use a unique name to represent each queue.", 31 | ) 32 | 33 | class Meta: 34 | abstract = True 35 | indexes = [ 36 | models.Index(fields=["-priority", "created_at"]), 37 | models.Index(fields=["queue"]), 38 | ] 39 | 40 | def __str__(self) -> str: 41 | return "%s: %s" % (self.id, self.task) 42 | 43 | @classmethod 44 | def dequeue( 45 | cls: Type[_Self], 46 | exclude_ids: Optional[Iterable[int]] = None, 47 | tasks: Optional[Sequence[str]] = None, 48 | queue: str = DEFAULT_QUEUE_NAME, 49 | ) -> Optional[_Self]: 50 | """ 51 | Claims the first available task and returns it. If there are no 52 | tasks available, returns None. 53 | 54 | exclude_ids: Iterable[int] - excludes jobs with these ids 55 | tasks: Optional[Sequence[str]] - filters by jobs with these tasks. 56 | 57 | For at-most-once delivery, commit the transaction before 58 | processing the task. For at-least-once delivery, dequeue and 59 | finish processing the task in the same transaction. 60 | 61 | To put a job back in the queue, you can just call 62 | .save(force_insert=True) on the returned object. 
63 | """ 64 | 65 | WHERE = "WHERE execute_at <= now() AND NOT id = ANY(%s) AND queue = %s" 66 | args = [[] if exclude_ids is None else list(exclude_ids), queue] 67 | if tasks is not None: 68 | WHERE += " AND TASK = ANY(%s)" 69 | args.append(tasks) 70 | 71 | jobs = list( 72 | cls.objects.raw( 73 | """ 74 | DELETE FROM {db_table} 75 | WHERE id = ( 76 | SELECT id 77 | FROM {db_table} 78 | {WHERE} 79 | ORDER BY priority DESC, created_at 80 | FOR UPDATE SKIP LOCKED 81 | LIMIT 1 82 | ) 83 | RETURNING *; 84 | """.format( 85 | db_table=connection.ops.quote_name(cls._meta.db_table), WHERE=WHERE 86 | ), 87 | args, 88 | ) 89 | ) 90 | assert len(jobs) <= 1 91 | if jobs: 92 | return jobs[0] 93 | else: 94 | return None 95 | 96 | def to_json(self) -> Dict[str, Any]: 97 | return { 98 | "id": self.id, 99 | "created_at": self.created_at, 100 | "execute_at": self.execute_at, 101 | "priority": self.priority, 102 | "queue": self.queue, 103 | "task": self.task, 104 | "args": self.args, 105 | } 106 | 107 | 108 | class Job(BaseJob): 109 | """pgq builtin Job model""" 110 | -------------------------------------------------------------------------------- /pgq/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/pgq/py.typed -------------------------------------------------------------------------------- /pgq/queue.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import contextlib 3 | import datetime 4 | import logging 5 | import select 6 | import time 7 | from typing import ( 8 | Any, 9 | Callable, 10 | Dict, 11 | Iterable, 12 | Generic, 13 | List, 14 | Optional, 15 | Sequence, 16 | Tuple, 17 | Type, 18 | TypeVar, 19 | TYPE_CHECKING, 20 | ) 21 | 22 | from django.db import connection, transaction 23 | 24 | from .exceptions import PgqException 25 | from .models import BaseJob, Job, DEFAULT_QUEUE_NAME 26 | 27 | 28 | _Job = TypeVar("_Job", bound=BaseJob) 29 | # mypy doesn't support binding to BaseQueue[_Job] and may never do so... 30 | if TYPE_CHECKING: 31 | _Self = TypeVar("_Self", bound="BaseQueue"[Any]) 32 | else: 33 | _Self = None 34 | 35 | 36 | @contextlib.contextmanager 37 | def maybe_atomic(is_atomic: bool): 38 | if is_atomic: 39 | with transaction.atomic(): 40 | yield 41 | else: 42 | yield 43 | 44 | 45 | class BaseQueue(Generic[_Job], metaclass=abc.ABCMeta): 46 | job_model: Type[_Job] 47 | logger = logging.getLogger(__name__) 48 | 49 | def __init__( 50 | self: _Self, 51 | tasks: Dict[str, Callable[[_Self, _Job], Any]], 52 | notify_channel: Optional[str] = None, 53 | queue: str = DEFAULT_QUEUE_NAME, 54 | ) -> None: 55 | self.tasks = tasks 56 | self.notify_channel = notify_channel 57 | self.queue = queue 58 | 59 | @abc.abstractmethod 60 | def run_once( 61 | self, exclude_ids: Optional[Iterable[int]] = None 62 | ) -> Optional[Tuple[_Job, Any]]: 63 | """Get a job from the queue and run it. 64 | 65 | Returns: 66 | - if a job was run: the Job obj run (now removed from the db) and 67 | it's returned values. 68 | - If there was no job, return None. 69 | 70 | If a job fails, ``PgqException`` is raised with the job object that 71 | failed stored in it. 
72 | """ 73 | raise NotImplementedError 74 | 75 | def run_job(self: _Self, job: _Job) -> Any: 76 | """Execute job, return the output of job.""" 77 | task = self.tasks[job.task] 78 | start_time = time.time() 79 | retval = task(self, job) 80 | self.logger.info( 81 | "Processing %r took %0.4f seconds. Task returned %r.", 82 | job, 83 | time.time() - start_time, 84 | retval, 85 | extra={"data": {"job": job.to_json(), "retval": retval,}}, 86 | ) 87 | return retval 88 | 89 | def enqueue( 90 | self: _Self, 91 | task: str, 92 | args: Optional[Dict[str, Any]] = None, 93 | execute_at: Optional[datetime.datetime] = None, 94 | priority: Optional[int] = None, 95 | ) -> _Job: 96 | assert task in self.tasks 97 | if args is None: 98 | args = {} 99 | 100 | kwargs: Dict[str, Any] = {"task": task, "args": args, "queue": self.queue} 101 | if execute_at is not None: 102 | kwargs["execute_at"] = execute_at 103 | if priority is not None: 104 | kwargs["priority"] = priority 105 | 106 | job = self.job_model.objects.create(**kwargs) 107 | if self.notify_channel: 108 | self.notify() 109 | return job 110 | 111 | def bulk_enqueue( 112 | self: _Self, 113 | task: str, 114 | kwargs_list: Sequence[Dict[str, Any]], 115 | batch_size: Optional[int] = None, 116 | ) -> List[_Job]: 117 | 118 | assert task in self.tasks 119 | 120 | jobs = self.job_model.objects.bulk_create( 121 | [ 122 | self.job_model(task=task, queue=self.queue, **kwargs) 123 | for kwargs in kwargs_list 124 | ], 125 | batch_size=batch_size, 126 | ) 127 | 128 | if self.notify_channel: 129 | self.notify() 130 | return jobs 131 | 132 | def listen(self) -> None: 133 | assert self.notify_channel, "You must set a notify channel in order to listen." 134 | with connection.cursor() as cur: 135 | cur.execute('LISTEN "{}";'.format(self.notify_channel)) 136 | 137 | def wait(self: _Self, timeout: int = 30) -> Sequence[str]: 138 | connection.connection.poll() 139 | notifies = self.filter_notifies() 140 | if notifies: 141 | return notifies 142 | 143 | select.select([connection.connection], [], [], timeout) 144 | connection.connection.poll() 145 | return self.filter_notifies() 146 | 147 | def filter_notifies(self: _Self) -> Sequence[str]: 148 | notifies = [ 149 | i 150 | for i in connection.connection.notifies 151 | if i.channel == self.notify_channel 152 | ] 153 | connection.connection.notifies = [ 154 | i 155 | for i in connection.connection.notifies 156 | if i.channel != self.notify_channel 157 | ] 158 | return notifies 159 | 160 | def notify(self: _Self) -> None: 161 | with connection.cursor() as cur: 162 | cur.execute('NOTIFY "%s";' % self.notify_channel) 163 | 164 | def _run_once( 165 | self: _Self, exclude_ids: Optional[Iterable[int]] = None, is_atomic: bool = True 166 | ) -> Optional[Tuple[_Job, Any]]: 167 | """Get a job from the queue and run it. 168 | 169 | Implements the same function signature as ``run_once()`` 170 | 171 | Returns: 172 | - if a job was run: the Job obj run (now removed from the db) and 173 | it's returned values. 174 | - If there was no job, return None. 175 | 176 | If a job fails, ``PgqException`` is raised with the job object that 177 | failed stored in it. 
178 | """ 179 | job = None 180 | try: 181 | with maybe_atomic(is_atomic): 182 | job = self.job_model.dequeue( 183 | exclude_ids=exclude_ids, queue=self.queue, tasks=list(self.tasks) 184 | ) 185 | if job: 186 | self.logger.debug( 187 | "Claimed %r.", job, extra={"data": {"job": job.to_json(),}} 188 | ) 189 | return job, self.run_job(job) 190 | else: 191 | return None 192 | except Exception as e: 193 | # Add job info to exception to be accessible for logging. 194 | raise PgqException(job=job) from e 195 | 196 | 197 | class Queue(BaseQueue[Job]): 198 | job_model = Job 199 | 200 | 201 | class AtMostOnceQueue(Queue): 202 | def run_once( 203 | self, exclude_ids: Optional[Iterable[int]] = None 204 | ) -> Optional[Tuple[Job, Any]]: 205 | assert not connection.in_atomic_block 206 | return self._run_once(exclude_ids=exclude_ids, is_atomic=False) 207 | 208 | 209 | class AtLeastOnceQueue(Queue): 210 | def run_once( 211 | self, exclude_ids: Optional[Iterable[int]] = None 212 | ) -> Optional[Tuple[Job, Any]]: 213 | return self._run_once(exclude_ids=exclude_ids, is_atomic=True) 214 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.7 3 | 4 | check_untyped_defs = False 5 | disallow_any_generics = True 6 | disallow_untyped_calls = True 7 | disallow_untyped_decorators = True 8 | ignore_errors = False 9 | ignore_missing_imports = True 10 | implicit_reexport = False 11 | strict_optional = True 12 | strict_equality = True 13 | no_implicit_optional = True 14 | warn_unused_ignores = True 15 | warn_redundant_casts = True 16 | warn_unused_configs = True 17 | warn_unreachable = True 18 | warn_no_return = True 19 | plugins = mypy_django_plugin.main 20 | 21 | [mypy.plugins.django-stubs] 22 | django_settings_module = testproj.settings 23 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from shutil import rmtree 4 | 5 | from setuptools import setup, Command 6 | 7 | VERSION = "0.8.2" 8 | 9 | HERE = os.path.abspath(os.path.dirname(__file__)) 10 | 11 | 12 | class UploadCommand(Command): 13 | """Support setup.py upload.""" 14 | 15 | description = "Build and publish the package." 
16 | user_options = [] 17 | 18 | @staticmethod 19 | def status(s): 20 | """Prints things in bold.""" 21 | print("\033[1m{0}\033[0m".format(s)) 22 | 23 | def initialize_options(self): 24 | pass 25 | 26 | def finalize_options(self): 27 | pass 28 | 29 | def run(self): 30 | try: 31 | self.status("Removing previous builds…") 32 | rmtree(os.path.join(HERE, "dist")) 33 | except OSError: 34 | pass 35 | 36 | self.status("Building Source and Wheel (universal) distribution…") 37 | os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable)) 38 | 39 | self.status("Uploading the package to PyPi via Twine…") 40 | os.system("twine upload dist/*") 41 | 42 | self.status("Pushing git tags…") 43 | os.system("git tag v{0}".format(VERSION)) 44 | os.system("git push --tags") 45 | 46 | sys.exit() 47 | 48 | 49 | setup( 50 | name="django-pg-queue", 51 | version=VERSION, 52 | packages=[ 53 | "pgq", 54 | "pgq.migrations", 55 | ], 56 | package_data={"pgq": ["py.typed"]}, 57 | license="BSD", 58 | long_description=open("README.rst").read(), 59 | author="SweetProcess", 60 | author_email="support@sweetprocess.com", 61 | url="https://github.com/SweetProcess/django-pg-queue", 62 | install_requires=[ 63 | "Django>=2.1", 64 | ], 65 | # $ setup.py publish support. 66 | cmdclass={ 67 | "upload": UploadCommand, 68 | }, 69 | ) 70 | -------------------------------------------------------------------------------- /testproj/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/testproj/__init__.py -------------------------------------------------------------------------------- /testproj/management/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/testproj/management/__init__.py -------------------------------------------------------------------------------- /testproj/management/commands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SweetProcess/django-pg-queue/b4b174ac5e8b516d30fe1afc0d07d3a0431a9626/testproj/management/commands/__init__.py -------------------------------------------------------------------------------- /testproj/management/commands/test_worker.py: -------------------------------------------------------------------------------- 1 | from pgq.commands import Worker 2 | 3 | from testproj.queue import queue 4 | 5 | 6 | class Command(Worker): 7 | queue = queue 8 | -------------------------------------------------------------------------------- /testproj/models.py: -------------------------------------------------------------------------------- 1 | from django.db import models 2 | 3 | from pgq.models import BaseJob 4 | 5 | 6 | class AltJob(BaseJob): 7 | pass 8 | -------------------------------------------------------------------------------- /testproj/queue.py: -------------------------------------------------------------------------------- 1 | import time 2 | from datetime import timedelta 3 | 4 | from pgq.decorators import repeat 5 | from pgq.models import BaseJob 6 | from pgq.queue import AtLeastOnceQueue, Queue 7 | 8 | 9 | def foo(queue: Queue, job: BaseJob): 10 | print("foo {}".format(job.args)) 11 | 12 | 13 | def timer(queue: Queue, job: BaseJob): 14 | print(time.time() - job.args["time"]) 15 | 16 | 17 | def n_times(queue: Queue, 
job: BaseJob): 18 | print("n_times", job.args["count"]) 19 | if job.args["count"] > 1: 20 | queue.enqueue(job.task, {"count": job.args["count"] - 1}) 21 | 22 | 23 | @repeat(timedelta(seconds=1)) 24 | def repeater(queue, job): 25 | print("repeat {}; eta {}".format(job, job.execute_at)) 26 | 27 | 28 | def long_task(queue, job): 29 | print("job started: {}".format(job.id)) 30 | time.sleep(10) 31 | print("job finished: {}".format(job.id)) 32 | 33 | 34 | queue = AtLeastOnceQueue( 35 | notify_channel="channel", 36 | tasks={ 37 | "foo": foo, 38 | "timer": timer, 39 | "repeater": repeater, 40 | "n_times": n_times, 41 | "long_task": long_task, 42 | }, 43 | ) 44 | -------------------------------------------------------------------------------- /testproj/requirements.txt: -------------------------------------------------------------------------------- 1 | django>=1.11 2 | psycopg2 3 | -------------------------------------------------------------------------------- /testproj/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for testproj project. 3 | 4 | Generated by 'django-admin startproject' using Django 1.11.3. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.11/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/1.11/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = "-7fx821yv50v06hl!xy(e^nvgpa*c5_hg7^ytywqkr+03tpp@n" 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 
26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] # type: ignore 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | "django.contrib.admin", 35 | "django.contrib.auth", 36 | "django.contrib.contenttypes", 37 | "django.contrib.sessions", 38 | "django.contrib.messages", 39 | "django.contrib.staticfiles", 40 | "testproj", 41 | "pgq", 42 | ] 43 | 44 | MIDDLEWARE = [ 45 | "django.middleware.security.SecurityMiddleware", 46 | "django.contrib.sessions.middleware.SessionMiddleware", 47 | "django.middleware.common.CommonMiddleware", 48 | "django.middleware.csrf.CsrfViewMiddleware", 49 | "django.contrib.auth.middleware.AuthenticationMiddleware", 50 | "django.contrib.messages.middleware.MessageMiddleware", 51 | "django.middleware.clickjacking.XFrameOptionsMiddleware", 52 | ] 53 | 54 | ROOT_URLCONF = "testproj.urls" 55 | 56 | TEMPLATES = [ 57 | { 58 | "BACKEND": "django.template.backends.django.DjangoTemplates", 59 | "DIRS": [], 60 | "APP_DIRS": True, 61 | "OPTIONS": { 62 | "context_processors": [ 63 | "django.template.context_processors.debug", 64 | "django.template.context_processors.request", 65 | "django.contrib.auth.context_processors.auth", 66 | "django.contrib.messages.context_processors.messages", 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = "testproj.wsgi.application" 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/1.11/ref/settings/#databases 77 | 78 | DATABASES = { 79 | "default": { 80 | "ENGINE": "django.db.backends.postgresql", 81 | "NAME": "pgq_testproj", 82 | "USER": "pgq", 83 | "PASSWORD": "pgq", 84 | } 85 | } 86 | 87 | if os.environ.get("GITHUB_WORKFLOW"): 88 | DATABASES = { 89 | "default": { 90 | "ENGINE": "django.db.backends.postgresql", 91 | "NAME": "pgq_testproj", 92 | "USER": "pgq", 93 | "PASSWORD": "pgq", 94 | "HOST": "127.0.0.1", 95 | "PORT": "5432", 96 | } 97 | } 98 | 99 | 100 | # Password validation 101 | # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators 102 | 103 | AUTH_PASSWORD_VALIDATORS = [ 104 | { 105 | "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa 106 | }, 107 | {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",}, 108 | {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",}, 109 | {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",}, 110 | ] 111 | 112 | 113 | # Internationalization 114 | # https://docs.djangoproject.com/en/1.11/topics/i18n/ 115 | 116 | LANGUAGE_CODE = "en-us" 117 | 118 | TIME_ZONE = "UTC" 119 | 120 | USE_I18N = True 121 | 122 | USE_L10N = True 123 | 124 | USE_TZ = True 125 | 126 | 127 | # Static files (CSS, JavaScript, Images) 128 | # https://docs.djangoproject.com/en/1.11/howto/static-files/ 129 | 130 | STATIC_URL = "/static/" 131 | 132 | LOGGING = { 133 | "version": 1, 134 | "handlers": { 135 | "console": { 136 | "class": "logging.StreamHandler", 137 | "level": "CRITICAL", 138 | "formatter": "verbose", 139 | }, 140 | }, 141 | "formatters": { 142 | "verbose": { 143 | "format": "%(levelname)s %(asctime)s %(name)s %(process)d: %(message)s", 144 | }, 145 | "simple": {"format": "%(levelname)s %(message)s",}, 146 | }, 147 | "loggers": {"pgq": {"handlers": ["console"], "level": "DEBUG",},}, 148 | } 149 | -------------------------------------------------------------------------------- /testproj/test_decorators.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from django.contrib.auth.models 
import Group 4 | from django.test import TestCase 5 | 6 | from pgq.decorators import task, JobMeta 7 | from pgq.models import Job 8 | from pgq.queue import AtLeastOnceQueue, AtMostOnceQueue, Queue 9 | 10 | 11 | class PgqDecoratorsTests(TestCase): 12 | def test_using_task_decorator_to_add_to_queue(self) -> None: 13 | """ 14 | The task decorator makes a celery-like task object 15 | which can be used for adding tasks to the queue and registering 16 | the task to the queue. 17 | """ 18 | queue = AtLeastOnceQueue(tasks={}) 19 | 20 | @task(queue) 21 | def demotask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> int: 22 | return job.id 23 | 24 | demotask.enqueue({"count": 5}) 25 | self.assertIn("demotask", queue.tasks) 26 | queue.run_once() 27 | 28 | def test_atleastonce_retry_during_database_failure(self) -> None: 29 | """ 30 | Force a database error in the task. Check that it was retried. 31 | """ 32 | 33 | queue = AtLeastOnceQueue(tasks={}) 34 | 35 | @task(queue, max_retries=2) 36 | def failuretask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> None: 37 | # group has max 150 chars for its name. 38 | Group.objects.create(name="!" * 151) 39 | return None 40 | 41 | failuretask.enqueue({}) 42 | originaljob = Job.objects.all()[0] 43 | 44 | queue.run_once() 45 | 46 | retryjob = Job.objects.all()[0] 47 | self.assertNotEqual(originaljob.id, retryjob.id) 48 | self.assertEqual(retryjob.args["meta"]["retries"], 1) 49 | -------------------------------------------------------------------------------- /testproj/test_queues.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | from pgq.models import Job 4 | from pgq.queue import AtLeastOnceQueue, Queue 5 | 6 | 7 | def demotask(queue: Queue, job: Job) -> int: 8 | return job.id 9 | 10 | 11 | class PgqMultipleQueueTests(TestCase): 12 | def test_multiple_queues_mutually_exclusive_tasks(self) -> None: 13 | """ 14 | Test for a bug where defining multiple queues 15 | with exclusive tasks raises KeyError when a worker for one queue 16 | gets a job meant for a different queue 17 | """ 18 | 19 | queue1 = AtLeastOnceQueue(tasks={"task1": demotask, "task3": demotask}) 20 | queue2 = AtLeastOnceQueue(tasks={"task2": demotask, "task4": demotask}) 21 | 22 | queue1.enqueue("task1", {"count": 5}) 23 | 24 | queue2.run_once() 25 | 26 | def test_dequeue_without_tasks(self) -> None: 27 | """ 28 | Check that a job can dequeue without any tasks given. 29 | """ 30 | Job.dequeue() 31 | -------------------------------------------------------------------------------- /testproj/tests.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from typing import Any, Iterable, Optional, Tuple 3 | 4 | from django.contrib.auth.models import Group 5 | from django.db import transaction 6 | from django.test import TestCase, TransactionTestCase 7 | from django.utils import timezone 8 | 9 | from pgq.decorators import task, JobMeta 10 | from pgq.exceptions import PgqException 11 | from pgq.models import Job, DEFAULT_QUEUE_NAME 12 | from pgq.queue import AtLeastOnceQueue, AtMostOnceQueue, BaseQueue, Queue 13 | from pgq.commands import Worker 14 | 15 | from .models import AltJob 16 | 17 | 18 | def demotask(queue: Queue, job: Job) -> int: 19 | return job.id 20 | 21 | 22 | class PgqQueueTests(TestCase): 23 | def test_create_job_on_queue(self) -> None: 24 | """ 25 | Creates a basic queue with a name, and puts the job onto the queue. 
26 | """ 27 | NAME = "machine_a" 28 | queue = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME) 29 | 30 | queue.enqueue("demotask", {"count": 5}) 31 | job = Job.dequeue(queue=queue.queue) 32 | if job is None: 33 | self.fail() 34 | self.assertEqual(job.args["count"], 5) 35 | self.assertEqual(job.queue, NAME) 36 | 37 | def test_job_contained_to_queue(self) -> None: 38 | """ 39 | Test that a job added to one queue won't be visible on another queue. 40 | """ 41 | NAME = "machine_a" 42 | queue = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME) 43 | 44 | NAME2 = "machine_b" 45 | queue2 = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME2) 46 | 47 | queue.enqueue("demotask", {"count": 5}) 48 | job = Job.dequeue(queue=queue2.queue) 49 | self.assertEqual(job, None) 50 | 51 | job = Job.dequeue(queue=queue.queue) 52 | self.assertNotEqual(job, None) 53 | 54 | def test_job_legacy_queues(self) -> None: 55 | """ 56 | Test that jobs can be added without a queue name defined. 57 | """ 58 | queue = AtLeastOnceQueue(tasks={"demotask": demotask}) 59 | 60 | queue.enqueue("demotask", {"count": 5}) 61 | job = Job.dequeue(queue=queue.queue) 62 | if job is None: 63 | self.fail() 64 | self.assertEqual(job.args["count"], 5) 65 | self.assertEqual(job.queue, DEFAULT_QUEUE_NAME) 66 | 67 | def test_same_name_queues_can_fetch_tasks(self) -> None: 68 | NAME = "machine_a" 69 | queue = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME) 70 | 71 | queue2 = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME) 72 | 73 | queue.enqueue("demotask", {"count": 5}) 74 | job = Job.dequeue(queue=queue2.queue) 75 | # The job is dequeued.. 76 | self.assertNotEqual(job, None) 77 | 78 | # now the job should be gone... 79 | job = Job.dequeue(queue=queue.queue) 80 | self.assertEqual(job, None) 81 | 82 | def test_bulk_enqueue_tasks(self) -> None: 83 | NAME = "machine_a" 84 | queue = AtLeastOnceQueue(tasks={"demotask": demotask}, queue=NAME) 85 | 86 | self.assertEqual(Job.objects.count(), 0) 87 | 88 | day_from_now = timezone.now() + datetime.timedelta(days=1) 89 | task_name = "demotask" 90 | ret_jobs = queue.bulk_enqueue( 91 | task_name, 92 | [ 93 | {"args": {"count": 5}}, 94 | {"args": {"count": 7}, "priority": 10, "execute_at": day_from_now,}, 95 | ], 96 | ) 97 | jobs = Job.objects.all() 98 | self.assertEqual(len(ret_jobs), 2) 99 | self.assertEqual(len(jobs), 2) 100 | for job in jobs: 101 | self.assertEqual(job.queue, NAME) 102 | self.assertEqual(job.task, task_name) 103 | 104 | self.assertEqual(jobs[0].args["count"], 5) 105 | self.assertEqual(jobs[0].priority, 0) 106 | 107 | self.assertEqual(jobs[1].args["count"], 7) 108 | self.assertEqual(jobs[1].priority, 10) 109 | self.assertEqual(jobs[1].execute_at, day_from_now) 110 | 111 | def test_queue_subclass_enqueue(self) -> None: 112 | """ 113 | A BaseQueue subclass with a different job_model enqueues into its own table.
114 | """ 115 | NAME = "machine_a" 116 | 117 | class AltQueue(BaseQueue[AltJob]): 118 | job_model = AltJob 119 | 120 | def run_once( 121 | self, exclude_ids: Optional[Iterable[int]] = None 122 | ) -> Optional[Tuple[AltJob, Any]]: 123 | return self._run_once(exclude_ids=exclude_ids) 124 | 125 | queue = AltQueue(tasks={"demotask": demotask}, queue=NAME) 126 | 127 | self.assertEqual(queue.job_model, AltJob) 128 | self.assertEqual(AltJob.objects.count(), 0) 129 | 130 | job = queue.enqueue("demotask", args={"count": 1}) 131 | 132 | self.assertIsInstance(job, AltJob) 133 | self.assertEqual(AltJob.objects.count(), 1) 134 | 135 | def test_queue_subclass_bulk_enqueue(self) -> None: 136 | """ 137 | A BaseQueue subclass with a different job_model bulk-enqueues into its own table. 138 | """ 139 | NAME = "machine_a" 140 | 141 | class AltQueue(BaseQueue[AltJob]): 142 | job_model = AltJob 143 | 144 | def run_once( 145 | self, exclude_ids: Optional[Iterable[int]] = None 146 | ) -> Optional[Tuple[AltJob, Any]]: 147 | return self._run_once(exclude_ids=exclude_ids) 148 | 149 | queue = AltQueue(tasks={"demotask": demotask}, queue=NAME) 150 | 151 | self.assertEqual(queue.job_model, AltJob) 152 | self.assertEqual(AltJob.objects.count(), 0) 153 | 154 | jobs = queue.bulk_enqueue( 155 | "demotask", [{"args": {"count": 5}}, {"args": {"count": 7}},], 156 | ) 157 | 158 | self.assertEqual(AltJob.objects.count(), 2) 159 | self.assertIsInstance(jobs[0], AltJob) 160 | 161 | def test_basejob_subclass_dequeue(self) -> None: 162 | NAME = "machine_a" 163 | 164 | class AltQueue(BaseQueue[AltJob]): 165 | job_model = AltJob 166 | 167 | def run_once( 168 | self, exclude_ids: Optional[Iterable[int]] = None 169 | ) -> Optional[Tuple[AltJob, Any]]: 170 | return self._run_once(exclude_ids=exclude_ids) 171 | 172 | queue = AltQueue(tasks={"demotask": demotask}, queue=NAME) 173 | 174 | self.assertEqual(queue.job_model, AltJob) 175 | self.assertEqual(AltJob.objects.count(), 0) 176 | 177 | job = queue.enqueue("demotask", args={"count": 1}) 178 | 179 | self.assertEqual(AltJob.objects.count(), 1) 180 | 181 | db_job = AltJob.dequeue(queue=queue.queue) 182 | 183 | self.assertEqual(job, db_job) 184 | self.assertEqual(AltJob.objects.count(), 0) 185 | 186 | 187 | class PgqTransactionTests(TransactionTestCase): 188 | def test_notify_and_listen(self) -> None: 189 | """ 190 | After `listen()`, `enqueue()` makes a notification 191 | appear via `filter_notifies()`. 192 | """ 193 | NAME = "machine_a" 194 | queue = AtLeastOnceQueue( 195 | tasks={"demotask": demotask}, notify_channel="queue_a", queue=NAME 196 | ) 197 | 198 | queue.listen() 199 | queue.enqueue("demotask", {"count": 5}) 200 | self.assertEqual(len(queue.filter_notifies()), 1) 201 | 202 | queue.enqueue("demotask", {"count": 5}) 203 | queue.enqueue("demotask", {"count": 5}) 204 | self.assertEqual(len(queue.filter_notifies()), 2) 205 | 206 | def test_notify_only_returns_one_notify_per_channel_per_txn(self) -> None: 207 | """ 208 | Only one notification is returned per channel per transaction, 209 | regardless of the number of enqueued tasks. 210 | 211 | By default, postgres will 'fold' notifications within a transaction 212 | that have the same channel and payload.
213 | """ 214 | NAME = "machine_a" 215 | queue = AtLeastOnceQueue( 216 | tasks={"demotask": demotask}, notify_channel="queue_a", queue=NAME 217 | ) 218 | queue.listen() 219 | 220 | NAME2 = "machine_b" 221 | queue2 = AtLeastOnceQueue( 222 | tasks={"demotask": demotask}, notify_channel="queue_b", queue=NAME2 223 | ) 224 | queue2.listen() 225 | 226 | with transaction.atomic(): 227 | queue.enqueue("demotask", {"count": 5}) 228 | queue.enqueue("demotask", {"count": 5}) 229 | 230 | queue2.enqueue("demotask", {"count": 5}) 231 | queue2.enqueue("demotask", {"count": 5}) 232 | 233 | self.assertEqual(len(queue.wait()), 1) 234 | self.assertEqual(len(queue2.wait()), 1) 235 | 236 | def test_bulk_create_notifies(self) -> None: 237 | NAME = "machine_a" 238 | queue = AtLeastOnceQueue( 239 | tasks={"demotask": demotask}, notify_channel="queue_a", queue=NAME 240 | ) 241 | queue.listen() 242 | 243 | now = timezone.now() 244 | queue.bulk_enqueue( 245 | "demotask", 246 | [ 247 | {"args": {"count": 5}}, 248 | {"args": {"count": 7}, "priority": 10, "execute_at": now,}, 249 | ], 250 | ) 251 | 252 | self.assertEqual(len(queue.wait()), 1) 253 | 254 | def test_atmostonce_retry_during_database_failure(self) -> None: 255 | """ 256 | As above, but for the at-most-once queue. 257 | """ 258 | 259 | queue = AtMostOnceQueue(tasks={}) 260 | 261 | @task(queue, max_retries=2) 262 | def failuretask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> None: 263 | # Group's name has a max of 150 chars, so this forces a database error. 264 | Group.objects.create(name="!" * 151) 265 | return None 266 | 267 | failuretask.enqueue({}) 268 | originaljob = Job.objects.all()[0] 269 | 270 | queue.run_once() 271 | 272 | retryjob = Job.objects.all()[0] 273 | self.assertNotEqual(originaljob.id, retryjob.id) 274 | self.assertEqual(retryjob.args["meta"]["retries"], 1) 275 | 276 | def test_atleastonce_retry_during_on_commit_failure(self) -> None: 277 | """Raising an error in on_commit doesn't retry the job. 278 | 279 | This test serves more as documentation of what may be considered 280 | surprising behaviour. The behaviour is due to the transaction 281 | being committed before the exception is raised (so the job has 282 | already been popped from the db as successful). 283 | """ 284 | queue = AtLeastOnceQueue(tasks={}) 285 | 286 | @task(queue, max_retries=2) 287 | def failuretask(queue: Queue, job: Job, args: Any, meta: JobMeta) -> None: 288 | transaction.on_commit(lambda: 1 / 0) 289 | return None 290 | 291 | failuretask.enqueue({}) 292 | self.assertEqual(Job.objects.count(), 1) 293 | 294 | with self.assertRaises(PgqException): 295 | queue.run_once() 296 | 297 | # The job has been completed and will not be retried despite the 298 | # error raised in the `on_commit()` callback. 299 | self.assertEqual(Job.objects.count(), 0) 300 | 301 | def test_atleastonce_on_commit_failure(self) -> None: 302 | """Raising an error in on_commit doesn't retry the job. 303 | 304 | This test serves more as documentation of what may be considered 305 | surprising behaviour. The behaviour is due to the transaction 306 | being committed before the exception is raised (so the job has 307 | already been popped from the db as successful).
308 | """ 309 | def failuretask(queue: Queue, job: Job) -> None: 310 | transaction.on_commit(lambda: 1 / 0) 311 | return None 312 | 313 | queue = AtLeastOnceQueue(tasks={"failuretask": failuretask}, queue="machinea") 314 | 315 | queue.enqueue("failuretask") 316 | self.assertEqual(Job.objects.count(), 1) 317 | 318 | with self.assertRaises(PgqException): 319 | queue.run_once() 320 | 321 | # The job has been completed and will not be retried despite the 322 | # error raised in the `on_commit()` callback. 323 | self.assertEqual(Job.objects.count(), 0) 324 | 325 | def test_worker_on_commit_failure(self) -> None: 326 | """An error raised in ``run_once()`` doesn't crash the worker. 327 | 328 | This is triggered using an ``on_commit()`` callback to create an 329 | error at the outer most ``transaction.atomic()`` site (in 330 | ``_run_once()`` for ``AtLeastOnceQueue``). 331 | """ 332 | queue_name = "machine_a" 333 | 334 | def failuretask(queue: Queue, job: Job) -> None: 335 | transaction.on_commit(lambda: 1 / 0) 336 | return None 337 | 338 | test_queue = AtLeastOnceQueue(tasks={"failuretask": failuretask}, queue=queue_name) 339 | 340 | test_queue.enqueue("failuretask") 341 | self.assertEqual(Job.objects.count(), 1) 342 | 343 | class TestWorker(Worker): 344 | queue = test_queue 345 | _shutdown = False 346 | 347 | worker = TestWorker() 348 | 349 | worker.run_available_tasks() 350 | # The error in the `on_commit()` callback is triggered after the 351 | # transaction is committed so the job has been removed from the 352 | # queue. 353 | self.assertEqual(Job.objects.count(), 0) 354 | 355 | def test_atleastonce_run_once_is_atomic(self) -> None: 356 | """``AtLeastOnceQueue`` runs each task in an atomic block. 357 | 358 | Database operations are rolled back on failure; the job remains in the queue. 359 | """ 360 | group_name = "test_group" 361 | 362 | def failuretask(queue: Queue, job: Job) -> None: 363 | Group.objects.create(name=group_name) 364 | raise Exception() 365 | 366 | queue = AtLeastOnceQueue(tasks={"failuretask": failuretask}, queue="machinea") 367 | 368 | queue.enqueue("failuretask") 369 | self.assertEqual(Job.objects.count(), 1) 370 | 371 | with self.assertRaises(PgqException): 372 | queue.run_once() 373 | 374 | self.assertEqual(Group.objects.filter(name=group_name).count(), 0) 375 | # The task failed inside the atomic block, so the job remains in the 376 | # queue and will be retried. 377 | self.assertEqual(Job.objects.count(), 1) 378 | 379 | def test_atmostonce_run_once_is_not_atomic(self) -> None: 380 | """``AtMostOnceQueue`` does not run tasks in an atomic block. 381 | 382 | Database operations are persisted despite the failure; the job is 383 | removed from the queue. 384 | """ 385 | group_name = "test_group" 386 | 387 | def failuretask(queue: Queue, job: Job) -> None: 388 | Group.objects.create(name=group_name) 389 | raise Exception() 390 | 391 | queue = AtMostOnceQueue(tasks={"failuretask": failuretask}, queue="machinea") 392 | 393 | queue.enqueue("failuretask") 394 | self.assertEqual(Job.objects.count(), 1) 395 | 396 | with self.assertRaises(PgqException): 397 | queue.run_once() 398 | 399 | self.assertEqual(Group.objects.filter(name=group_name).count(), 1) 400 | # The job was removed from the queue before the task ran, so it will 401 | # not be retried despite the exception raised in the task.
402 | self.assertEqual(Job.objects.count(), 0) 403 | -------------------------------------------------------------------------------- /testproj/urls.py: -------------------------------------------------------------------------------- 1 | """testproj URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/1.11/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.conf.urls import url, include 14 | 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) 15 | """ 16 | from django.conf.urls import url 17 | from django.contrib import admin 18 | 19 | urlpatterns = [ 20 | url(r"^admin/", admin.site.urls), 21 | ] 22 | -------------------------------------------------------------------------------- /testproj/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for testproj project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testproj.settings") 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | py{36,37,38,39}-django22, 4 | py{36,37,38,39}-django31, 5 | py{36,37,38,39}-django32, 6 | 7 | [testenv] 8 | passenv = GITHUB_WORKFLOW 9 | commands = python manage.py test 10 | deps = 11 | psycopg2-binary 12 | dataclasses; python_version<"3.7" 13 | django22: django~=2.2.17 # first patch release with Python 3.9 support 14 | django22: psycopg2-binary<2.9 15 | django31: django~=3.1.3 # first patch release with Python 3.9 support 16 | django32: django~=3.2.0 17 | --------------------------------------------------------------------------------
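The test suite above exercises the public pgq API end to end. As a recap, the following is a minimal sketch of the enqueue/worker round trip those tests rely on. It is not a file from this repository, and the task function, queue name, and notify channel are illustrative; it assumes a configured Django project with "pgq" installed and its migrations applied.

from pgq.models import Job
from pgq.queue import AtLeastOnceQueue, Queue


def send_email(queue: Queue, job: Job) -> None:
    # Task callbacks receive the queue and the dequeued Job;
    # job.args holds the payload that was passed to enqueue().
    print("sending to", job.args["address"])


# Jobs are scoped to the queue name; notify_channel is optional and
# enables LISTEN/NOTIFY wakeups (see the notify tests above).
queue = AtLeastOnceQueue(
    tasks={"send_email": send_email},
    queue="emails",
    notify_channel="pgq_emails",
)

# Producer side: stores a job row, transactionally like any model write.
queue.enqueue("send_email", {"address": "someone@example.com"})

# Consumer side: processes at most one job. Under AtLeastOnceQueue this
# runs inside a transaction, so a failing task leaves the job queued.
queue.run_once()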