├── .github ├── FUNDING.yml ├── release-drafter.yml └── workflows │ ├── codeql-analysis.yml │ ├── release_log.yml │ └── test.yml ├── .gitignore ├── .readthedocs.yml ├── CHANGELOG.md ├── Dockerfile.dev ├── LICENSE ├── README.rst ├── django_q ├── __init__.py ├── admin.py ├── apps.py ├── brokers │ ├── __init__.py │ ├── aws_sqs.py │ ├── disque.py │ ├── ironmq.py │ ├── mongo.py │ ├── orm.py │ └── redis_broker.py ├── cluster.py ├── conf.py ├── core_signing.py ├── humanhash.py ├── locale │ ├── de │ │ └── LC_MESSAGES │ │ │ ├── django.mo │ │ │ └── django.po │ └── fr │ │ └── LC_MESSAGES │ │ ├── django.mo │ │ └── django.po ├── management │ ├── __init__.py │ └── commands │ │ ├── __init__.py │ │ ├── qcluster.py │ │ ├── qinfo.py │ │ ├── qmemory.py │ │ └── qmonitor.py ├── migrations │ ├── 0001_initial.py │ ├── 0002_auto_20150630_1624.py │ ├── 0003_auto_20150708_1326.py │ ├── 0004_auto_20150710_1043.py │ ├── 0005_auto_20150718_1506.py │ ├── 0006_auto_20150805_1817.py │ ├── 0007_ormq.py │ ├── 0008_auto_20160224_1026.py │ ├── 0009_auto_20171009_0915.py │ ├── 0010_auto_20200610_0856.py │ ├── 0011_auto_20200628_1055.py │ ├── 0012_auto_20200702_1608.py │ ├── 0013_task_attempt_count.py │ ├── 0014_schedule_cluster.py │ └── __init__.py ├── models.py ├── monitor.py ├── queues.py ├── signals.py ├── signing.py ├── status.py ├── tasks.py └── tests │ ├── __init__.py │ ├── settings.py │ ├── tasks.py │ ├── test_admin.py │ ├── test_brokers.py │ ├── test_cached.py │ ├── test_cluster.py │ ├── test_commands.py │ ├── test_monitor.py │ ├── test_scheduler.py │ ├── testing_utilities │ ├── __init__.py │ └── multiple_database_routers.py │ └── urls.py ├── docs ├── Makefile ├── _static │ ├── cluster.png │ ├── favicon.ico │ ├── info.png │ ├── logo.png │ ├── logo_large.png │ ├── monitor.png │ ├── scheduled.png │ └── successful.png ├── admin.rst ├── architecture.rst ├── brokers.rst ├── chain.rst ├── cluster.rst ├── conf.py ├── configure.rst ├── errors.rst ├── examples.rst ├── group.rst ├── index.rst ├── install.rst ├── iterable.rst ├── monitor.rst ├── schedules.rst ├── signals.rst └── tasks.rst ├── poetry.lock ├── pyproject.toml ├── pytest.ini ├── requirements.txt └── test-services-docker-compose.yaml /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [koed00] 4 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | categories: 2 | - 3 | label: breaking 4 | title: Breaking 5 | - 6 | label: feature 7 | title: New 8 | - 9 | label: bug 10 | title: "Bug Fixes" 11 | - 12 | label: dependencies 13 | title: "Dependency Updates" 14 | - 15 | label: security 16 | title: Security 17 | name-template: v$NEXT_PATCH_VERSION 18 | tag-template: v$NEXT_PATCH_VERSION 19 | template: | 20 | # Changes 21 | $CHANGES 22 | version-resolver: 23 | major: 24 | labels: 25 | - breaking 26 | - major 27 | minor: 28 | labels: 29 | - feature 30 | - minor 31 | patch: 32 | labels: 33 | - bug 34 | - dependencies 35 | - security 36 | - patch 37 | default: patch -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 
3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '32 0 * * 5' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /.github/workflows/release_log.yml: -------------------------------------------------------------------------------- 1 | name: Update release draft 2 | on: 3 | push: 4 | branches: 5 | - master 6 | jobs: 7 | update_release_draft: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: release-drafter/release-drafter@v5 11 | with: 12 | config-name: release-drafter.yml 13 | env: 14 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: tests 2 | on: [ push, pull_request ] 3 | jobs: 4 | build: 5 | runs-on: ubuntu-latest 6 | strategy: 7 | matrix: 8 | python-version: [ 3.7, 3.8, 3.9 ] 9 | django: [ "2.2", "3.2" ] 10 | services: 11 | disque: 12 | image: efrecon/disque:1.0-rc1 13 | ports: 14 | - '7711:7711/tcp' 15 | mongodb: 16 | image: mongo 17 | ports: 18 | - 27017:27017 19 | postgres: 20 | image: postgres 21 | env: 22 | POSTGRES_USER: postgres 23 | POSTGRES_PASSWORD: postgres 24 | POSTGRES_DB: postgres 25 | ports: 26 | - 5432:5432 27 | # needed because the postgres container does not provide a health check 28 | options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 29 | redis: 30 | image: redis 31 | ports: 32 | - 6379:6379 33 | options: --entrypoint redis-server 34 | steps: 35 | - uses: actions/checkout@v2 36 | - name: Set up Python ${{ matrix.python-version }} 37 | uses: actions/setup-python@v2 38 | with: 39 | python-version: ${{ matrix.python-version }} 40 | - name: Install dependencies with Django ${{ matrix.django }} 41 | run: | 42 | python -m pip install --upgrade pip 43 | pip install poetry 44 | poetry add "django==${{ matrix.django }}" 45 | poetry install -E testing 46 | - name: Run Tests 47 | run: | 48 | poetry run pytest --cov=./django_q --cov-report=xml 49 | - name: Upload coverage 50 | uses: codecov/codecov-action@v1 51 | env: 52 | PYTHON: ${{ matrix.python-version }} 53 | DJANGO: ${{ matrix.django }} 54 | with: 55 | token: ${{ secrets.CODECOV_TOKEN }} 56 | env_vars: PYTHON, DJANGO 57 | fail_ci_if_error: true 58 | - name: Build docs 59 | run: | 60 | poetry run sphinx-build -b html -d docs/_build/doctrees -nW docs docs/_build/html 61 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | 26 | # PyInstaller 27 | # Usually these files are written by a python script from a template 28 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
29 | *.manifest 30 | *.spec 31 | 32 | # Installer logs 33 | pip-log.txt 34 | pip-delete-this-directory.txt 35 | 36 | # Unit test / coverage reports 37 | htmlcov/ 38 | .tox/ 39 | .coverage 40 | .coverage.* 41 | .cache 42 | nosetests.xml 43 | coverage.xml 44 | *,cover 45 | .DS_Store 46 | 47 | # Translations 48 | *.pot 49 | *.mo 50 | 51 | # Django stuff: 52 | *.log 53 | 54 | # Sphinx documentation 55 | docs/_build/ 56 | 57 | # PyBuilder 58 | target/ 59 | 60 | dev-requirements.txt 61 | dev-requirements.in 62 | manage.py 63 | db.sqlite3 64 | *.ipynb 65 | *.rdb 66 | .venv 67 | .env 68 | .idea 69 | djq 70 | node_modules 71 | /c.cache/ 72 | /dq 73 | /venv/ 74 | .local -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | build: 2 | image: latest 3 | 4 | python: 5 | version: 3.8 6 | setup_py_install: true -------------------------------------------------------------------------------- /Dockerfile.dev: -------------------------------------------------------------------------------- 1 | # Sets the python version 2 | FROM python:3.9.5-slim 3 | 4 | # Allows the logs generated by python apps to be rendered in the terminal 5 | ENV PYTHONUNBUFFERED 1 6 | 7 | # Sets the default shell to bash 8 | ENV SHELL /bin/bash 9 | 10 | # Creates a non-root user 11 | RUN adduser --disabled-password docker 12 | 13 | # Upgrades pip 14 | RUN pip install --upgrade pip 15 | 16 | # Poetry project setup for development 17 | # Copies poetry requirements files 18 | COPY --chown=docker Dockerfile.dev requirements.txt* setup.py* ./ 19 | 20 | RUN pip install -r requirements.txt 21 | 22 | RUN pip install pytest pytest-django codecov poetry 23 | 24 | # Clean up 25 | RUN apt-get autoremove -y \ 26 | && apt-get clean -y \ 27 | && rm -rf /var/lib/apt/lists/* 28 | 29 | WORKDIR /home/docker 30 | 31 | # Sets the binaries to path 32 | ENV PATH="/home/docker/.local/bin:${PATH}" 33 | 34 | # Copy in as non-root user, so permissions match what we need 35 | COPY --chown=docker:docker . . 36 | 37 | RUN python setup.py develop 38 | 39 | # Applies the container user to be non-root 40 | USER docker -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 - 2021 Ilan Steemers 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
23 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. image:: docs/_static/logo.png
2 |    :align: center
3 |    :alt: Q logo
4 |    :target: https://django-q.readthedocs.org/
5 | 
6 | A multiprocessing distributed task queue for Django
7 | ---------------------------------------------------
8 | 
9 | |image0| |image1| |docs| |image2|
10 | 
11 | Features
12 | ~~~~~~~~
13 | 
14 | - Multiprocessing worker pool
15 | - Asynchronous tasks
16 | - Scheduled, cron and repeated tasks
17 | - Signed and compressed packages
18 | - Failure and success database or cache
19 | - Result hooks, groups and chains
20 | - Django Admin integration
21 | - PaaS compatible with multiple instances
22 | - Multi cluster monitor
23 | - Redis, Disque, IronMQ, SQS, MongoDB or ORM
24 | - Rollbar and Sentry support
25 | 
26 | Requirements
27 | ~~~~~~~~~~~~
28 | 
29 | - `Django `__ >= 2.2
30 | - `Django-picklefield `__
31 | - `Arrow `__
32 | - `Blessed `__
33 | 
34 | Tested with: Python 3.7, 3.8 and 3.9; Django 2.2.X and 3.2.X
35 | 
36 | .. warning:: Since Python 3.7, `async` is a reserved keyword; the function was therefore renamed to `async_task`
37 | 
38 | Brokers
39 | ~~~~~~~
40 | - `Redis `__
41 | - `Disque `__
42 | - `IronMQ `__
43 | - `Amazon SQS `__
44 | - `MongoDB `__
45 | - `Django ORM `__
46 | 
47 | Installation
48 | ~~~~~~~~~~~~
49 | 
50 | - Install the latest version with pip::
51 | 
52 |     $ pip install django-q
53 | 
54 | 
55 | - Add `django_q` to your `INSTALLED_APPS` in your project's `settings.py`::
56 | 
57 |     INSTALLED_APPS = (
58 |         # other apps
59 |         'django_q',
60 |     )
61 | 
62 | - Run Django migrations to create the database tables::
63 | 
64 |     $ python manage.py migrate
65 | 
66 | - Choose a message `broker `__, then configure it and install the appropriate client library.
67 | 
68 | Read the full documentation at `https://django-q.readthedocs.org `__
69 | 
70 | 
71 | Configuration
72 | ~~~~~~~~~~~~~
73 | 
74 | All configuration settings are optional, e.g.:
75 | 
76 | .. code:: python
77 | 
78 |     # settings.py example
79 |     Q_CLUSTER = {
80 |         'name': 'myproject',
81 |         'workers': 8,
82 |         'recycle': 500,
83 |         'timeout': 60,
84 |         'compress': True,
85 |         'cpu_affinity': 1,
86 |         'save_limit': 250,
87 |         'queue_limit': 500,
88 |         'label': 'Django Q',
89 |         'redis': {
90 |             'host': '127.0.0.1',
91 |             'port': 6379,
92 |             'db': 0, }
93 |     }
94 | 
95 | For full configuration options, see the `configuration documentation `__.
96 | 
97 | Management Commands
98 | ~~~~~~~~~~~~~~~~~~~
99 | 
100 | Start a cluster with::
101 | 
102 |     $ python manage.py qcluster
103 | 
104 | Monitor your clusters with::
105 | 
106 |     $ python manage.py qmonitor
107 | 
108 | Monitor your clusters' memory usage with::
109 | 
110 |     $ python manage.py qmemory
111 | 
112 | Check overall statistics with::
113 | 
114 |     $ python manage.py qinfo
115 | 
116 | Creating Tasks
117 | ~~~~~~~~~~~~~~
118 | 
119 | Use `async_task` from your code to quickly offload tasks:
120 | 
121 | .. code:: python
122 | 
123 |     from django_q.tasks import async_task, result
124 | 
125 |     # create the task
126 |     async_task('math.copysign', 2, -2)
127 | 
128 |     # or with a function reference
129 |     from math import copysign
130 | 
131 |     task_id = async_task(copysign, 2, -2)
132 | 
133 |     # get the result
134 |     task_result = result(task_id)
135 | 
136 |     # result returns None if the task has not been executed yet,
137 |     # so you can wait for it (here up to 200 milliseconds)
138 |     task_result = result(task_id, 200)
139 | 
140 |     # but in most cases you will want to use a hook:
141 | 
142 |     async_task('math.modf', 2.5, hook='hooks.print_result')
143 | 
144 |     # hooks.py
145 |     def print_result(task):
146 |         print(task.result)
147 | 
148 | For more info see `Tasks `__
149 | 
150 | 
151 | Schedule
152 | ~~~~~~~~
153 | 
154 | Schedules are regular Django models. You can manage them through the
155 | Admin page or directly from your code:
156 | 
157 | .. code:: python
158 | 
159 |     # Use the schedule function
160 |     from django_q.tasks import schedule
161 |     from django_q.models import Schedule
162 | 
163 |     schedule('math.copysign',
164 |              2, -2,
165 |              hook='hooks.print_result',
166 |              schedule_type=Schedule.DAILY)
167 | 
168 |     # Or create the object directly
169 | 
170 |     Schedule.objects.create(func='math.copysign',
171 |                             hook='hooks.print_result',
172 |                             args='2,-2',
173 |                             schedule_type=Schedule.DAILY
174 |                             )
175 | 
176 |     # Run a task every 5 minutes, starting at 18:00 today,
177 |     # for 2 hours
178 |     import arrow
179 | 
180 |     schedule('math.hypot',
181 |              3, 4,
182 |              schedule_type=Schedule.MINUTES,
183 |              minutes=5,
184 |              repeats=24,
185 |              next_run=arrow.utcnow().replace(hour=18, minute=0))
186 | 
187 |     # Use a cron expression
188 |     schedule('math.hypot',
189 |              3, 4,
190 |              schedule_type=Schedule.CRON,
191 |              cron='0 22 * * 1-5')
192 | 
193 | For more info check the `Schedules `__ documentation.
194 | 
195 | 
196 | Testing
197 | ~~~~~~~
198 | 
199 | To run the tests you will need the following in addition to the install requirements:
200 | 
201 | * `py.test `__
202 | * `pytest-django `__
203 | * Disque from https://github.com/antirez/disque.git
204 | * Redis
205 | * MongoDB
206 | 
207 | Or you can use the included Docker Compose file.
208 | 
209 | The following commands can be used to run the tests:
210 | 
211 | .. code:: bash
212 | 
213 |     # Create virtual environment
214 |     python -m venv venv
215 | 
216 |     # Install requirements
217 |     venv/bin/pip install -r requirements.txt
218 | 
219 |     # Install test dependencies
220 |     venv/bin/pip install pytest pytest-django
221 | 
222 |     # Install django-q
223 |     venv/bin/python setup.py develop
224 | 
225 |     # Run required services (you need to have docker-compose installed)
226 |     docker-compose -f test-services-docker-compose.yaml up -d
227 | 
228 |     # Run tests
229 |     venv/bin/pytest
230 | 
231 |     # Stop the services required by tests (when you no longer plan to run tests)
232 |     docker-compose -f test-services-docker-compose.yaml down
233 | 
234 | Locale
235 | ~~~~~~
236 | 
237 | Currently available in English, German and French.
238 | Translation pull requests are always welcome.
239 | 
240 | Todo
241 | ~~~~
242 | 
243 | - Better tests and coverage
244 | - Fewer dependencies?
245 | 
246 | Acknowledgements
247 | ~~~~~~~~~~~~~~~~
248 | 
249 | - Django Q was inspired by working with
250 |   `Django-RQ `__ and
251 |   `RQ `__
252 | - Human readable hashes by
253 |   `HumanHash `__
254 | - Redditors feedback at `r/django `__
255 | 
256 | - JetBrains for their `Open Source Support Program `__
257 | 
258 | .. |image0| image:: https://github.com/koed00/django-q/workflows/Tests/badge.svg?branch=master
259 |    :target: https://github.com/Koed00/django-q/actions?query=workflow%3Atests
260 | .. |image1| image:: http://codecov.io/github/Koed00/django-q/coverage.svg?branch=master
261 |    :target: http://codecov.io/github/Koed00/django-q?branch=master
262 | .. |image2| image:: http://badges.gitter.im/Join%20Chat.svg
263 |    :target: https://gitter.im/Koed00/django-q
264 | .. |docs| image:: https://readthedocs.org/projects/docs/badge/?version=latest
265 |    :alt: Documentation Status
266 |    :scale: 100
267 |    :target: https://django-q.readthedocs.org/
268 | 
--------------------------------------------------------------------------------
/django_q/__init__.py:
--------------------------------------------------------------------------------
1 | VERSION = (1, 3, 9)
2 | 
3 | import django
4 | 
5 | if django.VERSION < (3, 2):
6 |     default_app_config = "django_q.apps.DjangoQConfig"
7 | 
8 | __all__ = ["conf", "cluster", "models", "tasks"]
--------------------------------------------------------------------------------
/django_q/admin.py:
--------------------------------------------------------------------------------
1 | """Admin module for Django."""
2 | from django.contrib import admin
3 | from django.utils.translation import gettext_lazy as _
4 | 
5 | from django_q.conf import Conf, croniter
6 | from django_q.models import Failure, OrmQ, Schedule, Success
7 | from django_q.tasks import async_task
8 | 
9 | 
10 | class TaskAdmin(admin.ModelAdmin):
11 |     """model admin for success tasks."""
12 | 
13 |     list_display = ("name", "func", "started", "stopped", "time_taken", "group")
14 | 
15 |     def has_add_permission(self, request):
16 |         """Don't allow adds."""
17 |         return False
18 | 
19 |     def get_queryset(self, request):
20 |         """Only show successes."""
21 |         qs = super(TaskAdmin, self).get_queryset(request)
22 |         return qs.filter(success=True)
23 | 
24 |     search_fields = ("name", "func", "group")
25 |     readonly_fields = []
26 |     list_filter = ("group",)
27 | 
28 |     def get_readonly_fields(self, request, obj=None):
29 |         """Set all fields readonly."""
30 |         return list(self.readonly_fields) + [field.name for field in obj._meta.fields]
31 | 
32 | 
33 | def retry_failed(FailAdmin, request, queryset):
34 |     """Submit selected tasks back to the queue."""
35 |     for task in queryset:
36 |         async_task(task.func, *task.args or (), hook=task.hook, **task.kwargs or {})
37 |         task.delete()
38 | 
39 | 
40 | retry_failed.short_description = _("Resubmit selected tasks to queue")
41 | 
42 | 
43 | class FailAdmin(admin.ModelAdmin):
44 |     """model admin for failed tasks."""
45 | 
46 |     list_display = ("name", "func", "started", "stopped", "short_result")
47 | 
48 |     def has_add_permission(self, request):
49 |         """Don't allow adds."""
50 |         return False
51 | 
52 |     actions = [retry_failed]
53 |     search_fields = ("name", "func")
54 |     list_filter = ("group",)
55 |     readonly_fields = []
56 | 
57 |     def get_readonly_fields(self, request, obj=None):
58 |         """Set all fields readonly."""
59 |         return list(self.readonly_fields) + [field.name for field in obj._meta.fields]
60 | 
61 | 
62 | class ScheduleAdmin(admin.ModelAdmin):
63 |     """model admin for schedules"""
64 | 
65 |     list_display = (
66 |         "id",
67 |         "name",
68 |         "func",
69 |         "schedule_type",
70 |         "repeats",
71 |         "cluster",
72 |         "next_run",
73 |         "last_run",
74 |         "success",
75 |     )
76 | 
77 |     # optional cron strings
78 |     if not croniter:
79 |         readonly_fields = ("cron",)
80 | 
81 |     list_filter = ("next_run", "schedule_type", "cluster")
82 | 
search_fields = ("func",) 83 | list_display_links = ("id", "name") 84 | 85 | 86 | class QueueAdmin(admin.ModelAdmin): 87 | """queue admin for ORM broker""" 88 | 89 | list_display = ("id", "key", "task_id", "name", "func", "lock") 90 | 91 | def save_model(self, request, obj, form, change): 92 | obj.save(using=Conf.ORM) 93 | 94 | def delete_model(self, request, obj): 95 | obj.delete(using=Conf.ORM) 96 | 97 | def get_queryset(self, request): 98 | return super(QueueAdmin, self).get_queryset(request).using(Conf.ORM) 99 | 100 | def has_add_permission(self, request): 101 | """Don't allow adds.""" 102 | return False 103 | 104 | list_filter = ("key",) 105 | 106 | 107 | admin.site.register(Schedule, ScheduleAdmin) 108 | admin.site.register(Success, TaskAdmin) 109 | admin.site.register(Failure, FailAdmin) 110 | 111 | if Conf.ORM or Conf.TESTING: 112 | admin.site.register(OrmQ, QueueAdmin) 113 | -------------------------------------------------------------------------------- /django_q/apps.py: -------------------------------------------------------------------------------- 1 | from django.apps import AppConfig 2 | 3 | from django_q.conf import Conf 4 | 5 | 6 | class DjangoQConfig(AppConfig): 7 | name = "django_q" 8 | verbose_name = Conf.LABEL 9 | default_auto_field = "django.db.models.AutoField" 10 | 11 | def ready(self): 12 | from django_q.signals import call_hook 13 | -------------------------------------------------------------------------------- /django_q/brokers/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | from typing import Optional 3 | 4 | from django.core.cache import InvalidCacheBackendError, caches 5 | 6 | from django_q.conf import Conf 7 | 8 | 9 | class Broker: 10 | def __init__(self, list_key: str = Conf.PREFIX): 11 | self.connection = self.get_connection(list_key) 12 | self.list_key = list_key 13 | self.cache = self.get_cache() 14 | self._info = None 15 | 16 | def __getstate__(self): 17 | return self.list_key, self._info 18 | 19 | def __setstate__(self, state): 20 | self.list_key, self._info = state 21 | self.connection = self.get_connection(self.list_key) 22 | self.cache = self.get_cache() 23 | 24 | def enqueue(self, task): 25 | """ 26 | Puts a task onto the queue 27 | :type task: str 28 | :return: task id 29 | """ 30 | pass 31 | 32 | def dequeue(self): 33 | """ 34 | Gets a task from the queue 35 | :return: tuple with task id and task message 36 | """ 37 | pass 38 | 39 | def queue_size(self): 40 | """ 41 | :return: the amount of tasks in the queue 42 | """ 43 | pass 44 | 45 | def lock_size(self): 46 | """ 47 | :return: the number of tasks currently awaiting acknowledgement 48 | """ 49 | 50 | def delete_queue(self): 51 | """ 52 | Deletes the queue from the broker 53 | """ 54 | pass 55 | 56 | def purge_queue(self): 57 | """ 58 | Purges the queue of any tasks 59 | """ 60 | pass 61 | 62 | def delete(self, task_id): 63 | """ 64 | Deletes a task from the queue 65 | :param task_id: the id of the task 66 | """ 67 | pass 68 | 69 | def acknowledge(self, task_id): 70 | """ 71 | Acknowledges completion of the task and removes it from the queue. 
72 | :param task_id: the id of the task 73 | """ 74 | pass 75 | 76 | def fail(self, task_id): 77 | """ 78 | Fails a task message 79 | :param task_id: 80 | :return: 81 | """ 82 | 83 | def ping(self) -> bool: 84 | """ 85 | Checks whether the broker connection is available 86 | :rtype: bool 87 | """ 88 | pass 89 | 90 | def info(self): 91 | """ 92 | Shows the broker type 93 | """ 94 | return self._info 95 | 96 | def set_stat(self, key: str, value: str, timeout: int): 97 | """ 98 | Saves a cluster statistic to the cache provider 99 | :type key: str 100 | :type value: str 101 | :type timeout: int 102 | """ 103 | if not self.cache: 104 | return 105 | key_list = self.cache.get(Conf.Q_STAT, []) 106 | if key not in key_list: 107 | key_list.append(key) 108 | self.cache.set(Conf.Q_STAT, key_list) 109 | return self.cache.set(key, value, timeout) 110 | 111 | def get_stat(self, key: str): 112 | """ 113 | Gets a cluster statistic from the cache provider 114 | :type key: str 115 | :return: a cluster Stat 116 | """ 117 | if not self.cache: 118 | return 119 | return self.cache.get(key) 120 | 121 | def get_stats(self, pattern: str) -> Optional[list]: 122 | """ 123 | Returns a list of all cluster stats from the cache provider 124 | :type pattern: str 125 | :return: a list of Stats 126 | """ 127 | if not self.cache: 128 | return 129 | key_list = self.cache.get(Conf.Q_STAT) 130 | if not key_list or len(key_list) == 0: 131 | return [] 132 | stats = [] 133 | for key in key_list: 134 | stat = self.cache.get(key) 135 | if stat: 136 | stats.append(stat) 137 | else: 138 | key_list.remove(key) 139 | self.cache.set(Conf.Q_STAT, key_list) 140 | return stats 141 | 142 | @staticmethod 143 | def get_cache(): 144 | """ 145 | Gets the current cache provider 146 | :return: a cache provider 147 | """ 148 | try: 149 | return caches[Conf.CACHE] 150 | except InvalidCacheBackendError: 151 | return None 152 | 153 | @staticmethod 154 | def get_connection(list_key: str = Conf.PREFIX): 155 | """ 156 | Gets a connection to the broker 157 | :param list_key: Optional queue name 158 | :return: a broker connection 159 | """ 160 | return 0 161 | 162 | 163 | def get_broker(list_key: str = Conf.PREFIX) -> Broker: 164 | """ 165 | Gets the configured broker type 166 | :param list_key: optional queue name 167 | :type list_key: str 168 | :return: a broker instance 169 | """ 170 | # custom 171 | if Conf.BROKER_CLASS: 172 | module, func = Conf.BROKER_CLASS.rsplit(".", 1) 173 | m = importlib.import_module(module) 174 | broker = getattr(m, func) 175 | return broker(list_key=list_key) 176 | # disque 177 | elif Conf.DISQUE_NODES: 178 | from django_q.brokers import disque 179 | 180 | return disque.Disque(list_key=list_key) 181 | # Iron MQ 182 | elif Conf.IRON_MQ: 183 | from django_q.brokers import ironmq 184 | 185 | return ironmq.IronMQBroker(list_key=list_key) 186 | # SQS 187 | elif type(Conf.SQS) == dict: 188 | from django_q.brokers import aws_sqs 189 | 190 | return aws_sqs.Sqs(list_key=list_key) 191 | # ORM 192 | elif Conf.ORM: 193 | from django_q.brokers import orm 194 | 195 | return orm.ORM(list_key=list_key) 196 | # Mongo 197 | elif Conf.MONGO: 198 | from django_q.brokers import mongo 199 | 200 | return mongo.Mongo(list_key=list_key) 201 | # default to redis 202 | else: 203 | from django_q.brokers import redis_broker 204 | 205 | return redis_broker.Redis(list_key=list_key) 206 | -------------------------------------------------------------------------------- /django_q/brokers/aws_sqs.py: 
-------------------------------------------------------------------------------- 1 | from boto3 import Session 2 | from botocore.client import ClientError 3 | 4 | from django_q.brokers import Broker 5 | from django_q.conf import Conf 6 | 7 | QUEUE_DOES_NOT_EXIST = "AWS.SimpleQueueService.NonExistentQueue" 8 | 9 | 10 | class Sqs(Broker): 11 | def __init__(self, list_key: str = Conf.PREFIX): 12 | self.sqs = None 13 | super(Sqs, self).__init__(list_key) 14 | self.queue = self.get_queue() 15 | 16 | def __setstate__(self, state): 17 | super(Sqs, self).__setstate__(state) 18 | self.sqs = None 19 | self.queue = self.get_queue() 20 | 21 | def enqueue(self, task): 22 | response = self.queue.send_message(MessageBody=task) 23 | return response.get("MessageId") 24 | 25 | def dequeue(self): 26 | # sqs supports max 10 messages in bulk 27 | if Conf.BULK > 10: 28 | Conf.BULK = 10 29 | 30 | params = {"MaxNumberOfMessages": Conf.BULK, "VisibilityTimeout": Conf.RETRY} 31 | 32 | # sqs long polling 33 | sqs_config = Conf.SQS 34 | if "receive_message_wait_time_seconds" in sqs_config: 35 | wait_time_second = sqs_config.get("receive_message_wait_time_seconds", 20) 36 | 37 | # validation of parameter 38 | if not isinstance(wait_time_second, int): 39 | raise ValueError("receive_message_wait_time_seconds should be int") 40 | if wait_time_second > 20: 41 | raise ValueError( 42 | "receive_message_wait_time_seconds is invalid. Reason: Must be >= 0 and <= 20" 43 | ) 44 | params.update({"WaitTimeSeconds": wait_time_second}) 45 | 46 | tasks = self.queue.receive_messages(**params) 47 | if tasks: 48 | return [(t.receipt_handle, t.body) for t in tasks] 49 | 50 | def acknowledge(self, task_id): 51 | return self.delete(task_id) 52 | 53 | def queue_size(self) -> int: 54 | return int(self.queue.attributes["ApproximateNumberOfMessages"]) 55 | 56 | def lock_size(self) -> int: 57 | return int(self.queue.attributes["ApproximateNumberOfMessagesNotVisible"]) 58 | 59 | def delete(self, task_id): 60 | message = self.sqs.Message(self.queue.url, task_id) 61 | message.delete() 62 | 63 | def fail(self, task_id): 64 | self.delete(task_id) 65 | 66 | def delete_queue(self): 67 | self.queue.delete() 68 | 69 | def purge_queue(self): 70 | self.queue.purge() 71 | 72 | def ping(self) -> bool: 73 | return "sqs" in self.connection.get_available_resources() 74 | 75 | def info(self) -> str: 76 | return "AWS SQS" 77 | 78 | @staticmethod 79 | def get_connection(list_key: str = Conf.PREFIX) -> Session: 80 | config = Conf.SQS 81 | if "aws_region" in config: 82 | config["region_name"] = config["aws_region"] 83 | del config["aws_region"] 84 | 85 | if "receive_message_wait_time_seconds" in config: 86 | del config["receive_message_wait_time_seconds"] 87 | 88 | return Session(**config) 89 | 90 | def get_queue(self): 91 | self.sqs = self.connection.resource("sqs") 92 | 93 | try: 94 | # try to return an existing queue by name. If the queue does not 95 | # exist try to create it. 
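            # get_queue_by_name raises a botocore ClientError whose
            # Error.Code is QUEUE_DOES_NOT_EXIST when the queue is missing;
            # only that code falls through to create_queue below, any other
            # ClientError is re-raised.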
96 | return self.sqs.get_queue_by_name(QueueName=self.list_key) 97 | except ClientError as exp: 98 | if exp.response["Error"]["Code"] != QUEUE_DOES_NOT_EXIST: 99 | raise exp 100 | 101 | return self.sqs.create_queue(QueueName=self.list_key) 102 | -------------------------------------------------------------------------------- /django_q/brokers/disque.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | # External 4 | import redis 5 | 6 | # Django 7 | from django.utils.translation import gettext_lazy as _ 8 | from redis import Redis 9 | 10 | from django_q.brokers import Broker 11 | from django_q.conf import Conf 12 | 13 | 14 | class Disque(Broker): 15 | def enqueue(self, task): 16 | retry = Conf.RETRY if Conf.RETRY > 0 else f"{Conf.RETRY} REPLICATE 1" 17 | return self.connection.execute_command( 18 | f"ADDJOB {self.list_key} {task} 500 RETRY {retry}" 19 | ).decode() 20 | 21 | def dequeue(self): 22 | tasks = self.connection.execute_command( 23 | f"GETJOB COUNT {Conf.BULK} TIMEOUT 1000 FROM {self.list_key}" 24 | ) 25 | if tasks: 26 | return [(t[1].decode(), t[2].decode()) for t in tasks] 27 | 28 | def queue_size(self): 29 | return self.connection.execute_command(f"QLEN {self.list_key}") 30 | 31 | def acknowledge(self, task_id): 32 | command = "FASTACK" if Conf.DISQUE_FASTACK else "ACKJOB" 33 | return self.connection.execute_command(f"{command} {task_id}") 34 | 35 | def ping(self) -> bool: 36 | return self.connection.execute_command("HELLO")[0] > 0 37 | 38 | def delete(self, task_id): 39 | return self.connection.execute_command(f"DELJOB {task_id}") 40 | 41 | def fail(self, task_id): 42 | return self.delete(task_id) 43 | 44 | def delete_queue(self) -> int: 45 | jobs = self.connection.execute_command(f"JSCAN QUEUE {self.list_key}")[1] 46 | if jobs: 47 | job_ids = " ".join(jid.decode() for jid in jobs) 48 | self.connection.execute_command(f"DELJOB {job_ids}") 49 | return len(jobs) 50 | 51 | def info(self) -> str: 52 | if not self._info: 53 | info = self.connection.info("server") 54 | self._info = f'Disque {info["disque_version"]}' 55 | return self._info 56 | 57 | @staticmethod 58 | def get_connection(list_key: str = Conf.PREFIX) -> Redis: 59 | if not Conf.DISQUE_NODES: 60 | raise redis.exceptions.ConnectionError(_("No Disque nodes configured")) 61 | # randomize nodes 62 | random.shuffle(Conf.DISQUE_NODES) 63 | # find one that works 64 | for node in Conf.DISQUE_NODES: 65 | host, port = node.split(":") 66 | kwargs = {"host": host, "port": port} 67 | if Conf.DISQUE_AUTH: 68 | kwargs["password"] = Conf.DISQUE_AUTH 69 | redis_client = redis.Redis(**kwargs) 70 | redis_client.decode_responses = True 71 | try: 72 | redis_client.execute_command("HELLO") 73 | return redis_client 74 | except redis.exceptions.ConnectionError: 75 | continue 76 | raise redis.exceptions.ConnectionError( 77 | _("Could not connect to any Disque nodes") 78 | ) 79 | -------------------------------------------------------------------------------- /django_q/brokers/ironmq.py: -------------------------------------------------------------------------------- 1 | from iron_mq import IronMQ, Queue 2 | from requests.exceptions import HTTPError 3 | 4 | from django_q.brokers import Broker 5 | from django_q.conf import Conf 6 | 7 | 8 | class IronMQBroker(Broker): 9 | def enqueue(self, task): 10 | return self.connection.post(task)["ids"][0] 11 | 12 | def dequeue(self): 13 | timeout = Conf.RETRY or None 14 | tasks = self.connection.get(timeout=timeout, wait=1, max=Conf.BULK)["messages"] 
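        # `timeout` (taken from Conf.RETRY) is the IronMQ reservation window:
        # a message that is not acknowledged within that many seconds becomes
        # visible on the queue again, while `wait=1` long-polls for up to one
        # second.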
15 | if tasks: 16 | return [(t["id"], t["body"]) for t in tasks] 17 | 18 | def ping(self) -> bool: 19 | return self.connection.name == self.list_key 20 | 21 | def info(self) -> str: 22 | return "IronMQ" 23 | 24 | def queue_size(self): 25 | return self.connection.size() 26 | 27 | def delete_queue(self): 28 | try: 29 | return self.connection.delete_queue()["msg"] 30 | except HTTPError: 31 | return False 32 | 33 | def purge_queue(self): 34 | return self.connection.clear() 35 | 36 | def delete(self, task_id): 37 | try: 38 | return self.connection.delete(task_id)["msg"] 39 | except HTTPError: 40 | return False 41 | 42 | def fail(self, task_id): 43 | self.delete(task_id) 44 | 45 | def acknowledge(self, task_id): 46 | return self.delete(task_id) 47 | 48 | @staticmethod 49 | def get_connection(list_key: str = Conf.PREFIX) -> Queue: 50 | ironmq = IronMQ(name=None, **Conf.IRON_MQ) 51 | return ironmq.queue(queue_name=list_key) 52 | -------------------------------------------------------------------------------- /django_q/brokers/mongo.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from time import sleep 3 | 4 | from bson import ObjectId 5 | from django.utils import timezone 6 | from pymongo import MongoClient 7 | from pymongo.errors import ConfigurationError 8 | 9 | from django_q.brokers import Broker 10 | from django_q.conf import Conf 11 | 12 | 13 | def _timeout(): 14 | return timezone.now() - timedelta(seconds=Conf.RETRY) 15 | 16 | 17 | class Mongo(Broker): 18 | def __init__(self, list_key=Conf.PREFIX): 19 | super(Mongo, self).__init__(list_key) 20 | self.collection = self.get_collection() 21 | 22 | def __setstate__(self, state): 23 | super(Mongo, self).__setstate__(state) 24 | self.collection = self.get_collection() 25 | 26 | @staticmethod 27 | def get_connection(list_key: str = Conf.PREFIX) -> MongoClient: 28 | return MongoClient(**Conf.MONGO) 29 | 30 | def get_collection(self): 31 | if not Conf.MONGO_DB: 32 | try: 33 | Conf.MONGO_DB = self.connection.get_default_database().name 34 | except ConfigurationError: 35 | Conf.MONGO_DB = "django-q" 36 | return self.connection[Conf.MONGO_DB][self.list_key] 37 | 38 | def queue_size(self): 39 | return self.collection.count_documents({"lock": {"$lte": _timeout()}}) 40 | 41 | def lock_size(self): 42 | return self.collection.count_documents({"lock": {"$gt": _timeout()}}) 43 | 44 | def purge_queue(self): 45 | return self.delete_queue() 46 | 47 | def ping(self) -> bool: 48 | return self.info is not None 49 | 50 | def info(self) -> str: 51 | if not self._info: 52 | self._info = f"MongoDB {self.connection.server_info()['version']}" 53 | return self._info 54 | 55 | def fail(self, task_id): 56 | self.delete(task_id) 57 | 58 | def enqueue(self, task): 59 | inserted_id = self.collection.insert_one( 60 | {"payload": task, "lock": _timeout()} 61 | ).inserted_id 62 | return str(inserted_id) 63 | 64 | def dequeue(self): 65 | task = self.collection.find_one_and_update( 66 | {"lock": {"$lte": _timeout()}}, {"$set": {"lock": timezone.now()}} 67 | ) 68 | if task: 69 | return [(str(task["_id"]), task["payload"])] 70 | # empty queue, spare the cpu 71 | sleep(Conf.POLL) 72 | 73 | def delete_queue(self): 74 | return self.collection.drop() 75 | 76 | def delete(self, task_id): 77 | self.collection.delete_one({"_id": ObjectId(task_id)}) 78 | 79 | def acknowledge(self, task_id): 80 | return self.delete(task_id) 81 | -------------------------------------------------------------------------------- 
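A minimal custom-broker sketch (added for illustration, not a file from the
original tree): `get_broker` in django_q/brokers/__init__.py accepts any
dotted path to a Broker subclass via the `broker_class` setting. The module
path `myapp.brokers` used below is hypothetical.

from django_q.brokers import Broker
from django_q.conf import Conf


class MemoryBroker(Broker):
    """Toy broker keeping tasks in a process-local list.

    Nothing is persisted or shared between processes, so this is only
    suitable for experiments.
    """

    _queue = []  # class-level, so all instances in this process share it
    _next_id = 0

    @staticmethod
    def get_connection(list_key: str = Conf.PREFIX):
        return None  # nothing external to connect to

    def enqueue(self, task):
        MemoryBroker._next_id += 1
        MemoryBroker._queue.append((MemoryBroker._next_id, task))
        return MemoryBroker._next_id

    def dequeue(self):
        # return a list of (task_id, payload) tuples, like the other brokers
        if MemoryBroker._queue:
            return [MemoryBroker._queue.pop(0)]

    def queue_size(self):
        return len(MemoryBroker._queue)

    def acknowledge(self, task_id):
        pass  # tasks are removed on dequeue, so there is nothing to ack

    def ping(self) -> bool:
        return True

    def info(self) -> str:
        return "MemoryBroker"

It would be selected in settings.py with:
Q_CLUSTER = {"broker_class": "myapp.brokers.MemoryBroker"}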
/django_q/brokers/orm.py: -------------------------------------------------------------------------------- 1 | from datetime import timedelta 2 | from time import sleep 3 | 4 | from django import db 5 | from django.db import transaction 6 | from django.utils import timezone 7 | 8 | from django_q.brokers import Broker 9 | from django_q.conf import Conf, logger 10 | from django_q.models import OrmQ 11 | 12 | 13 | def _timeout(): 14 | return timezone.now() - timedelta(seconds=Conf.RETRY) 15 | 16 | 17 | class ORM(Broker): 18 | @staticmethod 19 | def get_connection(list_key: str = Conf.PREFIX): 20 | if transaction.get_autocommit( 21 | using=Conf.ORM 22 | ): # Only True when not in an atomic block 23 | # Make sure stale connections in the broker thread are explicitly 24 | # closed before attempting DB access. 25 | # logger.debug("Broker thread calling close_old_connections") 26 | db.close_old_connections() 27 | else: 28 | logger.debug("Broker in an atomic transaction") 29 | return OrmQ.objects.using(Conf.ORM) 30 | 31 | def queue_size(self) -> int: 32 | return ( 33 | self.get_connection() 34 | .filter(key=self.list_key, lock__lte=_timeout()) 35 | .count() 36 | ) 37 | 38 | def lock_size(self) -> int: 39 | return ( 40 | self.get_connection().filter(key=self.list_key, lock__gt=_timeout()).count() 41 | ) 42 | 43 | def purge_queue(self): 44 | return self.get_connection().filter(key=self.list_key).delete() 45 | 46 | def ping(self) -> bool: 47 | return True 48 | 49 | def info(self) -> str: 50 | if not self._info: 51 | self._info = f"ORM {Conf.ORM}" 52 | return self._info 53 | 54 | def fail(self, task_id): 55 | self.delete(task_id) 56 | 57 | def enqueue(self, task): 58 | package = self.get_connection().create( 59 | key=self.list_key, payload=task, lock=_timeout() 60 | ) 61 | return package.pk 62 | 63 | def dequeue(self): 64 | tasks = self.get_connection().filter(key=self.list_key, lock__lt=_timeout())[ 65 | 0 : Conf.BULK 66 | ] 67 | if tasks: 68 | task_list = [] 69 | for task in tasks: 70 | if ( 71 | self.get_connection() 72 | .filter(id=task.id, lock=task.lock) 73 | .update(lock=timezone.now()) 74 | ): 75 | task_list.append((task.pk, task.payload)) 76 | # else don't process, as another cluster has been faster than us on that task 77 | return task_list 78 | # empty queue, spare the cpu 79 | sleep(Conf.POLL) 80 | 81 | def delete_queue(self): 82 | return self.purge_queue() 83 | 84 | def delete(self, task_id): 85 | self.get_connection().filter(pk=task_id).delete() 86 | 87 | def acknowledge(self, task_id): 88 | return self.delete(task_id) 89 | -------------------------------------------------------------------------------- /django_q/brokers/redis_broker.py: -------------------------------------------------------------------------------- 1 | import redis 2 | from redis import Redis 3 | 4 | from django_q.brokers import Broker 5 | from django_q.conf import Conf, logger 6 | 7 | try: 8 | import django_redis 9 | except ImportError: 10 | django_redis = None 11 | 12 | 13 | class Redis(Broker): 14 | def __init__(self, list_key: str = Conf.PREFIX): 15 | super(Redis, self).__init__(list_key=f"django_q:{list_key}:q") 16 | 17 | def enqueue(self, task): 18 | return self.connection.rpush(self.list_key, task) 19 | 20 | def dequeue(self): 21 | task = self.connection.blpop(self.list_key, 1) 22 | if task: 23 | return [(None, task[1])] 24 | 25 | def queue_size(self): 26 | return self.connection.llen(self.list_key) 27 | 28 | def delete_queue(self): 29 | return self.connection.delete(self.list_key) 30 | 31 | def 
purge_queue(self): 32 | return self.connection.ltrim(self.list_key, 1, 0) 33 | 34 | def ping(self) -> bool: 35 | try: 36 | return self.connection.ping() 37 | except redis.ConnectionError as e: 38 | logger.error("Can not connect to Redis server.") 39 | raise e 40 | 41 | def info(self) -> str: 42 | if not self._info: 43 | info = self.connection.info("server") 44 | self._info = f"Redis {info['redis_version']}" 45 | return self._info 46 | 47 | def set_stat(self, key: str, value: str, timeout: int): 48 | self.connection.set(key, value, timeout) 49 | 50 | def get_stat(self, key: str): 51 | if self.connection.exists(key): 52 | return self.connection.get(key) 53 | 54 | def get_stats(self, pattern: str): 55 | keys = self.connection.keys(pattern=pattern) 56 | if keys: 57 | return self.connection.mget(keys) 58 | 59 | @staticmethod 60 | def get_connection(list_key: str = Conf.PREFIX) -> Redis: 61 | if django_redis and Conf.DJANGO_REDIS: 62 | return django_redis.get_redis_connection(Conf.DJANGO_REDIS) 63 | if isinstance(Conf.REDIS, str): 64 | return redis.from_url(Conf.REDIS) 65 | return redis.StrictRedis(**Conf.REDIS) 66 | -------------------------------------------------------------------------------- /django_q/conf.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from copy import deepcopy 4 | from multiprocessing import cpu_count 5 | from signal import signal 6 | from warnings import warn 7 | 8 | import pkg_resources 9 | from django.conf import settings 10 | from django.utils.translation import gettext_lazy as _ 11 | 12 | from django_q.queues import Queue 13 | 14 | # optional 15 | try: 16 | import psutil 17 | except ImportError: 18 | psutil = None 19 | 20 | try: 21 | from croniter import croniter 22 | except ImportError: 23 | croniter = None 24 | 25 | try: 26 | import resource 27 | except ModuleNotFoundError: 28 | resource = None 29 | 30 | 31 | class Conf: 32 | """ 33 | Configuration class 34 | """ 35 | 36 | try: 37 | conf = settings.Q_CLUSTER 38 | except AttributeError: 39 | conf = {} 40 | 41 | # Redis server configuration . Follows standard redis keywords 42 | REDIS = conf.get("redis", {}) 43 | 44 | # Support for Django-Redis connections 45 | 46 | DJANGO_REDIS = conf.get("django_redis", None) 47 | 48 | # Disque broker 49 | DISQUE_NODES = conf.get("disque_nodes", None) 50 | 51 | # Optional Authentication 52 | DISQUE_AUTH = conf.get("disque_auth", None) 53 | 54 | # Optional Fast acknowledge 55 | DISQUE_FASTACK = conf.get("disque_fastack", False) 56 | 57 | # IronMQ broker 58 | IRON_MQ = conf.get("iron_mq", None) 59 | 60 | # SQS broker 61 | SQS = conf.get("sqs", None) 62 | 63 | # ORM broker 64 | ORM = conf.get("orm", None) 65 | 66 | # ORM support for read/write replicas 67 | HAS_REPLICA = conf.get("has_replica", False) 68 | 69 | # Custom broker class 70 | BROKER_CLASS = conf.get("broker_class", None) 71 | 72 | # Database Poll 73 | POLL = conf.get("poll", 0.2) 74 | 75 | # MongoDB broker 76 | MONGO = conf.get("mongo", None) 77 | MONGO_DB = conf.get("mongo_db", None) 78 | 79 | # Name of the cluster or site. For when you run multiple sites on one redis server 80 | PREFIX = conf.get("name", "default") 81 | 82 | # Log output level 83 | LOG_LEVEL = conf.get("log_level", "INFO") 84 | 85 | # Maximum number of successful tasks kept in the database. 0 saves everything. -1 saves none 86 | # Failures are always saved 87 | SAVE_LIMIT = conf.get("save_limit", 250) 88 | 89 | # Guard loop sleep in seconds. Should be between 0 and 60 seconds. 
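    # The cluster's guard loop sleeps this long between health checks of its
    # pusher, monitor and worker processes.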
90 | GUARD_CYCLE = conf.get("guard_cycle", 0.5) 91 | 92 | # Disable the scheduler 93 | SCHEDULER = conf.get("scheduler", True) 94 | 95 | # Number of workers in the pool. Default is cpu count if implemented, otherwise 4. 96 | WORKERS = conf.get("workers", False) 97 | if not WORKERS: 98 | try: 99 | WORKERS = cpu_count() 100 | # in rare cases this might fail 101 | except NotImplementedError: 102 | # try psutil 103 | if psutil: 104 | WORKERS = psutil.cpu_count() or 4 105 | else: 106 | # sensible default 107 | WORKERS = 4 108 | 109 | # Option to undaemonize the workers and allow them to spawn child processes 110 | DAEMONIZE_WORKERS = conf.get("daemonize_workers", True) 111 | 112 | # Maximum number of tasks that each cluster can work on 113 | QUEUE_LIMIT = conf.get("queue_limit", int(WORKERS) ** 2) 114 | 115 | # Sets compression of redis packages 116 | COMPRESSED = conf.get("compress", False) 117 | 118 | # Number of tasks each worker can handle before it gets recycled. Useful for releasing memory 119 | RECYCLE = conf.get("recycle", 500) 120 | 121 | # The maximum resident set size in kilobytes before a worker will recycle. Useful for limiting memory usage 122 | # Not available on all platforms 123 | MAX_RSS = conf.get("max_rss", None) 124 | 125 | # Number of seconds to wait for a worker to finish. 126 | TIMEOUT = conf.get("timeout", None) 127 | 128 | # Whether to acknowledge unsuccessful tasks. 129 | # This causes failed tasks to be considered delivered, thereby removing them from 130 | # the task queue. Defaults to False. 131 | ACK_FAILURES = conf.get("ack_failures", False) 132 | 133 | # Number of seconds to wait for acknowledgement before retrying a task 134 | # Only works with brokers that guarantee delivery. Defaults to 60 seconds. 135 | RETRY = conf.get("retry", 60) 136 | 137 | # Verify if retry and timeout settings are correct 138 | if not TIMEOUT or (TIMEOUT > RETRY): 139 | warn( 140 | """Retry and timeout are misconfigured. Set retry larger than timeout, 141 | failure to do so will cause the tasks to be retriggered before completion. 142 | See https://django-q.readthedocs.io/en/latest/configure.html#retry for details.""" 143 | ) 144 | 145 | # Sets the amount of tasks the cluster will try to pop off the broker. 146 | # If it supports bulk gets. 147 | BULK = conf.get("bulk", 1) 148 | 149 | # The Django Admin label for this app 150 | LABEL = conf.get("label", "Django Q") 151 | 152 | # Sets the number of processors for each worker, defaults to all. 153 | CPU_AFFINITY = conf.get("cpu_affinity", 0) 154 | 155 | # Global sync option to for debugging 156 | SYNC = conf.get("sync", False) 157 | 158 | # The Django cache to use 159 | CACHE = conf.get("cache", "default") 160 | 161 | # Use the cache as result backend. Can be 'True' or an integer representing the global cache timeout. 162 | # i.e 'cached: 60' , will make all results go the cache and expire in 60 seconds. 163 | CACHED = conf.get("cached", False) 164 | 165 | # If set to False the scheduler won't execute tasks in the past. 166 | # Instead it will run once and reschedule the next run in the future. Defaults to True. 167 | CATCH_UP = conf.get("catch_up", True) 168 | 169 | # Use the secret key for package signing 170 | # Django itself should raise an error if it's not configured 171 | SECRET_KEY = settings.SECRET_KEY 172 | 173 | # The redis stats key 174 | Q_STAT = f"django_q:{PREFIX}:cluster" 175 | 176 | # Optional error reporting setup 177 | ERROR_REPORTER = conf.get("error_reporter", {}) 178 | 179 | # Optional attempt count. 
set to 0 for infinite attempts 180 | MAX_ATTEMPTS = conf.get("max_attempts", 0) 181 | 182 | # OSX doesn't implement qsize because of missing sem_getvalue() 183 | try: 184 | QSIZE = Queue().qsize() == 0 185 | except (NotImplementedError, OSError): 186 | QSIZE = False 187 | 188 | # Getting the signal names 189 | SIGNAL_NAMES = dict( 190 | (getattr(signal, n), n) 191 | for n in dir(signal) 192 | if n.startswith("SIG") and "_" not in n 193 | ) 194 | 195 | # Translators: Cluster status descriptions 196 | STARTING = _("Starting") 197 | WORKING = _("Working") 198 | IDLE = _("Idle") 199 | STOPPED = _("Stopped") 200 | STOPPING = _("Stopping") 201 | 202 | # to manage workarounds during testing 203 | TESTING = conf.get("testing", False) 204 | 205 | 206 | # logger 207 | logger = logging.getLogger("django-q") 208 | 209 | # Set up standard logging handler in case there is none 210 | if not logger.handlers: 211 | logger.setLevel(level=getattr(logging, Conf.LOG_LEVEL)) 212 | logger.propagate = False 213 | formatter = logging.Formatter( 214 | fmt="%(asctime)s [Q] %(levelname)s %(message)s", datefmt="%H:%M:%S" 215 | ) 216 | handler = logging.StreamHandler() 217 | handler.setFormatter(formatter) 218 | logger.addHandler(handler) 219 | 220 | 221 | # Error Reporting Interface 222 | class ErrorReporter: 223 | 224 | # initialize with iterator of reporters (better name, targets?) 225 | def __init__(self, reporters): 226 | self.targets = [target for target in reporters] 227 | 228 | # report error to all configured targets 229 | def report(self): 230 | for t in self.targets: 231 | t.report() 232 | 233 | 234 | # error reporting setup (sentry or rollbar) 235 | if Conf.ERROR_REPORTER: 236 | error_conf = deepcopy(Conf.ERROR_REPORTER) 237 | try: 238 | reporters = [] 239 | # iterate through the configured error reporters, 240 | # and instantiate an ErrorReporter using the provided config 241 | for name, conf in error_conf.items(): 242 | for entry in pkg_resources.iter_entry_points( 243 | "djangoq.errorreporters", name 244 | ): 245 | Reporter = entry.load() 246 | reporters.append(Reporter(**conf)) 247 | error_reporter = ErrorReporter(reporters) 248 | except ImportError: 249 | error_reporter = None 250 | else: 251 | error_reporter = None 252 | 253 | 254 | # get parent pid compatibility 255 | def get_ppid(): 256 | if hasattr(os, "getppid"): 257 | return os.getppid() 258 | elif psutil: 259 | return psutil.Process(os.getpid()).ppid() 260 | else: 261 | raise OSError( 262 | "Your OS does not support `os.getppid`. Please install `psutil` as an alternative provider." 
263 | ) 264 | -------------------------------------------------------------------------------- /django_q/core_signing.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import time 3 | import zlib 4 | 5 | from django.core.signing import BadSignature, JSONSerializer, SignatureExpired 6 | from django.core.signing import Signer as Sgnr 7 | from django.core.signing import TimestampSigner as TsS 8 | from django.core.signing import b64_decode, dumps 9 | from django.utils import baseconv 10 | from django.utils.crypto import constant_time_compare 11 | from django.utils.encoding import force_bytes, force_str 12 | 13 | dumps = dumps 14 | 15 | """ 16 | The loads function is the same as the `django.core.signing.loads` function 17 | The difference is that `this` loads function calls `TimestampSigner` and `Signer` 18 | """ 19 | 20 | 21 | def loads( 22 | s, 23 | key=None, 24 | salt: str = "django.core.signing", 25 | serializer=JSONSerializer, 26 | max_age=None, 27 | ): 28 | """ 29 | Reverse of dumps(), raise BadSignature if signature fails. 30 | 31 | The serializer is expected to accept a bytestring. 32 | """ 33 | # TimestampSigner.unsign() returns str but base64 and zlib compression 34 | # operate on bytes. 35 | base64d = force_bytes(TimestampSigner(key, salt=salt).unsign(s, max_age=max_age)) 36 | decompress = False 37 | if base64d[:1] == b".": 38 | # It's compressed; uncompress it first 39 | base64d = base64d[1:] 40 | decompress = True 41 | data = b64_decode(base64d) 42 | if decompress: 43 | data = zlib.decompress(data) 44 | return serializer().loads(data) 45 | 46 | 47 | class Signer(Sgnr): 48 | def unsign(self, signed_value): 49 | signed_value = force_str(signed_value) 50 | if self.sep not in signed_value: 51 | raise BadSignature('No "%s" found in value' % self.sep) 52 | value, sig = signed_value.rsplit(self.sep, 1) 53 | if constant_time_compare(sig, self.signature(value)): 54 | return force_str(value) 55 | raise BadSignature('Signature "%s" does not match' % sig) 56 | 57 | 58 | """ 59 | TimestampSigner is also the same as `django.core.signing.TimestampSigner` but is 60 | calling `this` Signer. 61 | """ 62 | 63 | 64 | class TimestampSigner(Signer, TsS): 65 | def unsign(self, value, max_age=None): 66 | """ 67 | Retrieve original value and check it wasn't signed more 68 | than max_age seconds ago. 69 | """ 70 | result = super(TimestampSigner, self).unsign(value) 71 | value, timestamp = result.rsplit(self.sep, 1) 72 | timestamp = baseconv.base62.decode(timestamp) 73 | if max_age is not None: 74 | if isinstance(max_age, datetime.timedelta): 75 | max_age = max_age.total_seconds() 76 | # Check timestamp is not older than max_age 77 | age = time.time() - timestamp 78 | if age > max_age: 79 | raise SignatureExpired("Signature age %s > %s seconds" % (age, max_age)) 80 | return value 81 | -------------------------------------------------------------------------------- /django_q/humanhash.py: -------------------------------------------------------------------------------- 1 | """ 2 | humanhash: Human-readable representations of digests. 3 | 4 | The simplest ways to use this module are the :func:`humanize` and :func:`uuid` 5 | functions. For tighter control over the output, see :class:`HumanHasher`. 
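For example (digest and output taken from the doctest in
:meth:`HumanHasher.humanize` below; the uuid call is random, so skipped):

    >>> from django_q.humanhash import humanize, uuid
    >>> humanize('60ad8d0d871b6095808297')
    'sodium-magnesium-nineteen-hydrogen'
    >>> uuid()  # doctest: +SKIP
    ('victor-bacon-zulu-lima', '...')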
6 | """ 7 | import operator 8 | import uuid as uuidlib 9 | from argparse import ArgumentError 10 | from functools import reduce 11 | 12 | DEFAULT_WORDLIST = ( 13 | "ack", 14 | "alabama", 15 | "alanine", 16 | "alaska", 17 | "alpha", 18 | "angel", 19 | "apart", 20 | "april", 21 | "arizona", 22 | "arkansas", 23 | "artist", 24 | "asparagus", 25 | "aspen", 26 | "august", 27 | "autumn", 28 | "avocado", 29 | "bacon", 30 | "bakerloo", 31 | "batman", 32 | "beer", 33 | "berlin", 34 | "beryllium", 35 | "black", 36 | "blossom", 37 | "blue", 38 | "bluebird", 39 | "bravo", 40 | "bulldog", 41 | "burger", 42 | "butter", 43 | "california", 44 | "carbon", 45 | "cardinal", 46 | "carolina", 47 | "carpet", 48 | "cat", 49 | "ceiling", 50 | "charlie", 51 | "chicken", 52 | "coffee", 53 | "cola", 54 | "cold", 55 | "colorado", 56 | "comet", 57 | "connecticut", 58 | "crazy", 59 | "cup", 60 | "dakota", 61 | "december", 62 | "delaware", 63 | "delta", 64 | "diet", 65 | "don", 66 | "double", 67 | "early", 68 | "earth", 69 | "east", 70 | "echo", 71 | "edward", 72 | "eight", 73 | "eighteen", 74 | "eleven", 75 | "emma", 76 | "enemy", 77 | "equal", 78 | "failed", 79 | "fanta", 80 | "fifteen", 81 | "fillet", 82 | "finch", 83 | "fish", 84 | "five", 85 | "fix", 86 | "floor", 87 | "florida", 88 | "football", 89 | "four", 90 | "fourteen", 91 | "foxtrot", 92 | "freddie", 93 | "friend", 94 | "fruit", 95 | "gee", 96 | "georgia", 97 | "glucose", 98 | "golf", 99 | "green", 100 | "grey", 101 | "hamper", 102 | "happy", 103 | "harry", 104 | "hawaii", 105 | "helium", 106 | "high", 107 | "hot", 108 | "hotel", 109 | "hydrogen", 110 | "idaho", 111 | "illinois", 112 | "india", 113 | "indigo", 114 | "ink", 115 | "iowa", 116 | "island", 117 | "item", 118 | "jersey", 119 | "jig", 120 | "johnny", 121 | "juliet", 122 | "july", 123 | "jupiter", 124 | "kansas", 125 | "kentucky", 126 | "kilo", 127 | "king", 128 | "kitten", 129 | "lactose", 130 | "lake", 131 | "lamp", 132 | "lemon", 133 | "leopard", 134 | "lima", 135 | "lion", 136 | "lithium", 137 | "london", 138 | "louisiana", 139 | "low", 140 | "magazine", 141 | "magnesium", 142 | "maine", 143 | "mango", 144 | "march", 145 | "mars", 146 | "maryland", 147 | "massachusetts", 148 | "may", 149 | "mexico", 150 | "michigan", 151 | "mike", 152 | "minnesota", 153 | "mirror", 154 | "mississippi", 155 | "missouri", 156 | "mobile", 157 | "mockingbird", 158 | "monkey", 159 | "montana", 160 | "moon", 161 | "mountain", 162 | "muppet", 163 | "music", 164 | "nebraska", 165 | "neptune", 166 | "network", 167 | "nevada", 168 | "nine", 169 | "nineteen", 170 | "nitrogen", 171 | "north", 172 | "november", 173 | "nuts", 174 | "october", 175 | "ohio", 176 | "oklahoma", 177 | "one", 178 | "orange", 179 | "oranges", 180 | "oregon", 181 | "oscar", 182 | "oven", 183 | "oxygen", 184 | "papa", 185 | "paris", 186 | "pasta", 187 | "pennsylvania", 188 | "pip", 189 | "pizza", 190 | "pluto", 191 | "potato", 192 | "princess", 193 | "purple", 194 | "quebec", 195 | "queen", 196 | "quiet", 197 | "red", 198 | "river", 199 | "robert", 200 | "robin", 201 | "romeo", 202 | "rugby", 203 | "sad", 204 | "salami", 205 | "saturn", 206 | "september", 207 | "seven", 208 | "seventeen", 209 | "shade", 210 | "sierra", 211 | "single", 212 | "sink", 213 | "six", 214 | "sixteen", 215 | "skylark", 216 | "snake", 217 | "social", 218 | "sodium", 219 | "solar", 220 | "south", 221 | "spaghetti", 222 | "speaker", 223 | "spring", 224 | "stairway", 225 | "steak", 226 | "stream", 227 | "summer", 228 | "sweet", 229 | "table", 230 | "tango", 231 | "ten", 232 | 
"tennessee", 233 | "tennis", 234 | "texas", 235 | "thirteen", 236 | "three", 237 | "timing", 238 | "triple", 239 | "twelve", 240 | "twenty", 241 | "two", 242 | "uncle", 243 | "undress", 244 | "uniform", 245 | "uranus", 246 | "utah", 247 | "vegan", 248 | "venus", 249 | "vermont", 250 | "victor", 251 | "video", 252 | "violet", 253 | "virginia", 254 | "washington", 255 | "west", 256 | "whiskey", 257 | "white", 258 | "william", 259 | "winner", 260 | "winter", 261 | "wisconsin", 262 | "wolfram", 263 | "wyoming", 264 | "xray", 265 | "yankee", 266 | "yellow", 267 | "zebra", 268 | "zulu", 269 | ) 270 | 271 | 272 | class HumanHasher: 273 | 274 | """ 275 | Transforms hex digests to human-readable strings. 276 | 277 | The format of these strings will look something like: 278 | `victor-bacon-zulu-lima`. The output is obtained by compressing the input 279 | digest to a fixed number of bytes, then mapping those bytes to one of 256 280 | words. A default wordlist is provided, but you can override this if you 281 | prefer. 282 | 283 | As long as you use the same wordlist, the output will be consistent (i.e. 284 | the same digest will always render the same representation). 285 | """ 286 | 287 | def __init__(self, wordlist=DEFAULT_WORDLIST): 288 | if len(wordlist) != 256: 289 | raise ArgumentError("Wordlist must have exactly 256 items") 290 | self.wordlist = wordlist 291 | 292 | def humanize(self, hexdigest, words=4, separator="-"): 293 | 294 | """ 295 | Humanize a given hexadecimal digest. 296 | 297 | Change the number of words output by specifying `words`. Change the 298 | word separator with `separator`. 299 | 300 | >>> digest = '60ad8d0d871b6095808297' 301 | >>> HumanHasher().humanize(digest) 302 | 'sodium-magnesium-nineteen-hydrogen' 303 | """ 304 | 305 | # Gets a list of byte values between 0-255. 306 | bytes = [ 307 | int(x, 16) 308 | for x in list(map("".join, list(zip(hexdigest[::2], hexdigest[1::2])))) 309 | ] 310 | # Compress an arbitrary number of bytes to `words`. 311 | compressed = self.compress(bytes, words) 312 | # Map the compressed byte values through the word list. 313 | return separator.join(self.wordlist[byte] for byte in compressed) 314 | 315 | @staticmethod 316 | def compress(bytes, target): 317 | 318 | """ 319 | Compress a list of byte values to a fixed target length. 320 | 321 | >>> bytes = [96, 173, 141, 13, 135, 27, 96, 149, 128, 130, 151] 322 | >>> HumanHasher.compress(bytes, 4) 323 | [205, 128, 156, 96] 324 | 325 | Attempting to compress a smaller number of bytes to a larger number is 326 | an error: 327 | 328 | >>> HumanHasher.compress(bytes, 15) # doctest: +ELLIPSIS 329 | Traceback (most recent call last): 330 | ... 331 | ValueError: Fewer input bytes than requested output 332 | """ 333 | 334 | length = len(bytes) 335 | if target > length: 336 | raise ValueError("Fewer input bytes than requested output") 337 | 338 | # Split `bytes` into `target` segments. 339 | seg_size = length // target 340 | segments = [bytes[i * seg_size : (i + 1) * seg_size] for i in range(target)] 341 | # Catch any left-over bytes in the last segment. 342 | segments[-1].extend(bytes[target * seg_size :]) 343 | 344 | # Use a simple XOR checksum-like function for compression. 345 | checksum = lambda bytes: reduce(operator.xor, bytes, 0) 346 | checksums = list(map(checksum, segments)) 347 | return checksums 348 | 349 | def uuid(self, **params): 350 | 351 | """ 352 | Generate a UUID with a human-readable representation. 353 | 354 | Returns `(human_repr, full_digest)`. 
Accepts the same keyword arguments 355 | as :meth:`humanize` (they'll be passed straight through). 356 | """ 357 | 358 | digest = str(uuidlib.uuid4()).replace("-", "") 359 | return self.humanize(digest, **params), digest 360 | 361 | 362 | DEFAULT_HASHER = HumanHasher() 363 | uuid = DEFAULT_HASHER.uuid 364 | humanize = DEFAULT_HASHER.humanize 365 | -------------------------------------------------------------------------------- /django_q/locale/de/LC_MESSAGES/django.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/locale/de/LC_MESSAGES/django.mo -------------------------------------------------------------------------------- /django_q/locale/fr/LC_MESSAGES/django.mo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/locale/fr/LC_MESSAGES/django.mo -------------------------------------------------------------------------------- /django_q/locale/fr/LC_MESSAGES/django.po: -------------------------------------------------------------------------------- 1 | # SOME DESCRIPTIVE TITLE. 2 | # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER 3 | # This file is distributed under the same license as the PACKAGE package. 4 | # FIRST AUTHOR , YEAR. 5 | msgid "" 6 | msgstr "" 7 | "Project-Id-Version: \n" 8 | "Report-Msgid-Bugs-To: \n" 9 | "POT-Creation-Date: 2018-08-05 15:48+0200\n" 10 | "PO-Revision-Date: 2018-08-05 18:28+0200\n" 11 | "Language-Team: \n" 12 | "MIME-Version: 1.0\n" 13 | "Content-Type: text/plain; charset=UTF-8\n" 14 | "Content-Transfer-Encoding: 8bit\n" 15 | "X-Generator: Poedit 2.1.1\n" 16 | "Plural-Forms: nplurals=2; plural=(n > 1);\n" 17 | "Last-Translator: Thierry BOULOGNE \n" 18 | "Language: fr-FR\n" 19 | 20 | #: admin.py:48 21 | msgid "Resubmit selected tasks to queue" 22 | msgstr "Resoumettre les tâches sélectionnées à la file d'attente" 23 | 24 | #: cluster.py:54 25 | msgid "Q Cluster-{} starting." 26 | msgstr "Démarrage de Q Cluster-{}." 27 | 28 | #: cluster.py:62 29 | msgid "Q Cluster-{} stopping." 30 | msgstr "Arrêt de Q Cluster-{}." 31 | 32 | #: cluster.py:65 33 | msgid "Q Cluster-{} has stopped." 34 | msgstr "Q Cluster-{} a été arrêté." 35 | 36 | #: cluster.py:71 37 | msgid "{} got signal {}" 38 | msgstr "{} a reçu le signal {}" 39 | 40 | #: cluster.py:169 41 | msgid "reincarnated monitor {} after sudden death" 42 | msgstr "moniteur réintégré {} après un arrêt intempestif" 43 | 44 | #: cluster.py:172 45 | msgid "reincarnated pusher {} after sudden death" 46 | msgstr "pousseur réintégré {} après un arrêt intempestif" 47 | 48 | #: cluster.py:179 49 | msgid "reincarnated worker {} after timeout" 50 | msgstr "processus réintégré {} après une attente trop longue" 51 | 52 | #: cluster.py:181 53 | msgid "recycled worker {}" 54 | msgstr "processus recyclé {}" 55 | 56 | #: cluster.py:183 57 | msgid "reincarnated worker {} after death" 58 | msgstr "processus réintégré {} après arrêt" 59 | 60 | #: cluster.py:202 61 | msgid "{} guarding cluster at {}" 62 | msgstr "{} surveillance du cluster à {}" 63 | 64 | #: cluster.py:205 65 | msgid "Q Cluster-{} running." 66 | msgstr "Q Cluster-{} en cours d'exécution." 67 | 68 | #: cluster.py:239 69 | msgid "{} stopping cluster processes" 70 | msgstr "{} arrêt des processus de cluster" 71 | 72 | #: cluster.py:264 73 | msgid "{} waiting for the monitor."
74 | msgstr "{} en attente du moniteur." 75 | 76 | #: cluster.py:285 77 | msgid "{} pushing tasks at {}" 78 | msgstr "{} envoie des tâches à {}" 79 | 80 | #: cluster.py:306 81 | msgid "queueing from {}" 82 | msgstr "mise en file d'attente depuis {}" 83 | 84 | #: cluster.py:309 85 | msgid "{} stopped pushing tasks" 86 | msgstr "{} a cessé de pousser les tâches" 87 | 88 | #: cluster.py:320 89 | msgid "{} monitoring at {}" 90 | msgstr "{} en surveillance à {}" 91 | 92 | #: cluster.py:334 93 | msgid "Processed [{}]" 94 | msgstr "Traité [{}]" 95 | 96 | #: cluster.py:337 97 | msgid "Failed [{}] - {}" 98 | msgstr "Échec [{}] - {}" 99 | 100 | #: cluster.py:338 101 | msgid "{} stopped monitoring results" 102 | msgstr "{} a cessé de surveiller les résultats" 103 | 104 | #: cluster.py:349 105 | msgid "{} ready for work at {}" 106 | msgstr "{} prêt pour le travail à {}" 107 | 108 | #: cluster.py:357 109 | msgid "{} processing [{}]" 110 | msgstr "{} en cours de traitement [{}]" 111 | 112 | #: cluster.py:398 113 | msgid "{} stopped doing work" 114 | msgstr "{} a cessé de travailler" 115 | 116 | #: cluster.py:543 117 | msgid "{} failed to create a task from schedule [{}]" 118 | msgstr "{} Échec de la création d'une tâche à partir de la planification [{}]" 119 | 120 | #: cluster.py:547 121 | msgid "{} created a task from schedule [{}]" 122 | msgstr "{} a créé une tâche à partir de la planification [{}]" 123 | 124 | #: cluster.py:595 125 | msgid "{} will use cpu {}" 126 | msgstr "{} utilisera le CPU {}" 127 | 128 | #: conf.py:169 129 | msgid "Starting" 130 | msgstr "Démarrage" 131 | 132 | #: conf.py:170 133 | msgid "Working" 134 | msgstr "Actif" 135 | 136 | #: conf.py:171 137 | msgid "Idle" 138 | msgstr "En attente" 139 | 140 | #: conf.py:172 141 | msgid "Stopped" 142 | msgstr "Arrêté" 143 | 144 | #: conf.py:173 145 | msgid "Stopping" 146 | msgstr "En cours d’arrêt" 147 | 148 | #: management/commands/qcluster.py:9 149 | msgid "Starts a Django Q Cluster." 150 | msgstr "Démarre un cluster Django Q." 151 | 152 | #: management/commands/qinfo.py:11 153 | msgid "General information over all clusters." 154 | msgstr "Informations générales sur tous les clusters." 155 | 156 | #: management/commands/qmonitor.py:9 157 | msgid "Monitors Q Cluster activity" 158 | msgstr "Surveille l'activité du cluster Q" 159 | 160 | #: models.py:104 161 | msgid "Successful task" 162 | msgstr "Tâche réussie" 163 | 164 | #: models.py:105 165 | msgid "Successful tasks" 166 | msgstr "Tâches réussies" 167 | 168 | #: models.py:121 169 | msgid "Failed task" 170 | msgstr "Tâche échouée" 171 | 172 | #: models.py:122 173 | msgid "Failed tasks" 174 | msgstr "Tâches échouées" 175 | 176 | #: models.py:131 177 | msgid "e.g. 1, 2, 'John'" 178 | msgstr "p. ex. 1, 2, ‘Jean’" 179 | 180 | #: models.py:132 181 | msgid "e.g. x=1, y=2, name='John'" 182 | msgstr "p. ex. 
x = 1, y = 2, nom = ‘Jean’" 183 | 184 | #: models.py:142 185 | msgid "Once" 186 | msgstr "Une fois" 187 | 188 | #: models.py:143 189 | msgid "Minutes" 190 | msgstr "Minutes" 191 | 192 | #: models.py:144 193 | msgid "Hourly" 194 | msgstr "Toutes les heures" 195 | 196 | #: models.py:145 197 | msgid "Daily" 198 | msgstr "Quotidien" 199 | 200 | #: models.py:146 201 | msgid "Weekly" 202 | msgstr "Hebdomadaire" 203 | 204 | #: models.py:147 205 | msgid "Monthly" 206 | msgstr "Mensuel" 207 | 208 | #: models.py:148 209 | msgid "Quarterly" 210 | msgstr "Trimestriel" 211 | 212 | #: models.py:149 213 | msgid "Yearly" 214 | msgstr "Annuel" 215 | 216 | #: models.py:151 217 | msgid "Schedule Type" 218 | msgstr "Type de planification" 219 | 220 | #: models.py:153 221 | msgid "Number of minutes for the Minutes type" 222 | msgstr "Nombre de minutes pour le type Minutes" 223 | 224 | #: models.py:154 225 | msgid "Repeats" 226 | msgstr "Répétitions" 227 | 228 | #: models.py:154 229 | msgid "n = n times, -1 = forever" 230 | msgstr "n = n fois, -1 = toujours" 231 | 232 | #: models.py:155 233 | msgid "Next Run" 234 | msgstr "Prochaine exécution" 235 | 236 | #: models.py:180 237 | msgid "Scheduled task" 238 | msgstr "Tâche planifiée" 239 | 240 | #: models.py:181 241 | msgid "Scheduled tasks" 242 | msgstr "Tâches planifiées" 243 | 244 | #: models.py:204 245 | msgid "Queued task" 246 | msgstr "Tâche en file d'attente" 247 | 248 | #: models.py:205 249 | msgid "Queued tasks" 250 | msgstr "Tâches en file d'attente" 251 | 252 | #: monitor.py:33 253 | msgid "Host" 254 | msgstr "Hôte" 255 | 256 | #: monitor.py:34 257 | msgid "Id" 258 | msgstr "Id" 259 | 260 | #: monitor.py:35 261 | msgid "State" 262 | msgstr "Statut" 263 | 264 | #: monitor.py:36 265 | msgid "Pool" 266 | msgstr "Pool" 267 | 268 | #: monitor.py:37 269 | msgid "TQ" 270 | msgstr "TQ" 271 | 272 | #: monitor.py:38 273 | msgid "RQ" 274 | msgstr "RQ" 275 | 276 | #: monitor.py:39 277 | msgid "RC" 278 | msgstr "RC" 279 | 280 | #: monitor.py:40 281 | msgid "Up" 282 | msgstr "En ligne" 283 | 284 | #: monitor.py:90 monitor.py:165 285 | msgid "Queued" 286 | msgstr "En file d'attente" 287 | 288 | #: monitor.py:92 289 | msgid "Success" 290 | msgstr "Succès" 291 | 292 | #: monitor.py:95 monitor.py:173 293 | msgid "Failures" 294 | msgstr "Échecs" 295 | 296 | #: monitor.py:101 297 | msgid "[Press q to quit]" 298 | msgstr "[Appuyez sur q pour quitter]" 299 | 300 | #: monitor.py:120 301 | msgid "day" 302 | msgstr "jour" 303 | 304 | #: monitor.py:137 305 | msgid "second" 306 | msgstr "seconde" 307 | 308 | #: monitor.py:140 309 | msgid "minute" 310 | msgstr "minute" 311 | 312 | #: monitor.py:143 313 | msgid "hour" 314 | msgstr "heure" 315 | 316 | #: monitor.py:151 317 | msgid "-- {} {} on {} --" 318 | msgstr "-- {} {} sur {} --" 319 | 320 | #: monitor.py:153 321 | msgid "Clusters" 322 | msgstr "Clusters" 323 | 324 | #: monitor.py:157 325 | msgid "Workers" 326 | msgstr "Processus" 327 | 328 | #: monitor.py:161 329 | msgid "Restarts" 330 | msgstr "Redémarrages" 331 | 332 | #: monitor.py:169 333 | msgid "Successes" 334 | msgstr "Succès" 335 | 336 | #: monitor.py:177 337 | msgid "Schedules" 338 | msgstr "Planifications" 339 | 340 | #: monitor.py:181 341 | msgid "Tasks/{}" 342 | msgstr "Tâches/{}" 343 | 344 | #: monitor.py:185 345 | msgid "Avg time" 346 | msgstr "Temps moyen" 347 | 348 | #: signals.py:21 349 | msgid "malformed return hook '{}' for [{}]" 350 | msgstr "hook de retour mal formé '{}' pour [{}]" 351 | 352 | #: signals.py:26 353 | msgid "return hook {} failed 
on [{}] because {}" 354 | msgstr "le crochet de retour {} a échoué sur [{}] parce que {}" 355 | -------------------------------------------------------------------------------- /django_q/management/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/management/__init__.py -------------------------------------------------------------------------------- /django_q/management/commands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/management/commands/__init__.py -------------------------------------------------------------------------------- /django_q/management/commands/qcluster.py: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | from django.utils.translation import gettext as _ 3 | 4 | from django_q.cluster import Cluster 5 | 6 | 7 | class Command(BaseCommand): 8 | # Translators: help text for qcluster management command 9 | help = _("Starts a Django Q Cluster.") 10 | 11 | def add_arguments(self, parser): 12 | parser.add_argument( 13 | "--run-once", 14 | action="store_true", 15 | dest="run_once", 16 | default=False, 17 | help="Run once and then stop.", 18 | ) 19 | 20 | def handle(self, *args, **options): 21 | q = Cluster() 22 | q.start() 23 | if options.get("run_once", False): 24 | q.stop() 25 | -------------------------------------------------------------------------------- /django_q/management/commands/qinfo.py: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | from django.utils.translation import gettext as _ 3 | 4 | from django_q import VERSION 5 | from django_q.conf import Conf 6 | from django_q.monitor import get_ids, info 7 | 8 | 9 | class Command(BaseCommand): 10 | # Translators: help text for qinfo management command 11 | help = _("General information over all clusters.") 12 | 13 | def add_arguments(self, parser): 14 | parser.add_argument( 15 | "--config", 16 | action="store_true", 17 | dest="config", 18 | default=False, 19 | help="Print current configuration.", 20 | ) 21 | parser.add_argument( 22 | "--ids", 23 | action="store_true", 24 | dest="ids", 25 | default=False, 26 | help="Print cluster task ID(s) (PIDs).", 27 | ) 28 | 29 | def handle(self, *args, **options): 30 | if options.get("ids", True): 31 | get_ids() 32 | elif options.get("config", False): 33 | hide = [ 34 | "conf", 35 | "IDLE", 36 | "STOPPING", 37 | "STARTING", 38 | "WORKING", 39 | "SIGNAL_NAMES", 40 | "STOPPED", 41 | ] 42 | settings = [ 43 | a for a in dir(Conf) if not a.startswith("__") and a not in hide 44 | ] 45 | self.stdout.write(f"VERSION: {'.'.join(str(v) for v in VERSION)}") 46 | for setting in settings: 47 | value = getattr(Conf, setting) 48 | if value is not None: 49 | self.stdout.write(f"{setting}: {value}") 50 | else: 51 | info() 52 | -------------------------------------------------------------------------------- /django_q/management/commands/qmemory.py: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | from django.utils.translation import gettext as _ 3 | 4 | from django_q.monitor import memory 5 | 6 | 7 | class Command(BaseCommand): 8 | # 
Translators: help text for qmemory management command 9 | help = _("Monitors Q Cluster memory usage") 10 | 11 | def add_arguments(self, parser): 12 | parser.add_argument( 13 | "--run-once", 14 | action="store_true", 15 | dest="run_once", 16 | default=False, 17 | help="Run once and then stop.", 18 | ) 19 | parser.add_argument( 20 | "--workers", 21 | action="store_true", 22 | dest="workers", 23 | default=False, 24 | help="Show each worker's memory usage.", 25 | ) 26 | 27 | def handle(self, *args, **options): 28 | memory( 29 | run_once=options.get("run_once", False), 30 | workers=options.get("workers", False), 31 | ) 32 | -------------------------------------------------------------------------------- /django_q/management/commands/qmonitor.py: -------------------------------------------------------------------------------- 1 | from django.core.management.base import BaseCommand 2 | from django.utils.translation import gettext as _ 3 | 4 | from django_q.monitor import monitor 5 | 6 | 7 | class Command(BaseCommand): 8 | # Translators: help text for qmonitor management command 9 | help = _("Monitors Q Cluster activity") 10 | 11 | def add_arguments(self, parser): 12 | parser.add_argument( 13 | "--run-once", 14 | action="store_true", 15 | dest="run_once", 16 | default=False, 17 | help="Run once and then stop.", 18 | ) 19 | 20 | def handle(self, *args, **options): 21 | monitor(run_once=options.get("run_once", False)) 22 | -------------------------------------------------------------------------------- /django_q/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | import django.utils.timezone 2 | import picklefield.fields 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ] 10 | 11 | operations = [ 12 | migrations.CreateModel( 13 | name='Schedule', 14 | fields=[ 15 | ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)), 16 | ('func', models.CharField(max_length=256, help_text='e.g. module.tasks.function')), 17 | ('hook', models.CharField(null=True, blank=True, max_length=256, help_text='e.g. module.tasks.result_function')), 18 | ('args', models.CharField(null=True, blank=True, max_length=256, help_text="e.g. 1, 2, 'John'")), 19 | ('kwargs', models.CharField(null=True, blank=True, max_length=256, help_text="e.g. 
x=1, y=2, name='John'")), 20 | ('schedule_type', models.CharField(verbose_name='Schedule Type', choices=[('O', 'Once'), ('H', 'Hourly'), ('D', 'Daily'), ('W', 'Weekly'), ('M', 'Monthly'), ('Q', 'Quarterly'), ('Y', 'Yearly')], default='O', max_length=1)), 21 | ('repeats', models.SmallIntegerField(verbose_name='Repeats', default=-1, help_text='n = n times, -1 = forever')), 22 | ('next_run', models.DateTimeField(verbose_name='Next Run', default=django.utils.timezone.now, null=True)), 23 | ('task', models.CharField(editable=False, null=True, max_length=100)), 24 | ], 25 | options={ 26 | 'verbose_name': 'Scheduled task', 27 | 'ordering': ['next_run'], 28 | }, 29 | ), 30 | migrations.CreateModel( 31 | name='Task', 32 | fields=[ 33 | ('id', models.AutoField(verbose_name='ID', auto_created=True, serialize=False, primary_key=True)), 34 | ('name', models.CharField(editable=False, max_length=100)), 35 | ('func', models.CharField(max_length=256)), 36 | ('hook', models.CharField(null=True, max_length=256)), 37 | ('args', picklefield.fields.PickledObjectField(editable=False, null=True)), 38 | ('kwargs', picklefield.fields.PickledObjectField(editable=False, null=True)), 39 | ('result', picklefield.fields.PickledObjectField(editable=False, null=True)), 40 | ('started', models.DateTimeField(editable=False)), 41 | ('stopped', models.DateTimeField(editable=False)), 42 | ('success', models.BooleanField(editable=False, default=True)), 43 | ], 44 | ), 45 | migrations.CreateModel( 46 | name='Failure', 47 | fields=[ 48 | ], 49 | options={ 50 | 'verbose_name': 'Failed task', 51 | 'proxy': True, 52 | }, 53 | bases=('django_q.task',), 54 | ), 55 | migrations.CreateModel( 56 | name='Success', 57 | fields=[ 58 | ], 59 | options={ 60 | 'verbose_name': 'Successful task', 61 | 'proxy': True, 62 | }, 63 | bases=('django_q.task',), 64 | ), 65 | ] 66 | -------------------------------------------------------------------------------- /django_q/migrations/0002_auto_20150630_1624.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0001_initial'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AlterField( 12 | model_name='schedule', 13 | name='args', 14 | field=models.TextField(help_text="e.g. 1, 2, 'John'", blank=True, null=True), 15 | ), 16 | migrations.AlterField( 17 | model_name='schedule', 18 | name='kwargs', 19 | field=models.TextField(help_text="e.g. 
x=1, y=2, name='John'", blank=True, null=True), 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /django_q/migrations/0003_auto_20150708_1326.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0002_auto_20150630_1624'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AlterModelOptions( 12 | name='failure', 13 | options={'verbose_name_plural': 'Failed tasks', 'verbose_name': 'Failed task'}, 14 | ), 15 | migrations.AlterModelOptions( 16 | name='schedule', 17 | options={'verbose_name_plural': 'Scheduled tasks', 'ordering': ['next_run'], 'verbose_name': 'Scheduled task'}, 18 | ), 19 | migrations.AlterModelOptions( 20 | name='success', 21 | options={'verbose_name_plural': 'Successful tasks', 'verbose_name': 'Successful task'}, 22 | ), 23 | migrations.RemoveField( 24 | model_name='task', 25 | name='id', 26 | ), 27 | migrations.AddField( 28 | model_name='task', 29 | name='id', 30 | field=models.CharField(max_length=32, primary_key=True, editable=False, serialize=False), 31 | ), 32 | ] 33 | -------------------------------------------------------------------------------- /django_q/migrations/0004_auto_20150710_1043.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0003_auto_20150708_1326'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AlterModelOptions( 12 | name='failure', 13 | options={'verbose_name_plural': 'Failed tasks', 'verbose_name': 'Failed task', 'ordering': ['-stopped']}, 14 | ), 15 | migrations.AlterModelOptions( 16 | name='success', 17 | options={'verbose_name_plural': 'Successful tasks', 'verbose_name': 'Successful task', 'ordering': ['-stopped']}, 18 | ), 19 | migrations.AlterModelOptions( 20 | name='task', 21 | options={'ordering': ['-stopped']}, 22 | ), 23 | ] 24 | -------------------------------------------------------------------------------- /django_q/migrations/0005_auto_20150718_1506.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0004_auto_20150710_1043'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AddField( 12 | model_name='schedule', 13 | name='name', 14 | field=models.CharField(max_length=100, null=True), 15 | ), 16 | migrations.AddField( 17 | model_name='task', 18 | name='group', 19 | field=models.CharField(max_length=100, null=True, editable=False), 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /django_q/migrations/0006_auto_20150805_1817.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0005_auto_20150718_1506'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AddField( 12 | model_name='schedule', 13 | name='minutes', 14 | field=models.PositiveSmallIntegerField(help_text='Number of minutes for the Minutes type', blank=True, null=True), 15 | ), 16 | migrations.AlterField( 17 | model_name='schedule', 18 | name='schedule_type', 19 | field=models.CharField(max_length=1, choices=[('O', 
'Once'), ('I', 'Minutes'), ('H', 'Hourly'), ('D', 'Daily'), ('W', 'Weekly'), ('M', 'Monthly'), ('Q', 'Quarterly'), ('Y', 'Yearly')], default='O', verbose_name='Schedule Type'), 20 | ), 21 | ] 22 | -------------------------------------------------------------------------------- /django_q/migrations/0007_ormq.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0006_auto_20150805_1817'), 8 | ] 9 | 10 | operations = [ 11 | migrations.CreateModel( 12 | name='OrmQ', 13 | fields=[ 14 | ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)), 15 | ('key', models.CharField(max_length=100)), 16 | ('payload', models.TextField()), 17 | ('lock', models.DateTimeField(null=True)), 18 | ], 19 | options={ 20 | 'verbose_name_plural': 'Queued tasks', 21 | 'verbose_name': 'Queued task', 22 | }, 23 | ), 24 | ] 25 | -------------------------------------------------------------------------------- /django_q/migrations/0008_auto_20160224_1026.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0007_ormq'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AlterField( 12 | model_name='schedule', 13 | name='name', 14 | field=models.CharField(blank=True, max_length=100, null=True), 15 | ), 16 | ] 17 | -------------------------------------------------------------------------------- /django_q/migrations/0009_auto_20171009_0915.py: -------------------------------------------------------------------------------- 1 | from django.db import migrations, models 2 | 3 | 4 | class Migration(migrations.Migration): 5 | 6 | dependencies = [ 7 | ('django_q', '0008_auto_20160224_1026'), 8 | ] 9 | 10 | operations = [ 11 | migrations.AlterField( 12 | model_name='schedule', 13 | name='repeats', 14 | field=models.IntegerField(default=-1, help_text='n = n times, -1 = forever', verbose_name='Repeats'), 15 | ), 16 | ] 17 | -------------------------------------------------------------------------------- /django_q/migrations/0010_auto_20200610_0856.py: -------------------------------------------------------------------------------- 1 | import picklefield.fields 2 | from django.db import migrations 3 | 4 | 5 | class Migration(migrations.Migration): 6 | 7 | dependencies = [ 8 | ('django_q', '0009_auto_20171009_0915'), 9 | ] 10 | 11 | operations = [ 12 | migrations.AlterField( 13 | model_name='task', 14 | name='args', 15 | field=picklefield.fields.PickledObjectField(editable=False, null=True, protocol=-1), 16 | ), 17 | migrations.AlterField( 18 | model_name='task', 19 | name='kwargs', 20 | field=picklefield.fields.PickledObjectField(editable=False, null=True, protocol=-1), 21 | ), 22 | migrations.AlterField( 23 | model_name='task', 24 | name='result', 25 | field=picklefield.fields.PickledObjectField(editable=False, null=True, protocol=-1), 26 | ), 27 | ] 28 | -------------------------------------------------------------------------------- /django_q/migrations/0011_auto_20200628_1055.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.0.7 on 2020-06-28 10:55 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('django_q', 
'0010_auto_20200610_0856'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='schedule', 15 | name='cron', 16 | field=models.CharField(blank=True, help_text='Cron expression', max_length=100, null=True), 17 | ), 18 | migrations.AlterField( 19 | model_name='schedule', 20 | name='schedule_type', 21 | field=models.CharField(choices=[('O', 'Once'), ('I', 'Minutes'), ('H', 'Hourly'), ('D', 'Daily'), ('W', 'Weekly'), ('M', 'Monthly'), ('Q', 'Quarterly'), ('Y', 'Yearly'), ('C', 'Cron')], default='O', max_length=1, verbose_name='Schedule Type'), 22 | ), 23 | ] 24 | -------------------------------------------------------------------------------- /django_q/migrations/0012_auto_20200702_1608.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.0.8 on 2020-07-02 16:08 2 | 3 | from django.db import migrations, models 4 | 5 | import django_q.models 6 | 7 | 8 | class Migration(migrations.Migration): 9 | 10 | dependencies = [ 11 | ('django_q', '0011_auto_20200628_1055'), 12 | ] 13 | 14 | operations = [ 15 | migrations.AlterField( 16 | model_name='schedule', 17 | name='cron', 18 | field=models.CharField(blank=True, help_text='Cron expression', max_length=100, null=True, validators=[django_q.models.validate_cron]), 19 | ), 20 | ] 21 | -------------------------------------------------------------------------------- /django_q/migrations/0013_task_attempt_count.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.0.7 on 2020-08-11 15:17 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('django_q', '0012_auto_20200702_1608'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='task', 15 | name='attempt_count', 16 | field=models.IntegerField(default=0), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /django_q/migrations/0014_schedule_cluster.py: -------------------------------------------------------------------------------- 1 | # Generated by Django 3.2.2 on 2021-05-11 05:59 2 | 3 | from django.db import migrations, models 4 | 5 | 6 | class Migration(migrations.Migration): 7 | 8 | dependencies = [ 9 | ('django_q', '0013_task_attempt_count'), 10 | ] 11 | 12 | operations = [ 13 | migrations.AddField( 14 | model_name='schedule', 15 | name='cluster', 16 | field=models.CharField(blank=True, default=None, max_length=100, null=True), 17 | ), 18 | ] 19 | -------------------------------------------------------------------------------- /django_q/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/migrations/__init__.py -------------------------------------------------------------------------------- /django_q/models.py: -------------------------------------------------------------------------------- 1 | # Django 2 | from django import get_version 3 | from django.core.exceptions import ValidationError 4 | from django.db import models 5 | from django.template.defaultfilters import truncatechars 6 | from django.urls import reverse 7 | from django.utils import timezone 8 | from django.utils.html import format_html 9 | from django.utils.translation import gettext_lazy as _ 10 | 11 | # External 12 | from picklefield import PickledObjectField 13 | from picklefield.fields import 
dbsafe_decode 14 | 15 | # Local 16 | from django_q.conf import croniter 17 | from django_q.signing import SignedPackage 18 | 19 | 20 | class Task(models.Model): 21 | id = models.CharField(max_length=32, primary_key=True, editable=False) 22 | name = models.CharField(max_length=100, editable=False) 23 | func = models.CharField(max_length=256) 24 | hook = models.CharField(max_length=256, null=True) 25 | args = PickledObjectField(null=True, protocol=-1) 26 | kwargs = PickledObjectField(null=True, protocol=-1) 27 | result = PickledObjectField(null=True, protocol=-1) 28 | group = models.CharField(max_length=100, editable=False, null=True) 29 | started = models.DateTimeField(editable=False) 30 | stopped = models.DateTimeField(editable=False) 31 | success = models.BooleanField(default=True, editable=False) 32 | attempt_count = models.IntegerField(default=0) 33 | 34 | @staticmethod 35 | def get_result(task_id): 36 | if len(task_id) == 32 and Task.objects.filter(id=task_id).exists(): 37 | return Task.objects.get(id=task_id).result 38 | elif Task.objects.filter(name=task_id).exists(): 39 | return Task.objects.get(name=task_id).result 40 | 41 | @staticmethod 42 | def get_result_group(group_id, failures=False): 43 | if failures: 44 | values = Task.objects.filter(group=group_id).values_list( 45 | "result", flat=True 46 | ) 47 | else: 48 | values = ( 49 | Task.objects.filter(group=group_id) 50 | .exclude(success=False) 51 | .values_list("result", flat=True) 52 | ) 53 | return decode_results(values) 54 | 55 | def group_result(self, failures=False): 56 | if self.group: 57 | return self.get_result_group(self.group, failures) 58 | 59 | @staticmethod 60 | def get_group_count(group_id, failures=False): 61 | if failures: 62 | return Failure.objects.filter(group=group_id).count() 63 | return Task.objects.filter(group=group_id).count() 64 | 65 | def group_count(self, failures=False): 66 | if self.group: 67 | return self.get_group_count(self.group, failures) 68 | 69 | @staticmethod 70 | def delete_group(group_id, objects=False): 71 | group = Task.objects.filter(group=group_id) 72 | if objects: 73 | return group.delete() 74 | return group.update(group=None) 75 | 76 | def group_delete(self, tasks=False): 77 | if self.group: 78 | return self.delete_group(self.group, tasks) 79 | 80 | @staticmethod 81 | def get_task(task_id): 82 | if len(task_id) == 32 and Task.objects.filter(id=task_id).exists(): 83 | return Task.objects.get(id=task_id) 84 | elif Task.objects.filter(name=task_id).exists(): 85 | return Task.objects.get(name=task_id) 86 | 87 | @staticmethod 88 | def get_task_group(group_id, failures=True): 89 | if failures: 90 | return Task.objects.filter(group=group_id) 91 | return Task.objects.filter(group=group_id).exclude(success=False) 92 | 93 | def time_taken(self): 94 | return (self.stopped - self.started).total_seconds() 95 | 96 | @property 97 | def short_result(self): 98 | return truncatechars(self.result, 100) 99 | 100 | def __str__(self): 101 | return f"{self.name or self.id}" 102 | 103 | class Meta: 104 | app_label = "django_q" 105 | ordering = ["-stopped"] 106 | 107 | 108 | class SuccessManager(models.Manager): 109 | def get_queryset(self): 110 | return super(SuccessManager, self).get_queryset().filter(success=True) 111 | 112 | 113 | class Success(Task): 114 | objects = SuccessManager() 115 | 116 | class Meta: 117 | app_label = "django_q" 118 | verbose_name = _("Successful task") 119 | verbose_name_plural = _("Successful tasks") 120 | ordering = ["-stopped"] 121 | proxy = True 122 | 123 | 124 | class 
FailureManager(models.Manager): 125 | def get_queryset(self): 126 | return super(FailureManager, self).get_queryset().filter(success=False) 127 | 128 | 129 | class Failure(Task): 130 | objects = FailureManager() 131 | 132 | class Meta: 133 | app_label = "django_q" 134 | verbose_name = _("Failed task") 135 | verbose_name_plural = _("Failed tasks") 136 | ordering = ["-stopped"] 137 | proxy = True 138 | 139 | 140 | # Optional Cron validator 141 | def validate_cron(value): 142 | if not croniter: 143 | raise ImportError(_("Please install croniter to enable cron expressions")) 144 | try: 145 | croniter.expand(value) 146 | except ValueError as e: 147 | raise ValidationError(e) 148 | 149 | 150 | class Schedule(models.Model): 151 | name = models.CharField(max_length=100, null=True, blank=True) 152 | func = models.CharField(max_length=256, help_text="e.g. module.tasks.function") 153 | hook = models.CharField( 154 | max_length=256, 155 | null=True, 156 | blank=True, 157 | help_text="e.g. module.tasks.result_function", 158 | ) 159 | args = models.TextField(null=True, blank=True, help_text=_("e.g. 1, 2, 'John'")) 160 | kwargs = models.TextField( 161 | null=True, blank=True, help_text=_("e.g. x=1, y=2, name='John'") 162 | ) 163 | ONCE = "O" 164 | MINUTES = "I" 165 | HOURLY = "H" 166 | DAILY = "D" 167 | WEEKLY = "W" 168 | MONTHLY = "M" 169 | QUARTERLY = "Q" 170 | YEARLY = "Y" 171 | CRON = "C" 172 | TYPE = ( 173 | (ONCE, _("Once")), 174 | (MINUTES, _("Minutes")), 175 | (HOURLY, _("Hourly")), 176 | (DAILY, _("Daily")), 177 | (WEEKLY, _("Weekly")), 178 | (MONTHLY, _("Monthly")), 179 | (QUARTERLY, _("Quarterly")), 180 | (YEARLY, _("Yearly")), 181 | (CRON, _("Cron")), 182 | ) 183 | schedule_type = models.CharField( 184 | max_length=1, choices=TYPE, default=TYPE[0][0], verbose_name=_("Schedule Type") 185 | ) 186 | minutes = models.PositiveSmallIntegerField( 187 | null=True, blank=True, help_text=_("Number of minutes for the Minutes type") 188 | ) 189 | repeats = models.IntegerField( 190 | default=-1, verbose_name=_("Repeats"), help_text=_("n = n times, -1 = forever") 191 | ) 192 | next_run = models.DateTimeField( 193 | verbose_name=_("Next Run"), default=timezone.now, null=True 194 | ) 195 | cron = models.CharField( 196 | max_length=100, 197 | null=True, 198 | blank=True, 199 | validators=[validate_cron], 200 | help_text=_("Cron expression"), 201 | ) 202 | task = models.CharField(max_length=100, null=True, editable=False) 203 | cluster = models.CharField(max_length=100, default=None, null=True, blank=True) 204 | 205 | def success(self): 206 | if self.task and Task.objects.filter(id=self.task): 207 | return Task.objects.get(id=self.task).success 208 | 209 | def last_run(self): 210 | if self.task and Task.objects.filter(id=self.task): 211 | task = Task.objects.get(id=self.task) 212 | if task.success: 213 | url = reverse("admin:django_q_success_change", args=(task.id,)) 214 | else: 215 | url = reverse("admin:django_q_failure_change", args=(task.id,)) 216 | return format_html(f'<a href="{url}">[{task.name}]</a>') 217 | return None 218 | 219 | def __str__(self): 220 | return self.func 221 | 222 | success.boolean = True 223 | last_run.allow_tags = True 224 | 225 | class Meta: 226 | app_label = "django_q" 227 | verbose_name = _("Scheduled task") 228 | verbose_name_plural = _("Scheduled tasks") 229 | ordering = ["next_run"] 230 | 231 | 232 | class OrmQ(models.Model): 233 | key = models.CharField(max_length=100) 234 | payload = models.TextField() 235 | lock = models.DateTimeField(null=True) 236 | 237 | def task(self): 238 | return 
SignedPackage.loads(self.payload) 239 | 240 | def func(self): 241 | return self.task()["func"] 242 | 243 | def task_id(self): 244 | return self.task()["id"] 245 | 246 | def name(self): 247 | return self.task()["name"] 248 | 249 | class Meta: 250 | app_label = "django_q" 251 | verbose_name = _("Queued task") 252 | verbose_name_plural = _("Queued tasks") 253 | 254 | 255 | # Backwards compatibility for Django 1.7 256 | def decode_results(values): 257 | if get_version().split(".")[1] == "7": 258 | # decode values in 1.7 259 | return [dbsafe_decode(v) for v in values] 260 | return values 261 | -------------------------------------------------------------------------------- /django_q/queues.py: -------------------------------------------------------------------------------- 1 | """ 2 | The code is derived from https://github.com/althonos/pronto/commit/3384010dfb4fc7c66a219f59276adef3288a886b 3 | """ 4 | import multiprocessing 5 | import multiprocessing.queues 6 | import sys 7 | 8 | 9 | class SharedCounter: 10 | """A synchronized shared counter. 11 | 12 | The locking done by multiprocessing.Value ensures that only a single 13 | process or thread may read or write the in-memory ctypes object. However, 14 | in order to do n += 1, Python performs a read followed by a write, so a 15 | second process may read the old value before the new one is written by 16 | the first process. The solution is to use a multiprocessing.Lock to 17 | guarantee the atomicity of the modifications to Value. 18 | 19 | This class comes almost entirely from Eli Bendersky's blog: 20 | http://eli.thegreenplace.net/2012/01/04/shared-counter-with-pythons-multiprocessing/ 21 | """ 22 | 23 | def __init__(self, n=0): 24 | self.count = multiprocessing.Value("i", n) 25 | 26 | def increment(self, n=1): 27 | """Increment the counter by n (default = 1)""" 28 | with self.count.get_lock(): 29 | self.count.value += n 30 | 31 | @property 32 | def value(self): 33 | """Return the value of the counter""" 34 | return self.count.value 35 | 36 | 37 | class Queue(multiprocessing.queues.Queue): 38 | """A portable implementation of multiprocessing.Queue. 39 | 40 | Because of multithreading / multiprocessing semantics, Queue.qsize() may 41 | raise the NotImplementedError exception on Unix platforms like Mac OS X 42 | where sem_getvalue() is not implemented. This subclass addresses this 43 | problem by using a synchronized shared counter (initialized to zero) and 44 | increasing / decreasing its value every time the put() and get() methods 45 | are called, respectively. This not only prevents NotImplementedError from 46 | being raised, but also allows us to implement a reliable version of both 47 | qsize() and empty(). 
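A minimal usage sketch (illustrative; the counter logic below guarantees the
counts shown even where sem_getvalue() is unavailable):

    >>> from django_q.queues import Queue
    >>> q = Queue()
    >>> q.put("task")
    >>> q.qsize()
    1
    >>> q.get()
    'task'
    >>> q.empty()
    True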
48 | """ 49 | 50 | def __init__(self, *args, **kwargs): 51 | if sys.version_info < (3, 0): 52 | super(Queue, self).__init__(*args, **kwargs) 53 | else: 54 | super(Queue, self).__init__( 55 | *args, ctx=multiprocessing.get_context(), **kwargs 56 | ) 57 | self.size = SharedCounter(0) 58 | 59 | def __getstate__(self): 60 | return super(Queue, self).__getstate__() + (self.size,) 61 | 62 | def __setstate__(self, state): 63 | super(Queue, self).__setstate__(state[:-1]) 64 | self.size = state[-1] 65 | 66 | def put(self, *args, **kwargs): 67 | super(Queue, self).put(*args, **kwargs) 68 | self.size.increment(1) 69 | 70 | def get(self, *args, **kwargs): 71 | x = super(Queue, self).get(*args, **kwargs) 72 | self.size.increment(-1) 73 | return x 74 | 75 | def qsize(self) -> int: 76 | """Reliable implementation of multiprocessing.Queue.qsize()""" 77 | return self.size.value 78 | 79 | def empty(self) -> bool: 80 | """Reliable implementation of multiprocessing.Queue.empty()""" 81 | return not self.qsize() > 0 82 | -------------------------------------------------------------------------------- /django_q/signals.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | from django.db.models.signals import post_save 4 | from django.dispatch import Signal, receiver 5 | from django.utils.translation import gettext_lazy as _ 6 | 7 | from django_q.conf import logger 8 | from django_q.models import Task 9 | 10 | 11 | @receiver(post_save, sender=Task) 12 | def call_hook(sender, instance, **kwargs): 13 | if instance.hook: 14 | f = instance.hook 15 | if not callable(f): 16 | try: 17 | module, func = f.rsplit(".", 1) 18 | m = importlib.import_module(module) 19 | f = getattr(m, func) 20 | except (ValueError, ImportError, AttributeError): 21 | logger.error( 22 | _(f"malformed return hook '{instance.hook}' for [{instance.name}]") 23 | ) 24 | return 25 | try: 26 | f(instance) 27 | except Exception as e: 28 | logger.error( 29 | _( 30 | f"return hook {instance.hook} failed on [{instance.name}] because {str(e)}" 31 | ) 32 | ) 33 | 34 | 35 | # args: task 36 | pre_enqueue = Signal() 37 | 38 | # args: func, task 39 | pre_execute = Signal() 40 | 41 | # args: task 42 | post_execute = Signal() 43 | -------------------------------------------------------------------------------- /django_q/signing.py: -------------------------------------------------------------------------------- 1 | """Package signing.""" 2 | import pickle 3 | 4 | from django_q import core_signing as signing 5 | from django_q.conf import Conf 6 | 7 | BadSignature = signing.BadSignature 8 | 9 | 10 | class SignedPackage: 11 | """Wraps Django's signing module with custom Pickle serializer.""" 12 | 13 | @staticmethod 14 | def dumps(obj, compressed: bool = Conf.COMPRESSED) -> str: 15 | return signing.dumps( 16 | obj, 17 | key=Conf.SECRET_KEY, 18 | salt=Conf.PREFIX, 19 | compress=compressed, 20 | serializer=PickleSerializer, 21 | ) 22 | 23 | @staticmethod 24 | def loads(obj) -> any: 25 | return signing.loads( 26 | obj, key=Conf.SECRET_KEY, salt=Conf.PREFIX, serializer=PickleSerializer 27 | ) 28 | 29 | 30 | class PickleSerializer: 31 | """Simple wrapper around Pickle for signing.dumps and signing.loads.""" 32 | 33 | @staticmethod 34 | def dumps(obj) -> bytes: 35 | return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL) 36 | 37 | @staticmethod 38 | def loads(data) -> any: 39 | return pickle.loads(data) 40 | -------------------------------------------------------------------------------- /django_q/status.py: 
-------------------------------------------------------------------------------- 1 | import socket 2 | from typing import Union 3 | 4 | from django.utils import timezone 5 | 6 | from django_q.brokers import Broker, get_broker 7 | from django_q.conf import Conf, logger 8 | from django_q.signing import BadSignature, SignedPackage 9 | 10 | 11 | class Status: 12 | """Cluster status base class.""" 13 | 14 | def __init__(self, pid, cluster_id): 15 | self.workers = [] 16 | self.tob = None 17 | self.reincarnations = 0 18 | self.pid = pid 19 | self.cluster_id = cluster_id 20 | self.sentinel = 0 21 | self.status = Conf.STOPPED 22 | self.done_q_size = 0 23 | self.host = socket.gethostname() 24 | self.monitor = 0 25 | self.task_q_size = 0 26 | self.pusher = 0 27 | self.timestamp = timezone.now() 28 | 29 | 30 | class Stat(Status): 31 | """Status object for Cluster monitoring.""" 32 | 33 | def __init__(self, sentinel): 34 | super(Stat, self).__init__( 35 | sentinel.parent_pid or sentinel.pid, cluster_id=sentinel.cluster_id 36 | ) 37 | self.broker = sentinel.broker or get_broker() 38 | self.tob = sentinel.tob 39 | self.reincarnations = sentinel.reincarnations 40 | self.sentinel = sentinel.pid 41 | self.status = sentinel.status() 42 | self.done_q_size = 0 43 | self.task_q_size = 0 44 | if Conf.QSIZE: 45 | self.done_q_size = sentinel.result_queue.qsize() 46 | self.task_q_size = sentinel.task_queue.qsize() 47 | if sentinel.monitor: 48 | self.monitor = sentinel.monitor.pid 49 | if sentinel.pusher: 50 | self.pusher = sentinel.pusher.pid 51 | self.workers = [w.pid for w in sentinel.pool] 52 | 53 | def uptime(self) -> float: 54 | return (timezone.now() - self.tob).total_seconds() 55 | 56 | @property 57 | def key(self) -> str: 58 | """ 59 | :return: redis key for this cluster statistic 60 | """ 61 | return self.get_key(self.cluster_id) 62 | 63 | @staticmethod 64 | def get_key(cluster_id) -> str: 65 | """ 66 | :param cluster_id: cluster ID 67 | :return: redis key for the cluster statistic 68 | """ 69 | return f"{Conf.Q_STAT}:{cluster_id}" 70 | 71 | def save(self): 72 | try: 73 | self.broker.set_stat(self.key, SignedPackage.dumps(self, True), 3) 74 | except Exception as e: 75 | logger.error(e) 76 | 77 | def empty_queues(self) -> bool: 78 | return self.done_q_size + self.task_q_size == 0 79 | 80 | @staticmethod 81 | def get(pid: int, cluster_id: str, broker: Broker = None) -> Union[Status, None]: 82 | """ 83 | gets the current status for the cluster 84 | :param pid: 85 | :param broker: an optional broker instance 86 | :param cluster_id: id of the cluster 87 | :return: Stat or Status 88 | """ 89 | if not broker: 90 | broker = get_broker() 91 | pack = broker.get_stat(Stat.get_key(cluster_id)) 92 | if pack: 93 | try: 94 | return SignedPackage.loads(pack) 95 | except BadSignature: 96 | return None 97 | return Status(pid=pid, cluster_id=cluster_id) 98 | 99 | @staticmethod 100 | def get_all(broker: Broker = None) -> list: 101 | """ 102 | Get the status for all currently running clusters with the same prefix 103 | and secret key. 
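A monitoring loop might use it like this (a sketch; assumes a configured broker and at least one running cluster):

    for stat in Stat.get_all():
        print(stat.host, stat.cluster_id, stat.status, stat.uptime())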
104 | :return: list of type Stat 105 | """ 106 | if not broker: 107 | broker = get_broker() 108 | stats = [] 109 | packs = broker.get_stats(f"{Conf.Q_STAT}:*") or [] 110 | for pack in packs: 111 | try: 112 | stats.append(SignedPackage.loads(pack)) 113 | except BadSignature: 114 | continue 115 | return stats 116 | 117 | def __getstate__(self): 118 | # Don't pickle the redis connection 119 | state = dict(self.__dict__) 120 | del state["broker"] 121 | return state 122 | -------------------------------------------------------------------------------- /django_q/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/tests/__init__.py -------------------------------------------------------------------------------- /django_q/tests/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import django 4 | 5 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 6 | 7 | 8 | # Quick-start development settings - unsuitable for production 9 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ 10 | 11 | # SECURITY WARNING: keep the secret key used in production secret! 12 | SECRET_KEY = ")cqmpi+p@n&!u&fu@!m@9h&1bz9mwmstsahe)nf!ms+c$uc=x7" 13 | 14 | # SECURITY WARNING: don't run with debug turned on in production! 15 | DEBUG = True 16 | 17 | ALLOWED_HOSTS = [] 18 | 19 | 20 | # Application definition 21 | 22 | INSTALLED_APPS = ( 23 | "django.contrib.admin", 24 | "django.contrib.auth", 25 | "django.contrib.contenttypes", 26 | "django.contrib.sessions", 27 | "django.contrib.messages", 28 | "django.contrib.staticfiles", 29 | "django_q", 30 | "django_redis", 31 | ) 32 | 33 | 34 | MIDDLEWARE_CLASSES = ( 35 | "django.contrib.sessions.middleware.SessionMiddleware", 36 | "django.middleware.common.CommonMiddleware", 37 | "django.middleware.csrf.CsrfViewMiddleware", 38 | "django.contrib.auth.middleware.AuthenticationMiddleware", 39 | "django.contrib.messages.middleware.MessageMiddleware", 40 | "django.middleware.clickjacking.XFrameOptionsMiddleware", 41 | ) 42 | 43 | MIDDLEWARE = MIDDLEWARE_CLASSES 44 | 45 | ROOT_URLCONF = "tests.urls" 46 | 47 | TEMPLATES = [ 48 | { 49 | "BACKEND": "django.template.backends.django.DjangoTemplates", 50 | "DIRS": [], 51 | "APP_DIRS": True, 52 | "OPTIONS": { 53 | "context_processors": [ 54 | "django.template.context_processors.debug", 55 | "django.template.context_processors.request", 56 | "django.contrib.auth.context_processors.auth", 57 | "django.contrib.messages.context_processors.messages", 58 | ], 59 | }, 60 | }, 61 | ] 62 | 63 | 64 | # Database 65 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases 66 | 67 | DATABASES = { 68 | "default": { 69 | "ENGINE": "django.db.backends.sqlite3", 70 | "NAME": os.path.join(BASE_DIR, "db.sqlite3"), 71 | } 72 | } 73 | 74 | 75 | # Internationalization 76 | # https://docs.djangoproject.com/en/2.2/topics/i18n/ 77 | 78 | LANGUAGE_CODE = "en-us" 79 | 80 | TIME_ZONE = "UTC" 81 | 82 | USE_I18N = True 83 | 84 | USE_L10N = True 85 | 86 | USE_TZ = True 87 | 88 | LOGGING = { 89 | "version": 1, 90 | "disable_existing_loggers": False, 91 | "handlers": { 92 | "console": { 93 | "class": "logging.StreamHandler", 94 | }, 95 | }, 96 | "loggers": { 97 | "django_q": { 98 | "handlers": ["console"], 99 | "level": "INFO", 100 | }, 101 | }, 102 | } 103 | 104 | # Static files (CSS, JavaScript, Images) 105 | # 
https://docs.djangoproject.com/en/2.2/howto/static-files/ 106 | 107 | STATIC_URL = "/static/" 108 | 109 | # Django Redis 110 | CACHES = { 111 | "default": { 112 | "BACKEND": "django_redis.cache.RedisCache", 113 | "LOCATION": "redis://127.0.0.1:6379/0", 114 | "OPTIONS": { 115 | "CLIENT_CLASS": "django_redis.client.DefaultClient", 116 | "PARSER_CLASS": "redis.connection.HiredisParser", 117 | }, 118 | } 119 | } 120 | 121 | # Django Q specific 122 | Q_CLUSTER = { 123 | "name": "django_q_test", 124 | "cpu_affinity": 1, 125 | "testing": True, 126 | "log_level": "DEBUG", 127 | "django_redis": "default", 128 | } 129 | -------------------------------------------------------------------------------- /django_q/tests/tasks.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | 4 | class TaskError(Exception): 5 | pass 6 | 7 | 8 | def countdown(n): 9 | while n > 0: 10 | n -= 1 11 | 12 | 13 | def multiply(x, y): 14 | return x * y 15 | 16 | 17 | def count_letters(tup): 18 | total = 0 19 | for word in tup: 20 | total += len(word) 21 | return total 22 | 23 | 24 | def count_letters2(obj): 25 | return count_letters(obj.get_words()) 26 | 27 | 28 | def word_multiply(x, word=""): 29 | return len(word) * x 30 | 31 | 32 | def count_forever(): 33 | while True: 34 | sleep(0.5) 35 | 36 | 37 | def get_task_name(task): 38 | return task.name 39 | 40 | 41 | def get_user_id(user): 42 | return user.id 43 | 44 | 45 | def hello(): 46 | return "hello" 47 | 48 | 49 | def result(obj): 50 | print(f"RESULT HOOK {obj.name} : {obj.result}")  # Task.result is a field, not a method 51 | 52 | 53 | def raise_exception(): 54 | raise TaskError("this is an exception!") 55 | -------------------------------------------------------------------------------- /django_q/tests/test_admin.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from django.urls import reverse 3 | from django.utils import timezone 4 | 5 | from django_q.conf import Conf 6 | from django_q.humanhash import uuid 7 | from django_q.models import Failure, OrmQ, Task 8 | from django_q.signing import SignedPackage 9 | from django_q.tasks import schedule 10 | 11 | 12 | @pytest.mark.django_db 13 | def test_admin_views(admin_client, monkeypatch): 14 | monkeypatch.setattr(Conf, "ORM", "default") 15 | s = schedule("schedule.test") 16 | tag = uuid() 17 | f = Task.objects.create( 18 | id=tag[1], 19 | name=tag[0], 20 | func="test.fail", 21 | started=timezone.now(), 22 | stopped=timezone.now(), 23 | success=False, 24 | ) 25 | tag = uuid() 26 | t = Task.objects.create( 27 | id=tag[1], 28 | name=tag[0], 29 | func="test.success", 30 | started=timezone.now(), 31 | stopped=timezone.now(), 32 | success=True, 33 | ) 34 | q = OrmQ.objects.create( 35 | key="test", 36 | payload=SignedPackage.dumps({"id": 1, "func": "test", "name": "test"}), 37 | ) 38 | admin_urls = ( 39 | # schedule 40 | reverse("admin:django_q_schedule_changelist"), 41 | reverse("admin:django_q_schedule_add"), 42 | reverse("admin:django_q_schedule_change", args=(s.id,)), 43 | reverse("admin:django_q_schedule_history", args=(s.id,)), 44 | reverse("admin:django_q_schedule_delete", args=(s.id,)), 45 | # success 46 | reverse("admin:django_q_success_changelist"), 47 | reverse("admin:django_q_success_change", args=(t.id,)), 48 | reverse("admin:django_q_success_history", args=(t.id,)), 49 | reverse("admin:django_q_success_delete", args=(t.id,)), 50 | # failure 51 | reverse("admin:django_q_failure_changelist"), 52 | reverse("admin:django_q_failure_change", 
args=(f.id,)), 53 | reverse("admin:django_q_failure_history", args=(f.id,)), 54 | reverse("admin:django_q_failure_delete", args=(f.id,)), 55 | # orm queue 56 | reverse("admin:django_q_ormq_changelist"), 57 | reverse("admin:django_q_ormq_change", args=(q.id,)), 58 | reverse("admin:django_q_ormq_history", args=(q.id,)), 59 | reverse("admin:django_q_ormq_delete", args=(q.id,)), 60 | ) 61 | for url in admin_urls: 62 | response = admin_client.get(url) 63 | assert response.status_code == 200 64 | 65 | # resubmit the failure 66 | url = reverse("admin:django_q_failure_changelist") 67 | data = {"action": "retry_failed", "_selected_action": [f.pk]} 68 | response = admin_client.post(url, data) 69 | assert response.status_code == 302 70 | assert Failure.objects.filter(name=f.id).exists() is False 71 | # change q 72 | url = reverse("admin:django_q_ormq_change", args=(q.id,)) 73 | data = { 74 | "key": "default", 75 | "payload": "test", 76 | "lock_0": "2015-09-17", 77 | "lock_1": "14:31:51", 78 | "_save": "Save", 79 | } 80 | response = admin_client.post(url, data) 81 | assert response.status_code == 302 82 | # delete q 83 | url = reverse("admin:django_q_ormq_delete", args=(q.id,)) 84 | data = {"post": "yes"} 85 | response = admin_client.post(url, data) 86 | assert response.status_code == 302 87 | -------------------------------------------------------------------------------- /django_q/tests/test_cached.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Event, Value 2 | 3 | import pytest 4 | 5 | from django_q.brokers import get_broker 6 | from django_q.cluster import monitor, pusher, worker 7 | from django_q.conf import Conf 8 | from django_q.queues import Queue 9 | from django_q.tasks import ( 10 | AsyncTask, 11 | Chain, 12 | Iter, 13 | async_chain, 14 | async_iter, 15 | async_task, 16 | count_group, 17 | delete_cached, 18 | delete_group, 19 | fetch, 20 | fetch_group, 21 | result, 22 | result_group, 23 | ) 24 | 25 | 26 | @pytest.fixture 27 | def broker(monkeypatch): 28 | monkeypatch.setattr(Conf, "DJANGO_REDIS", "default") 29 | return get_broker() 30 | 31 | 32 | @pytest.mark.django_db 33 | def test_cached(broker): 34 | broker.purge_queue() 35 | broker.cache.clear() 36 | group = "cache_test" 37 | # queue the tests 38 | task_id = async_task("math.copysign", 1, -1, cached=True, broker=broker) 39 | async_task("math.copysign", 1, -1, cached=True, broker=broker, group=group) 40 | async_task("math.copysign", 1, -1, cached=True, broker=broker, group=group) 41 | async_task("math.copysign", 1, -1, cached=True, broker=broker, group=group) 42 | async_task("math.copysign", 1, -1, cached=True, broker=broker, group=group) 43 | async_task("math.copysign", 1, -1, cached=True, broker=broker, group=group) 44 | async_task("math.popysign", 1, -1, cached=True, broker=broker, group=group) 45 | iter_id = async_iter("math.floor", [i for i in range(10)], cached=True) 46 | # test wait on cache 47 | # test wait timeout 48 | assert result(task_id, wait=10, cached=True) is None 49 | assert fetch(task_id, wait=10, cached=True) is None 50 | assert result_group(group, wait=10, cached=True) is None 51 | assert result_group(group, count=2, wait=10, cached=True) is None 52 | assert fetch_group(group, wait=10, cached=True) is None 53 | assert fetch_group(group, count=2, wait=10, cached=True) is None 54 | # run a single inline cluster 55 | task_count = 17 56 | assert broker.queue_size() == task_count 57 | task_queue = Queue() 58 | stop_event = Event() 59 | stop_event.set() 60 
| for i in range(task_count): 61 | pusher(task_queue, stop_event, broker=broker) 62 | assert broker.queue_size() == 0 63 | assert task_queue.qsize() == task_count 64 | task_queue.put("STOP") 65 | result_queue = Queue() 66 | worker(task_queue, result_queue, Value("f", -1)) 67 | assert result_queue.qsize() == task_count 68 | result_queue.put("STOP") 69 | monitor(result_queue) 70 | assert result_queue.qsize() == 0 71 | # assert results 72 | assert result(task_id, wait=500, cached=True) == -1 73 | assert fetch(task_id, wait=500, cached=True).result == -1 74 | # make sure it's not in the db backend 75 | assert fetch(task_id) is None 76 | # assert group 77 | assert count_group(group, cached=True) == 6 78 | assert count_group(group, cached=True, failures=True) == 1 79 | assert result_group(group, cached=True) == [-1, -1, -1, -1, -1] 80 | assert len(result_group(group, cached=True, failures=True)) == 6 81 | assert len(fetch_group(group, cached=True)) == 6 82 | assert len(fetch_group(group, cached=True, failures=False)) == 5 83 | delete_group(group, cached=True) 84 | assert count_group(group, cached=True) is None 85 | delete_cached(task_id) 86 | assert result(task_id, cached=True) is None 87 | assert fetch(task_id, cached=True) is None 88 | # iter cached 89 | assert result(iter_id) is None 90 | assert result(iter_id, cached=True) is not None 91 | broker.cache.clear() 92 | 93 | 94 | @pytest.mark.django_db 95 | def test_iter(broker): 96 | broker.purge_queue() 97 | broker.cache.clear() 98 | it = [i for i in range(10)] 99 | it2 = [(1, -1), (2, -1), (3, -4), (5, 6)] 100 | it3 = (1, 2, 3, 4, 5) 101 | t = async_iter("math.floor", it, sync=True) 102 | t2 = async_iter("math.copysign", it2, sync=True) 103 | t3 = async_iter("math.floor", it3, sync=True) 104 | t4 = async_iter("math.floor", (1,), sync=True) 105 | result_t = result(t) 106 | assert result_t is not None 107 | task_t = fetch(t) 108 | assert task_t.result == result_t 109 | assert result(t2) is not None 110 | assert result(t3) is not None 111 | assert result(t4)[0] == 1 112 | # test iter class 113 | i = Iter("math.copysign", sync=True, cached=True) 114 | i.append(1, -1) 115 | i.append(2, -1) 116 | i.append(3, -4) 117 | i.append(5, 6) 118 | assert i.started is False 119 | assert i.length() == 4 120 | assert i.run() is not None 121 | assert len(i.result()) == 4 122 | assert len(i.fetch().result) == 4 123 | i.append(1, -7) 124 | assert i.result() is None 125 | i.run() 126 | assert len(i.result()) == 5 127 | 128 | 129 | @pytest.mark.django_db 130 | def test_chain(broker): 131 | broker.purge_queue() 132 | broker.cache.clear() 133 | task_chain = Chain(sync=True) 134 | task_chain.append("math.floor", 1) 135 | task_chain.append("math.copysign", 1, -1) 136 | task_chain.append("math.floor", 2) 137 | assert task_chain.length() == 3 138 | assert task_chain.current() is None 139 | task_chain.run() 140 | r = task_chain.result(wait=1000) 141 | assert task_chain.current() == task_chain.length() 142 | assert len(r) == task_chain.length() 143 | t = task_chain.fetch() 144 | assert len(t) == task_chain.length() 145 | task_chain.cached = True 146 | task_chain.append("math.floor", 3) 147 | assert task_chain.length() == 4 148 | task_chain.run() 149 | r = task_chain.result(wait=1000) 150 | assert task_chain.current() == task_chain.length() 151 | assert len(r) == task_chain.length() 152 | t = task_chain.fetch() 153 | assert len(t) == task_chain.length() 154 | # test single 155 | rid = async_chain( 156 | ["django_q.tests.tasks.hello", "django_q.tests.tasks.hello"], 157 | 
sync=True, 158 | cached=True, 159 | ) 160 | assert result_group(rid, cached=True) == ["hello", "hello"] 161 | 162 | 163 | @pytest.mark.django_db 164 | def test_asynctask_class(broker, monkeypatch): 165 | broker.purge_queue() 166 | broker.cache.clear() 167 | a = AsyncTask("math.copysign") 168 | assert a.func == "math.copysign" 169 | a.args = (1, -1) 170 | assert a.started is False 171 | a.cached = True 172 | assert a.cached 173 | a.sync = True 174 | assert a.sync 175 | a.broker = broker 176 | assert a.broker == broker 177 | a.run() 178 | assert a.result() == -1 179 | assert a.fetch().result == -1 180 | # again with kwargs 181 | a = AsyncTask("math.copysign", 1, -1, cached=True, sync=True, broker=broker) 182 | a.run() 183 | assert a.result() == -1 184 | # with q_options 185 | a = AsyncTask( 186 | "math.copysign", 187 | 1, 188 | -1, 189 | q_options={"cached": True, "sync": False, "broker": broker}, 190 | ) 191 | assert not a.sync 192 | a.sync = True 193 | assert a.kwargs["q_options"]["sync"] is True 194 | a.run() 195 | assert a.result() == -1 196 | a.group = "async_class_test" 197 | assert a.group == "async_class_test" 198 | a.save = False 199 | assert not a.save 200 | a.hook = "djq.tests.tasks.hello" 201 | assert a.hook == "djq.tests.tasks.hello" 202 | assert a.started is False 203 | a.run() 204 | assert a.result_group() == [-1] 205 | assert a.fetch_group() == [a.fetch()] 206 | # global overrides 207 | monkeypatch.setattr(Conf, "SYNC", True) 208 | monkeypatch.setattr(Conf, "CACHED", True) 209 | a = AsyncTask("math.floor", 1.5) 210 | a.run() 211 | assert a.result() == 1 212 | -------------------------------------------------------------------------------- /django_q/tests/test_commands.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from django.core.management import call_command 3 | 4 | 5 | @pytest.mark.django_db 6 | def test_qcluster(): 7 | call_command("qcluster", run_once=True) 8 | 9 | 10 | @pytest.mark.django_db 11 | def test_qmonitor(): 12 | call_command("qmonitor", run_once=True) 13 | 14 | 15 | @pytest.mark.django_db 16 | def test_qinfo(): 17 | call_command("qinfo") 18 | call_command("qinfo", config=True) 19 | call_command("qinfo", ids=True) 20 | 21 | 22 | @pytest.mark.django_db 23 | def test_qmemory(): 24 | call_command("qmemory", run_once=True) 25 | call_command("qmemory", workers=True, run_once=True) 26 | -------------------------------------------------------------------------------- /django_q/tests/test_monitor.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | import pytest 4 | 5 | from django_q.brokers import get_broker 6 | from django_q.cluster import Cluster 7 | from django_q.conf import Conf 8 | from django_q.monitor import get_ids, info, monitor 9 | from django_q.status import Stat 10 | from django_q.tasks import async_task 11 | 12 | 13 | @pytest.mark.django_db 14 | def test_monitor(monkeypatch): 15 | cluster_id = uuid.uuid4() 16 | assert Stat.get(pid=0, cluster_id=cluster_id).sentinel == 0 17 | c = Cluster() 18 | c.start() 19 | stats = monitor(run_once=True) 20 | assert get_ids() is True 21 | c.stop() 22 | assert len(stats) > 0 23 | found_c = False 24 | for stat in stats: 25 | if stat.cluster_id == c.cluster_id: 26 | found_c = True 27 | assert stat.uptime() > 0 28 | assert stat.empty_queues() is True 29 | break 30 | assert found_c 31 | # test lock size 32 | monkeypatch.setattr(Conf, "ORM", "default") 33 | b = get_broker("monitor_test") 34 | b.enqueue("test") 
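# dequeuing without acknowledging should leave the package in the broker's lock,
# which is what the lock_size() assertion below verifies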
35 | b.dequeue() 36 | assert b.lock_size() == 1 37 | monitor(run_once=True, broker=b) 38 | b.delete_queue() 39 | 40 | 41 | @pytest.mark.django_db 42 | def test_info(): 43 | info() 44 | do_sync() 45 | info() 46 | for _ in range(24): 47 | do_sync() 48 | info() 49 | 50 | 51 | def do_sync(): 52 | async_task("django_q.tests.tasks.countdown", 1, sync=True, save=True) 53 | -------------------------------------------------------------------------------- /django_q/tests/testing_utilities/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/django_q/tests/testing_utilities/__init__.py -------------------------------------------------------------------------------- /django_q/tests/testing_utilities/multiple_database_routers.py: -------------------------------------------------------------------------------- 1 | class TestingReplicaDatabaseRouter: 2 | """ 3 | A router to control all database operations on models in the 4 | auth application. 5 | """ 6 | 7 | def db_for_read(self, model, **hints): 8 | """ 9 | Allows read access from REPLICA database. 10 | """ 11 | return "replica" 12 | 13 | def db_for_write(self, model, **hints): 14 | """ 15 | Always write to DEFAULT database 16 | """ 17 | return "default" 18 | 19 | 20 | class TestingMultipleAppsDatabaseRouter: 21 | """ 22 | A router to control all database operations on models in the 23 | auth application. 24 | """ 25 | 26 | @staticmethod 27 | def is_admin(model): 28 | return model._meta.app_label in ["admin"] 29 | 30 | def db_for_read(self, model, **hints): 31 | if self.is_admin(model): 32 | return "admin" 33 | return "default" 34 | 35 | def db_for_write(self, model, **hints): 36 | if self.is_admin(model): 37 | return "admin" 38 | return "default" 39 | -------------------------------------------------------------------------------- /django_q/tests/urls.py: -------------------------------------------------------------------------------- 1 | from django.contrib import admin 2 | from django.urls import re_path 3 | 4 | urlpatterns = [ 5 | re_path(r"^admin/", admin.site.urls), 6 | ] 7 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = -nW 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make <target>' where <target> is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and an HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/DjangoQ.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/DjangoQ.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle."
101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/DjangoQ" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/DjangoQ" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. 
The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/_static/cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/cluster.png -------------------------------------------------------------------------------- /docs/_static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/favicon.ico -------------------------------------------------------------------------------- /docs/_static/info.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/info.png -------------------------------------------------------------------------------- /docs/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/logo.png -------------------------------------------------------------------------------- /docs/_static/logo_large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/logo_large.png -------------------------------------------------------------------------------- /docs/_static/monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/monitor.png -------------------------------------------------------------------------------- /docs/_static/scheduled.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/scheduled.png -------------------------------------------------------------------------------- /docs/_static/successful.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Koed00/django-q/85baaccd2c3adfe0a414d4237465163e9ff6e5a0/docs/_static/successful.png -------------------------------------------------------------------------------- /docs/admin.rst: -------------------------------------------------------------------------------- 1 | .. _admin_page: 2 | .. py:currentmodule:: django_q 3 | 4 | Admin pages 5 | =========== 6 | 7 | Django Q does not use custom pages, but instead leverages what is offered by Django's model admin by default. 8 | When you open Django Q's admin pages you will see three models: 9 | 10 | Successful tasks 11 | ---------------- 12 | 13 | Shows all successfully executed tasks, meaning they did not encounter any errors during execution. 14 | From here you can look at the details of each task or delete them. Use the group filter to filter your results by schedule name or group id. 15 | The table is searchable by `name`, `func` and `group`. 16 | 17 | .. image:: _static/successful.png 18 | 19 | Uses the :class:`Success` proxy model. 20 | 21 | .. tip:: 22 | 23 | The maximum number of successful tasks can be set using the :ref:`save_limit` option.
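Since :class:`Success` is a proxy model of :class:`Task`, you can also query it from your own code. A minimal sketch (the ``'modf'`` group name is just an example):

.. code-block:: python

    from django_q.models import Success

    # count the stored successful results for an example group
    print(Success.objects.filter(group='modf').count())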
24 | 25 | 26 | 27 | Failed tasks 28 | ------------ 29 | Failed tasks have encountered an error, preventing them from finishing execution. 30 | The worker will try to put the error in the `result` field of the task so you can review what happened. 31 | 32 | You can resubmit a failed task back to the queue using the admin's action menu. 33 | 34 | Uses the :class:`Failure` proxy model. 35 | 36 | 37 | 38 | You can customize the admin UI by creating your own ``admin.ModelAdmin`` class and using ``admin.site.unregister`` and ``admin.site.register`` (from ``django.contrib.admin``) to replace the default, 39 | for example: 40 | 41 | .. code-block:: python 42 | 43 | from django_q import models as q_models 44 | from django_q import admin as q_admin 45 | 46 | admin.site.unregister([q_models.Failure]) 47 | @admin.register(q_models.Failure) 48 | class ChildClassAdmin(q_admin.FailAdmin): 49 | list_display = ( 50 | 'name', 51 | 'func', 52 | 'result', 53 | 'started', 54 | # add attempt_count to list_display 55 | 'attempt_count' 56 | ) 57 | 58 | 59 | 60 | Scheduled tasks 61 | --------------- 62 | 63 | Here you can check on the status of your scheduled tasks, create, edit or delete them. 64 | 65 | .. image:: _static/scheduled.png 66 | 67 | 68 | 69 | Repeats 70 | ~~~~~~~ 71 | If you want a schedule to only run a finite number of times, e.g. every hour for the next 24 hours, you can do that using the :attr:`Schedule.repeats` attribute. 72 | In this case you would set the schedule type to :attr:`Schedule.HOURLY` and the repeats to `24`. Every time the schedule runs, the repeats count down until they hit zero and the schedule is no longer run. 73 | 74 | When you set repeats to ``-1`` the schedule will continue indefinitely and the repeats will still count down. This can be used as an indicator of how many times the schedule has been executed. 75 | 76 | Schedules of type :attr:`Schedule.ONCE` are an exception; negative repeats for this schedule type will cause it to be deleted from the database. 77 | This behavior is useful if you have many delayed actions which you do not necessarily need a result for. A positive number will keep the ONCE schedule, but it will not run again. 78 | 79 | You can pause a schedule by setting its repeats value to zero. 80 | 81 | .. note:: 82 | 83 | To run a ``ONCE`` schedule again, change the repeats to something other than `0`. Set a new run time before you do this or let it execute immediately. 84 | 85 | 86 | Next run 87 | ~~~~~~~~ 88 | 89 | Shows you when this task will be added to the queue next. 90 | 91 | 92 | Last run 93 | ~~~~~~~~ 94 | 95 | Links to the task result of the last scheduled run. Shows nothing if the schedule hasn't run yet or if the task result has been deleted. 96 | 97 | Success 98 | ~~~~~~~ 99 | 100 | Indicates the success status of the last scheduled task, if any. 101 | 102 | .. note:: 103 | 104 | If you have set the :ref:`save_limit` configuration option to not save successful tasks to the database, you will only see the failed results of your schedules. 105 | 106 | 107 | Uses the :class:`Schedule` model. 108 | 109 | Queued tasks 110 | ------------ 111 | This admin view is only enabled when you use the :ref:`orm_broker` broker. 112 | It shows all task packages currently in the broker queue. The ``lock`` column shows the moment at which the package was picked up by the cluster and is used to determine whether it has expired. 113 | For development purposes you can edit and delete queued tasks from here.
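When the ORM broker is enabled you can also inspect the queue programmatically through the :class:`OrmQ` model; a minimal sketch:

.. code-block:: python

    from django_q.models import OrmQ

    # number of task packages currently waiting in the broker queue
    print(OrmQ.objects.count())

    # the lock column shows when a package was picked up by a cluster
    for package in OrmQ.objects.all():
        print(package.key, package.lock)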
114 | -------------------------------------------------------------------------------- /docs/architecture.rst: -------------------------------------------------------------------------------- 1 | Architecture 2 | ------------ 3 | 4 | .. image:: _static/cluster.png 5 | :alt: Django Q schema 6 | 7 | 8 | Signed Tasks 9 | """""""""""" 10 | 11 | Tasks are first pickled and then signed using Django's own :mod:`django.core.signing` module using the ``SECRET_KEY`` and cluster name as salt, before being sent to a message broker. This ensures that task 12 | packages on the broker can only be executed and read by clusters 13 | and Django servers that share the same secret key and cluster name. 14 | If a package fails to unpack, it will be marked failed with the broker and discarded. 15 | Optionally the packages can be compressed before transport. 16 | 17 | Broker 18 | """""" 19 | 20 | The broker collects task packages from the Django instances and queues them for pick up by a cluster. 21 | If the broker supports message receipts, it will keep a copy of the tasks around until a cluster acknowledges the processing of the task. 22 | Otherwise it is put back in the queue after a timeout period. This ensures at-least-once delivery. 23 | Most failed deliveries will be the result of a worker or the cluster crashing before the task was saved. 24 | 25 | .. note:: 26 | When the :ref:`ack_failures` option is set to ``False`` (the default), a task is 27 | considered a failed delivery when it raises an ``Exception``. Set 28 | this option to ``True`` to acknowledge failed tasks as successful. 29 | 30 | Pusher 31 | """""" 32 | 33 | The pusher process continuously checks the broker for new task 34 | packages. It checks the signing and unpacks the task onto the internal Task Queue. 35 | The number of tasks in the Task Queue can be configured to control memory usage and minimize data loss in case of a failure. 36 | 37 | Worker 38 | """""" 39 | 40 | A worker process pulls a task off the Task Queue and sets a shared countdown timer with the :ref:`sentinel`, indicating it is about to start work. 41 | The worker then tries to execute the task and afterwards the timer is reset and any results (including errors) are saved to the package. 42 | Irrespective of the failure or success of any of these steps, the package is then pushed onto the Result Queue. 43 | 44 | Monitor 45 | """"""" 46 | 47 | The result monitor checks the Result Queue for processed packages and 48 | saves both failed and successful packages to the Django database or cache backend. 49 | If the broker supports it, a delivery receipt is sent. 50 | In case the task was part of a chain, the next task is queued. 51 | 52 | .. _sentinel: 53 | 54 | Sentinel 55 | """""""" 56 | 57 | The sentinel spawns all processes and then checks the health of all 58 | workers, including the pusher and the monitor. This includes checking timers on each worker for timeouts. 59 | In case of a sudden death or timeout, it will reincarnate the failing processes. When a stop signal is received, the sentinel will halt the 60 | pusher and instruct the workers and monitor to finish the remaining items. See :ref:`stop_procedure`. 61 | 62 | Timeouts 63 | """""""" 64 | Before each task execution the worker sets a countdown timer on the sentinel and resets it again after execution. 65 | Meanwhile the sentinel watches the timers; if one reaches zero, it will terminate that worker and reincarnate a new one.
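Both the worker timeout and the broker retry period come from the cluster configuration. A minimal sketch of the relevant settings (the values are only illustrative; see :doc:`configure` for the full list):

.. code-block:: python

    # settings.py
    Q_CLUSTER = {
        'name': 'example',
        'workers': 4,
        # seconds a worker may spend on a task before the sentinel terminates it
        'timeout': 60,
        # seconds the broker waits for an acknowledgement before re-queuing a task;
        # keep this higher than the timeout
        'retry': 120,
    }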
66 | 67 | Scheduler 68 | """"""""" 69 | Twice a minute the scheduler checks for any scheduled tasks that should be starting. 70 | 71 | - Creates a task from the schedule 72 | - Subtracts 1 from :attr:`django_q.Schedule.repeats` 73 | - Sets the next run time if there are repeats left or if it has a negative value. 74 | 75 | .. _stop_procedure: 76 | 77 | Stop procedure 78 | """""""""""""" 79 | 80 | When a stop signal is received, the sentinel exits the guard loop and instructs the pusher to stop pushing. 81 | Once this is confirmed, the sentinel pushes poison pills onto the task queue and will wait for all the workers to exit. 82 | This ensures that the task queue is emptied before the workers exit. 83 | Afterwards the sentinel waits for the monitor to empty the result queue and the stop procedure is complete. 84 | 85 | - Send stop event to pusher 86 | - Wait for pusher to exit 87 | - Put poison pills in the Task Queue 88 | - Wait for all the workers to clear the queue and stop 89 | - Put a poison pill on the Result Queue 90 | - Wait for monitor to process remaining results and exit 91 | - Signal that we have stopped 92 | 93 | .. warning:: 94 | If you force the cluster to terminate before the stop procedure has completed, you can lose tasks or results still being held in memory. 95 | You can manage the number of tasks in a cluster's memory by setting the :ref:`queue_limit`. 96 | -------------------------------------------------------------------------------- /docs/brokers.rst: -------------------------------------------------------------------------------- 1 | Brokers 2 | ======= 3 | 4 | The broker sits between your Django instances and your Django Q cluster instances; accepting, saving and delivering task packages. 5 | Currently we support a variety of brokers, from the default Redis and bleeding-edge Disque to the convenient ORM and fast MongoDB. 6 | 7 | The default Redis broker does not support message receipts. 8 | This means that in case of a catastrophic failure of the cluster server or worker timeouts, tasks that were being executed get lost. 9 | Keep in mind this is not the same as a failing task. If a task's code crashes, this should only lead to a failed task status. 10 | 11 | Even though this might be acceptable in some use cases, you might prefer brokers with message receipt support. 12 | These guarantee delivery by waiting for the cluster to send a receipt after the task has been processed. 13 | In case a receipt has not been received after a set time, the task package is put back in the queue. 14 | Django Q supports this behavior by setting the :ref:`retry` timer on brokers that support message receipts. 15 | 16 | Some pointers: 17 | 18 | * Don't set the :ref:`retry` timer to a value lower than or equal to the task timeout. 19 | * Retry time includes the time a task spends waiting in the cluster's internal queue. 20 | * Don't set the :ref:`queue_limit` so high that tasks time out while waiting to be processed. 21 | * In case a task is worked on twice, the task result will be updated with the latest results. 22 | * In some rare cases a non-atomic broker will re-queue a task after it has been acknowledged. 23 | * If a task runs twice and a previous run has succeeded, the new result will be discarded. 24 | * Limiting the number of retries is handled globally in your actual broker's settings. 25 | 26 | 27 | Support for more brokers is being worked on. 28 | 29 | 30 | Redis 31 | ----- 32 | The default broker for Django Q clusters.
33 | 34 | * Atomic 35 | * Requires `Redis-py `__ client library: ``pip install redis`` 36 | * Does not need cache framework for monitoring 37 | * Does not support receipts 38 | * Can use existing :ref:`django_redis` connections. 39 | * Configure with :ref:`redis_configuration`-py compatible configuration 40 | 41 | Disque 42 | ------ 43 | Unlike Redis, Disque supports message receipts which make delivery to the cluster workers guaranteed. 44 | In our tests it is as fast or faster than the Redis broker. 45 | You can control the amount of time Disque should wait for completion of a task by configuring the :ref:`retry` setting. 46 | Bulk task retrieval is supported via the :ref:`bulk` option. 47 | 48 | * Delivery receipts 49 | * Atomic 50 | * Needs Django's `Cache framework `__ configured for monitoring 51 | * Compatible with `Tynd `__ Disque addon on `Heroku `__ 52 | * Still considered Alpha software 53 | * Supports bulk dequeue 54 | * Requires `Redis-py `__ client library: ``pip install redis`` 55 | * See the :ref:`disque_configuration` configuration section for more info. 56 | 57 | IronMQ 58 | ------ 59 | This HTTP based queue service is both available directly via `Iron.io `__ and as an add-on on Heroku. 60 | 61 | * Delivery receipts 62 | * Supports bulk dequeue 63 | * Needs Django's `Cache framework `__ configured for monitoring 64 | * Requires the `iron-mq `__ client library: ``pip install iron-mq`` 65 | * See the :ref:`ironmq_configuration` configuration section for options. 66 | 67 | Amazon SQS 68 | ---------- 69 | Amazon's Simple Queue Service is another HTTP based message queue. 70 | Although `SQS `__ is not the fastest, it is stable, cheap and convenient if you already use AWS. 71 | 72 | * Delivery receipts 73 | * Maximum message size is 256Kb 74 | * Supports bulk dequeue up to 10 messages with a maximum total size of 256Kb 75 | * Needs Django's `Cache framework `__ configured for monitoring 76 | * Requires the `boto3 `__ client library: ``pip install boto3`` 77 | * See the :ref:`sqs_configuration` configuration section for options. 78 | 79 | 80 | MongoDB 81 | ------- 82 | This highly scalable NoSQL database makes for a very fast and reliably persistent at-least-once message broker. 83 | Usually available on most PaaS providers, as `open-source `__ or commercial `enterprise `__ edition. 84 | 85 | * Delivery receipts 86 | * Needs Django's `Cache framework `__ configured for monitoring 87 | * Can be configured as the Django cache-backend through several open-source cache providers. 88 | * Requires the `pymongo `__ driver: ``pip install pymongo`` 89 | * See the :ref:`mongo_configuration` configuration section for options. 90 | 91 | .. _orm_broker: 92 | 93 | Django ORM 94 | ---------- 95 | Select this to use Django's database backend as a message broker. 96 | Unless you have configured a dedicated database backend for it, this should probably not be your first choice for a high traffic setup. 97 | However for a medium message rate and scheduled tasks, this is the most convenient guaranteed delivery broker. 98 | 99 | * Delivery receipts 100 | * Supports bulk dequeue 101 | * Needs Django's `Cache framework `__ configured for monitoring 102 | * Can be `configured `__ as its own cache backend. 103 | * Queue editable in Django Admin 104 | * See the :ref:`orm_configuration` configuration on how to set it up. 105 | 106 | 107 | 108 | Custom Broker 109 | ------------- 110 | You can override the :class:`Broker` or any of its existing derived broker types. 111 | 112 | .. 
code-block:: python 113 | 114 | # example Custom broker.py 115 | from django_q.brokers import Broker 116 | 117 | class CustomBroker(Broker): 118 | def info(self): 119 | return 'My Custom Broker' 120 | 121 | Using the :ref:`broker_class` configuration setting you can then instruct Django Q to use this instead of one of the existing brokers: 122 | 123 | .. code-block:: python 124 | 125 | # example Custom broker class connection 126 | 127 | Q_CLUSTER = { 128 | 'name': 'Custom', 129 | 'workers': 8, 130 | 'timeout': 60, 131 | 'broker_class': 'myapp.broker.CustomBroker' 132 | } 133 | 134 | If you do write a custom broker for one of the many message queueing servers out there that we don't support yet, please consider contributing it to the project. 135 | 136 | Reference 137 | --------- 138 | The :class:`Broker` class is used internally to communicate with the different types of brokers. 139 | You can override this class if you want to contribute and support your own broker. 140 | 141 | .. py:class:: Broker 142 | 143 | .. py:method:: async_task(task) 144 | 145 | Sends a task package to the broker queue and returns a tracking id if available. 146 | 147 | .. py:method:: dequeue() 148 | 149 | Gets packages from the broker and returns a list of tuples with a tracking id and the package. 150 | 151 | .. py:method:: acknowledge(id) 152 | 153 | Notifies the broker that the task has been processed. 154 | Only works with brokers that support delivery receipts. 155 | 156 | .. py:method:: fail(id) 157 | 158 | Tells the broker that the message failed to be processed by the cluster. 159 | Only available on brokers that support this. 160 | Currently only occurs when a cluster fails to unpack a task package. 161 | 162 | .. py:method:: delete(id) 163 | 164 | Instructs the broker to delete this message from the queue. 165 | 166 | .. py:method:: purge_queue() 167 | 168 | Empties the current queue of all messages. 169 | 170 | .. py:method:: delete_queue() 171 | 172 | Deletes the current queue from the broker. 173 | 174 | .. py:method:: queue_size() 175 | 176 | Returns the number of messages in the broker's queue. 177 | 178 | .. py:method:: lock_size() 179 | 180 | Optional method that returns the number of messages currently awaiting acknowledgement. 181 | Only implemented on brokers that support it. 182 | 183 | .. py:method:: ping() 184 | 185 | Returns True if the broker can be reached. 186 | 187 | .. py:method:: info() 188 | 189 | Shows the name and version of the currently configured broker. 190 | 191 | .. py:function:: brokers.get_broker() 192 | 193 | Returns a :class:`Broker` instance based on the current configuration. 194 | -------------------------------------------------------------------------------- /docs/chain.rst: -------------------------------------------------------------------------------- 1 | .. py:currentmodule:: django_q 2 | 3 | Chains 4 | ====== 5 | Sometimes you want to run tasks sequentially. For that you can use the :func:`async_chain` function: 6 | 7 | .. code-block:: python 8 | 9 | # async a chain of tasks 10 | from django_q.tasks import async_chain, result_group 11 | 12 | # the chain must be in the format 13 | # [(func,(args),{kwargs}),(func,(args),{kwargs}),..] 14 | group_id = async_chain([('math.copysign', (1, -1)), 15 | ('math.floor', (1,))]) 16 | 17 | # get group result 18 | result_group(group_id, count=2) 19 | 20 | A slightly more convenient way is to use a :class:`Chain` instance: 21 | 22 | .. 
code-block:: python 23 | 24 | # Chain async 25 | from django_q.tasks import Chain 26 | 27 | # create a chain that uses the cache backend 28 | chain = Chain(cached=True) 29 | 30 | # add some tasks 31 | chain.append('math.copysign', 1, -1) 32 | chain.append('math.floor', 1) 33 | 34 | # run it 35 | chain.run() 36 | 37 | print(chain.result()) 38 | .. code-block:: python 39 | 40 | [-1.0, 1] 41 | 42 | Reference 43 | --------- 44 | .. py:function:: async_chain(chain, group=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None) 45 | 46 | Async a chain of tasks. See also the :class:`Chain` class. 47 | 48 | :param list chain: a list of tasks in the format [(func,(args),{kwargs}), (func,(args),{kwargs})] 49 | :param str group: an optional group name. 50 | :param bool cached: run this against the cache backend 51 | :param bool sync: execute this inline instead of asynchronously 52 | 53 | .. py:class:: Chain(chain=None, group=None, cached=Conf.CACHED, sync=Conf.SYNC) 54 | 55 | A sequential chain of tasks. Acts as a convenient wrapper for :func:`async_chain`. 56 | You can pass the task chain at construction or you can append individual tasks before running them. 57 | 58 | :param list chain: a list of tasks in the format [(func,(args),{kwargs}), (func,(args),{kwargs})] 59 | :param str group: an optional group name. 60 | :param bool cached: run this against the cache backend 61 | :param bool sync: execute this inline instead of asynchronously 62 | 63 | 64 | .. py:method:: append(func, *args, **kwargs) 65 | 66 | Append a task to the chain. Takes the same arguments as :func:`async_task` 67 | 68 | :return: the current number of tasks in the chain 69 | :rtype: int 70 | 71 | 72 | .. py:method:: run() 73 | 74 | Start queueing the chain to the worker cluster. 75 | 76 | :return: the chain's group id 77 | 78 | 79 | .. py:method:: result(wait=0) 80 | 81 | Return the full list of results from the chain when it finishes. Blocks until timeout or result. 82 | 83 | :param int wait: how many milliseconds to wait for a result 84 | :return: an unsorted list of results 85 | 86 | 87 | .. py:method:: fetch(failures=True, wait=0) 88 | 89 | Get the task result objects from the chain when it finishes. Blocks until timeout or result. 90 | 91 | :param failures: include failed tasks 92 | :param int wait: how many milliseconds to wait for a result 93 | :return: an unsorted list of task objects 94 | 95 | .. py:method:: current() 96 | 97 | Get the index of the currently executing chain element 98 | 99 | :return int: current chain index 100 | 101 | .. py:method:: length() 102 | 103 | Get the length of the chain 104 | 105 | :return int: length of the chain 106 | -------------------------------------------------------------------------------- /docs/cluster.rst: -------------------------------------------------------------------------------- 1 | 2 | Cluster 3 | ======= 4 | .. py:currentmodule:: django_q 5 | 6 | Django Q uses Python's multiprocessing module to manage a pool of workers that will handle your tasks. 7 | Start your cluster using Django's ``manage.py`` command:: 8 | 9 | $ python manage.py qcluster 10 | 11 | 12 | You should see the cluster starting :: 13 | 14 | 10:57:40 [Q] INFO Q Cluster-31781 starting.
15 | 10:57:40 [Q] INFO Process-1:1 ready for work at 31784 16 | 10:57:40 [Q] INFO Process-1:2 ready for work at 31785 17 | 10:57:40 [Q] INFO Process-1:3 ready for work at 31786 18 | 10:57:40 [Q] INFO Process-1:4 ready for work at 31787 19 | 10:57:40 [Q] INFO Process-1:5 ready for work at 31788 20 | 10:57:40 [Q] INFO Process-1:6 ready for work at 31789 21 | 10:57:40 [Q] INFO Process-1:7 ready for work at 31790 22 | 10:57:40 [Q] INFO Process-1:8 ready for work at 31791 23 | 10:57:40 [Q] INFO Process-1:9 monitoring at 31792 24 | 10:57:40 [Q] INFO Process-1 guarding cluster at 31783 25 | 10:57:40 [Q] INFO Process-1:10 pushing tasks at 31793 26 | 10:57:40 [Q] INFO Q Cluster-31781 running. 27 | 28 | 29 | Stopping the cluster with ctrl-c, or with either the ``SIGTERM`` or ``SIGKILL`` signal, will initiate the :ref:`stop_procedure`:: 30 | 31 | 16:44:12 [Q] INFO Q Cluster-31781 stopping. 32 | 16:44:12 [Q] INFO Process-1 stopping cluster processes 33 | 16:44:13 [Q] INFO Process-1:10 stopped pushing tasks 34 | 16:44:13 [Q] INFO Process-1:6 stopped doing work 35 | 16:44:13 [Q] INFO Process-1:4 stopped doing work 36 | 16:44:13 [Q] INFO Process-1:1 stopped doing work 37 | 16:44:13 [Q] INFO Process-1:5 stopped doing work 38 | 16:44:13 [Q] INFO Process-1:7 stopped doing work 39 | 16:44:13 [Q] INFO Process-1:3 stopped doing work 40 | 16:44:13 [Q] INFO Process-1:8 stopped doing work 41 | 16:44:13 [Q] INFO Process-1:2 stopped doing work 42 | 16:44:14 [Q] INFO Process-1:9 stopped monitoring results 43 | 16:44:15 [Q] INFO Q Cluster-31781 has stopped. 44 | 45 | The number of workers, optional timeouts, recycles and cpu_affinity can be controlled via the :doc:`configure` settings. 46 | 47 | Multiple Clusters 48 | ----------------- 49 | You can have multiple clusters on multiple machines, working on the same queue as long as: 50 | 51 | - They connect to the same :doc:`brokers`. 52 | - They use the same cluster name. See :doc:`configure` 53 | - They share the same ``SECRET_KEY`` for Django. 54 | 55 | Using a Procfile 56 | ---------------- 57 | If you host on `Heroku `__ or you are using `Honcho `__ you can start the cluster from a :file:`Procfile` with an entry like this:: 58 | 59 | worker: python manage.py qcluster 60 | 61 | Process managers 62 | ---------------- 63 | While you certainly can run Django Q with a process manager like `Supervisor `__ or `Circus `__ it is not strictly necessary. 64 | The cluster has an internal sentinel that checks the health of all the processes and recycles or reincarnates according to your settings or in case of unexpected crashes. 65 | Because of the multiprocessing daemonic nature of the cluster, it is impossible for a process manager to determine the cluster's health and resource usage. 66 | 67 | An example :file:`circus.ini` :: 68 | 69 | [circus] 70 | check_delay = 5 71 | endpoint = tcp://127.0.0.1:5555 72 | pubsub_endpoint = tcp://127.0.0.1:5556 73 | stats_endpoint = tcp://127.0.0.1:5557 74 | 75 | [watcher:django_q] 76 | cmd = python manage.py qcluster 77 | numprocesses = 1 78 | copy_env = True 79 | 80 | Note that we only start one process. It is not a good idea to run multiple instances of the cluster in the same environment since this does nothing to increase performance and in all likelihood will diminish it.
81 | Control your cluster using the ``workers``, ``recycle`` and ``timeout`` settings in your :doc:`configure`. 82 | 83 | An example :file:`supervisor.conf` :: 84 | 85 | [program:django-q] 86 | command = python manage.py qcluster 87 | stopasgroup = true 88 | 89 | Supervisor's ``stopasgroup`` will ensure that the single process doesn't leave orphan processes on stop or restart. 90 | 91 | Reference 92 | --------- 93 | 94 | .. py:class:: Cluster 95 | 96 | .. py:method:: start 97 | 98 | Spawns a cluster and then returns. 99 | 100 | .. py:method:: stop 101 | 102 | Initiates :ref:`stop_procedure` and waits for it to finish. 103 | 104 | .. py:method:: stat 105 | 106 | Returns a :class:`Stat` object with the current cluster status. 107 | 108 | .. py:attribute:: pid 109 | 110 | The cluster process id. 111 | 112 | .. py:attribute:: host 113 | 114 | The current hostname 115 | 116 | .. py:attribute:: sentinel 117 | 118 | Returns the :class:`multiprocessing.Process` containing the :ref:`sentinel`. 119 | 120 | .. py:attribute:: timeout 121 | 122 | The cluster's timeout setting in seconds 123 | 124 | .. py:attribute:: start_event 125 | 126 | A :class:`multiprocessing.Event` indicating if the :ref:`sentinel` has finished starting the cluster 127 | 128 | .. py:attribute:: stop_event 129 | 130 | A :class:`multiprocessing.Event` used to instruct the :ref:`sentinel` to initiate the :ref:`stop_procedure` 131 | 132 | .. py:attribute:: is_starting 133 | 134 | Bool. Indicates that the cluster is busy starting up 135 | 136 | .. py:attribute:: is_running 137 | 138 | Bool. Tells you if the cluster is up and running. 139 | 140 | .. py:attribute:: is_stopping 141 | 142 | Bool. Shows that the stop procedure has been started. 143 | 144 | .. py:attribute:: has_stopped 145 | 146 | Bool. Tells you if the cluster has finished the stop procedure 147 | 148 | 149 | 150 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Django Q documentation build configuration file, created by 5 | # sphinx-quickstart on Fri Jun 26 22:18:36 2015. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import os 17 | import sys 18 | 19 | import alabaster 20 | 21 | myPath = os.path.dirname(os.path.abspath(__file__)) 22 | sys.path.insert(0, myPath + '/../') 23 | os.environ['DJANGO_SETTINGS_MODULE'] = 'django_q.tests.settings' 24 | nitpick_ignore = [('py:class', 'datetime')] 25 | 26 | # If extensions (or modules to document with autodoc) are in another directory, 27 | # add these directories to sys.path here. If the directory is relative to the 28 | # documentation root, use os.path.abspath to make it absolute, like shown here. 29 | # sys.path.insert(0, os.path.abspath('.')) 30 | 31 | # -- General configuration ------------------------------------------------ 32 | 33 | # If your documentation needs a minimal Sphinx version, state it here. 34 | # needs_sphinx = '1.0' 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be 37 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 38 | # ones.
39 | extensions = [ 40 | 'alabaster', 41 | 'sphinx.ext.todo', 42 | 'sphinx.ext.intersphinx', 43 | # 'sphinx.ext.autodoc' 44 | ] 45 | 46 | intersphinx_mapping = {'python': ('https://docs.python.org/3.8', None), 47 | 'django': ('https://docs.djangoproject.com/en/2.2/', 48 | 'https://docs.djangoproject.com/en/2.2/_objects/')} 49 | 50 | # Add any paths that contain templates here, relative to this directory. 51 | templates_path = ['_templates'] 52 | 53 | # The suffix(es) of source filenames. 54 | # You can specify multiple suffix as a list of string: 55 | # source_suffix = ['.rst', '.md'] 56 | source_suffix = '.rst' 57 | 58 | # The encoding of source files. 59 | # source_encoding = 'utf-8-sig' 60 | 61 | # The master toctree document. 62 | master_doc = 'index' 63 | 64 | # General information about the project. 65 | project = 'Django Q' 66 | copyright = '2015-2021, Ilan Steemers' 67 | author = 'Ilan Steemers' 68 | 69 | # The version info for the project you're documenting, acts as replacement for 70 | # |version| and |release|, also used in various other places throughout the 71 | # built documents. 72 | # 73 | # The short X.Y version. 74 | version = '1.3' 75 | # The full version, including alpha/beta/rc tags. 76 | release = '1.3.9' 77 | 78 | # The language for content autogenerated by Sphinx. Refer to documentation 79 | # for a list of supported languages. 80 | # 81 | # This is also used if you do content translation via gettext catalogs. 82 | # Usually you set "language" from the command line for these cases. 83 | language = 'en' 84 | 85 | # There are two options for replacing |today|: either, you set today to some 86 | # non-false value, then it is used: 87 | # today = '' 88 | # Else, today_fmt is used as the format for a strftime call. 89 | # today_fmt = '%B %d, %Y' 90 | 91 | # List of patterns, relative to source directory, that match files and 92 | # directories to ignore when looking for source files. 93 | exclude_patterns = ['_build'] 94 | 95 | # The reST default role (used for this markup: `text`) to use for all 96 | # documents. 97 | # default_role = None 98 | 99 | # If true, '()' will be appended to :func: etc. cross-reference text. 100 | # add_function_parentheses = True 101 | 102 | # If true, the current module name will be prepended to all description 103 | # unit titles (such as .. function::). 104 | add_module_names = False 105 | 106 | # If true, sectionauthor and moduleauthor directives will be shown in the 107 | # output. They are ignored by default. 108 | # show_authors = False 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | # modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 117 | # keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = True 121 | 122 | 123 | # -- Options for HTML output ---------------------------------------------- 124 | 125 | # The theme to use for HTML and HTML Help pages. See the documentation for 126 | # a list of builtin themes. 127 | html_theme = 'alabaster' 128 | 129 | # Theme options are theme-specific and customize the look and feel of a theme 130 | # further. 
For a list of options available for each theme, see the 131 | # documentation. 132 | html_theme_options = { 133 | 'description': "A multiprocessing task queue for Django", 134 | 'logo': 'logo.png', 135 | 'github_user': 'Koed00', 136 | 'github_repo': 'django-q', 137 | 'github_banner': True, 138 | 'travis_button': True, 139 | 'analytics_id': 'UA-64807059-1' 140 | } 141 | html_sidebars = { 142 | '**': [ 143 | 'about.html', 144 | 'navigation.html', 145 | 'relations.html', 146 | 'searchbox.html', 147 | ] 148 | } 149 | # Add any paths that contain custom themes here, relative to this directory. 150 | html_theme_path = [alabaster.get_path()] 151 | 152 | 153 | # The name for this set of Sphinx documents. If None, it defaults to 154 | # "<project> v<release> documentation". 155 | # html_title = None 156 | 157 | # A shorter title for the navigation bar. Default is the same as html_title. 158 | # html_short_title = None 159 | 160 | # The name of an image file (relative to this directory) to place at the top 161 | # of the sidebar. 162 | # html_logo = None 163 | 164 | # The name of an image file (within the static path) to use as favicon of the 165 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 166 | # pixels large. 167 | html_favicon = '_static/favicon.ico' 168 | 169 | # Add any paths that contain custom static files (such as style sheets) here, 170 | # relative to this directory. They are copied after the builtin static files, 171 | # so a file named "default.css" will overwrite the builtin "default.css". 172 | html_static_path = ['_static'] 173 | 174 | # Add any extra paths that contain custom files (such as robots.txt or 175 | # .htaccess) here, relative to this directory. These files are copied 176 | # directly to the root of the documentation. 177 | # html_extra_path = [] 178 | 179 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 180 | # using the given strftime format. 181 | # html_last_updated_fmt = '%b %d, %Y' 182 | 183 | # If true, SmartyPants will be used to convert quotes and dashes to 184 | # typographically correct entities. 185 | # html_use_smartypants = True 186 | 187 | # Custom sidebar templates, maps document names to template names. 188 | # html_sidebars = {} 189 | 190 | # Additional templates that should be rendered to pages, maps page names to 191 | # template names. 192 | # html_additional_pages = {} 193 | 194 | # If false, no module index is generated. 195 | # html_domain_indices = True 196 | 197 | # If false, no index is generated. 198 | # html_use_index = True 199 | 200 | # If true, the index is split into individual pages for each letter. 201 | # html_split_index = False 202 | 203 | # If true, links to the reST sources are added to the pages. 204 | # html_show_sourcelink = True 205 | 206 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 207 | # html_show_sphinx = True 208 | 209 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 210 | # html_show_copyright = True 211 | 212 | # If true, an OpenSearch description file will be output, and all pages will 213 | # contain a <link> tag referring to it. The value of this option must be the 214 | # base URL from which the finished HTML is served. 215 | # html_use_opensearch = '' 216 | 217 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 218 | # html_file_suffix = None 219 | 220 | # Language to be used for generating the HTML full-text search index.
221 | # Sphinx supports the following languages: 222 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 223 | # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' 224 | # html_search_language = 'en' 225 | 226 | # A dictionary with options for the search language support, empty by default. 227 | # Now only 'ja' uses this config value 228 | # html_search_options = {'type': 'default'} 229 | 230 | # The name of a javascript file (relative to the configuration directory) that 231 | # implements a search results scorer. If empty, the default will be used. 232 | # html_search_scorer = 'scorer.js' 233 | 234 | # Output file base name for HTML help builder. 235 | htmlhelp_basename = 'DjangoQdoc' 236 | 237 | # -- Options for LaTeX output --------------------------------------------- 238 | 239 | latex_elements = { 240 | # The paper size ('letterpaper' or 'a4paper'). 241 | # 'papersize': 'letterpaper', 242 | 243 | # The font size ('10pt', '11pt' or '12pt'). 244 | # 'pointsize': '10pt', 245 | 246 | # Additional stuff for the LaTeX preamble. 247 | # 'preamble': '', 248 | 249 | # Latex figure (float) alignment 250 | # 'figure_align': 'htbp', 251 | } 252 | 253 | # Grouping the document tree into LaTeX files. List of tuples 254 | # (source start file, target name, title, 255 | # author, documentclass [howto, manual, or own class]). 256 | latex_documents = [ 257 | (master_doc, 'DjangoQ.tex', 'Django Q Documentation', 258 | 'Ilan Steemers', 'manual'), 259 | ] 260 | 261 | # The name of an image file (relative to this directory) to place at the top of 262 | # the title page. 263 | latex_logo = '_static/logo_large.png' 264 | 265 | # For "manual" documents, if this is true, then toplevel headings are parts, 266 | # not chapters. 267 | # latex_use_parts = False 268 | 269 | # If true, show page references after internal links. 270 | # latex_show_pagerefs = False 271 | 272 | # If true, show URL addresses after external links. 273 | # latex_show_urls = False 274 | 275 | # Documents to append as an appendix to all manuals. 276 | # latex_appendices = [] 277 | 278 | # If false, no module index is generated. 279 | # latex_domain_indices = True 280 | 281 | 282 | # -- Options for manual page output --------------------------------------- 283 | 284 | # One entry per manual page. List of tuples 285 | # (source start file, name, description, authors, manual section). 286 | man_pages = [ 287 | (master_doc, 'djangoq', 'Django Q Documentation', 288 | [author], 1) 289 | ] 290 | 291 | 292 | # If true, show URL addresses after external links. 293 | # man_show_urls = False 294 | 295 | 296 | # -- Options for Texinfo output ------------------------------------------- 297 | 298 | # Grouping the document tree into Texinfo files. List of tuples 299 | # (source start file, target name, title, author, 300 | # dir menu entry, description, category) 301 | texinfo_documents = [ 302 | (master_doc, 'DjangoQ', 'Django Q Documentation', 303 | author, 'DjangoQ', 'A multiprocessing distributed task queue for Django.', 304 | 'Miscellaneous'), 305 | ] 306 | 307 | # Documents to append as an appendix to all manuals. 308 | # texinfo_appendices = [] 309 | 310 | # If false, no module index is generated. 311 | texinfo_domain_indices = True 312 | 313 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 314 | # texinfo_show_urls = 'footnote' 315 | 316 | # If true, do not generate a @detailmenu in the "Top" node's menu. 
317 | # texinfo_no_detailmenu = False
318 | 
--------------------------------------------------------------------------------
/docs/errors.rst:
--------------------------------------------------------------------------------
1 | Errors
2 | ------
3 | .. py:currentmodule:: django_q
4 | 
5 | Django Q uses a pluggable error reporter system based upon Python `extras `__, allowing anyone to develop plugins for their favorite error reporting and monitoring integration. Currently implemented examples include `Rollbar `__ and `Sentry `__.
6 | 
7 | Error reporting plugins register a class which implements a ``report`` method, which is invoked when a Django Q cluster encounters an error, passing information to the particular service. Error reporters must be :ref:`configured` via the ``Q_CLUSTER`` dictionary in your :file:`settings.py`. These settings are passed as kwargs upon initialization of the error reporter. Therefore, in order to implement a new plugin, a package must expose a class which will be instantiated with the necessary information via the ``Q_CLUSTER`` settings and which implements a single ``report`` method.
8 | 
9 | For example implementations, see `django-q-rollbar `__ and `django-q-sentry `__
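10 | 
11 | As an illustration of that contract, a minimal plugin class could look like the sketch below. The class name, the ``api_token`` setting and the ``print`` transport are hypothetical stand-ins, not taken from an existing plugin:
12 | 
13 | .. code-block:: python
14 | 
15 |     import sys
16 | 
17 |     class HypotheticalReporter:
18 |         # kwargs come from this reporter's entry in the Q_CLUSTER settings
19 |         def __init__(self, api_token=None):
20 |             self.api_token = api_token
21 | 
22 |         def report(self):
23 |             # called while the task exception is being handled, so
24 |             # sys.exc_info() still describes the active exception
25 |             exc_type, exc_value, _ = sys.exc_info()
26 |             print(f"[{exc_type.__name__}] {exc_value}")  # stand-in for a real API call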
--------------------------------------------------------------------------------
/docs/group.rst:
--------------------------------------------------------------------------------
1 | .. py:currentmodule:: django_q
2 | 
3 | Groups
4 | ======
5 | You can group together results by passing :func:`async_task` the optional ``group`` keyword:
6 | 
7 | .. code-block:: python
8 | 
9 |     # result group example
10 |     from django_q.tasks import async_task, result_group
11 | 
12 |     for i in range(4):
13 |         async_task('math.modf', i, group='modf')
14 | 
15 |     # wait until the group has 4 results
16 |     result = result_group('modf', count=4)
17 |     print(result)
18 | 
19 | .. code-block:: python
20 | 
21 |     [(0.0, 0.0), (0.0, 1.0), (0.0, 2.0), (0.0, 3.0)]
22 | 
23 | Note that this particular example can be achieved much faster with :doc:`iterable`.
24 | 
25 | Take care to not limit your results database too much and call :func:`delete_group` before each run, unless you want your results to keep adding up.
26 | Instead of :func:`result_group` you can also use :func:`fetch_group` to return a queryset of :class:`Task` objects:
27 | 
28 | .. code-block:: python
29 | 
30 |     # fetch group example
31 |     from django_q.tasks import fetch_group, count_group, result_group
32 | 
33 |     # count the number of failures
34 |     failure_count = count_group('modf', failures=True)
35 | 
36 |     # only use the successes
37 |     results = fetch_group('modf')
38 |     if failure_count:
39 |         results = results.exclude(success=False)
40 |     results = [task.result for task in results]
41 | 
42 |     # this is the same as
43 |     results = fetch_group('modf', failures=False)
44 |     results = [task.result for task in results]
45 | 
46 |     # and the same as
47 |     results = result_group('modf')  # filters failures by default
48 | 
49 | 
50 | Getting results by using :func:`result_group` is of course much faster than using :func:`fetch_group`, but it doesn't offer the benefits of Django's queryset functions.
51 | 
52 | .. note::
53 | 
54 |     Calling ``Queryset.values`` for the result on Django 1.7 or lower will return a list of encoded results.
55 |     If you can't upgrade to Django 1.8, use list comprehension or an iterator to return decoded results.
56 | 
57 | You can also access group functions from a task result instance:
58 | 
59 | .. code-block:: python
60 | 
61 |     from django_q.tasks import fetch
62 | 
63 |     task = fetch('winter-speaker-alpha-ceiling')
64 |     if task.group_count() > 100:
65 |         print(task.group_result())
66 |         task.group_delete()
67 |         print('Deleted group {}'.format(task.group))
68 | 
69 | or call them directly on an :class:`AsyncTask` object:
70 | 
71 | .. code-block:: python
72 | 
73 |     from django_q.tasks import AsyncTask
74 | 
75 |     # add a task to the math group and run it cached
76 |     a = AsyncTask('math.floor', 2.5, group='math', cached=True)
77 | 
78 |     # wait until this task's group has 10 results
79 |     result = a.result_group(count=10)
80 | 
81 | Reference
82 | ---------
83 | .. py:function:: result_group(group_id, failures=False, wait=0, count=None, cached=False)
84 | 
85 |    Returns the results of a task group
86 | 
87 |    :param str group_id: the group identifier
88 |    :param bool failures: set this to ``True`` to include failed results
89 |    :param int wait: optional milliseconds to wait for a result or count. -1 for indefinite
90 |    :param int count: block until there are this many results in the group
91 |    :param bool cached: run this against the cache backend
92 |    :returns: a list of results
93 |    :rtype: list
94 | 
95 | .. py:function:: fetch_group(group_id, failures=True, wait=0, count=None, cached=False)
96 | 
97 |    Returns a list of tasks in a group
98 | 
99 |    :param str group_id: the group identifier
100 |    :param bool failures: set this to ``False`` to exclude failed tasks
101 |    :param int wait: optional milliseconds to wait for a task or count. -1 for indefinite
102 |    :param int count: block until there are this many tasks in the group
103 |    :param bool cached: run this against the cache backend.
104 |    :returns: a list of :class:`Task`
105 |    :rtype: list
106 | 
107 | .. py:function:: count_group(group_id, failures=False, cached=False)
108 | 
109 |    Counts the number of task results in a group.
110 | 
111 |    :param str group_id: the group identifier
112 |    :param bool failures: counts the number of failures if ``True``
113 |    :param bool cached: run this against the cache backend.
114 |    :returns: the number of tasks or failures in a group
115 |    :rtype: int
116 | 
117 | .. py:function:: delete_group(group_id, tasks=False, cached=False)
118 | 
119 |    Deletes a group label from the database.
120 | 
121 |    :param str group_id: the group identifier
122 |    :param bool tasks: also deletes the associated tasks if ``True``
123 |    :param bool cached: run this against the cache backend.
124 |    :returns: the number of tasks affected
125 |    :rtype: int
126 | 
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Django Q documentation master file, created by
2 |    sphinx-quickstart on Fri Jun 26 22:18:36 2015.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | Welcome to Django Q
7 | ===================
8 | Django Q is a native Django task queue, scheduler and worker application using Python multiprocessing.
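9 | 
10 | As a quick taste of the API, queueing a function and collecting its result can look like the minimal sketch below (it assumes a configured broker and a running cluster; see :doc:`install` and :doc:`configure`):
11 | 
12 | .. code-block:: python
13 | 
14 |     from django_q.tasks import async_task, result
15 | 
16 |     # queue a task for the cluster to execute
17 |     task_id = async_task('math.copysign', 2, -2)
18 | 
19 |     # poll up to one second for the result
20 |     print(result(task_id, wait=1000))  # -2.0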
21 | 
22 | 
23 | Features
24 | --------
25 | 
26 | - Multiprocessing worker pools
27 | - Asynchronous tasks
28 | - Scheduled, cron and repeated tasks
29 | - Signed and compressed packages
30 | - Failure and success database or cache
31 | - Result hooks, groups and chains
32 | - Django Admin integration
33 | - PaaS compatible with multiple instances
34 | - Multi cluster monitor
35 | - Redis, Disque, IronMQ, SQS, MongoDB or ORM
36 | - Rollbar and Sentry support
37 | 
38 | 
39 | Django Q is tested with: Python 3.7 and 3.8, Django 2.2.x and 3.2.x
40 | 
41 | Currently available in English, German and French.
42 | 
43 | Contents:
44 | 
45 | .. toctree::
46 |    :maxdepth: 2
47 | 
48 |    Installation <install>
49 |    Configuration <configure>
50 |    Brokers <brokers>
51 |    Tasks <tasks>
52 |    Groups <group>
53 |    Iterable <iterable>
54 |    Chains <chain>
55 |    Schedules <schedules>
56 |    Cluster <cluster>
57 |    Monitor <monitor>
58 |    Admin <admin>
59 |    Errors <errors>
60 |    Signals <signals>
61 |    Architecture <architecture>
62 |    Examples <examples>
63 | 
64 | * :ref:`genindex`
65 | * :ref:`search`
66 | 
67 | 
--------------------------------------------------------------------------------
/docs/install.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 | .. py:currentmodule:: django_q
4 | 
5 | - Install the latest version with pip::
6 | 
7 |     $ pip install django-q
8 | 
9 | 
10 | - Add :mod:`django_q` to ``INSTALLED_APPS`` in your project's :file:`settings.py`::
11 | 
12 |       INSTALLED_APPS = (
13 |           # other apps
14 |           'django_q',
15 |       )
16 | 
17 | - Run Django migrations to create the database tables::
18 | 
19 |     $ python manage.py migrate
20 | 
21 | - Choose a message :doc:`broker <brokers>`, configure it and install the appropriate client library.
22 | 
23 | - Run the Django Q cluster to handle tasks asynchronously::
24 | 
25 |     $ python manage.py qcluster
26 | 
27 | Requirements
28 | ------------
29 | 
30 | Django Q is tested for Python 3.7, 3.8 and 3.9.
31 | 
32 | - `Django `__
33 | 
34 |   Django Q aims to use as much of Django's standard offerings as possible.
35 |   The code is tested against Django versions `2.2.x` and `3.2.x`.
36 |   Please note that Django versions below 2.0 do not support Python 3.7.
37 | 
38 | - `Django-picklefield `__
39 | 
40 |   Used to store args, kwargs and result objects in the database.
41 | 
42 | - `Arrow `__
43 | 
44 |   The scheduler uses Chris Smith's wonderful project to determine correct dates in the future.
45 | 
46 | - `Blessed `__
47 | 
48 |   This feature-filled fork of Erik Rose's blessings project provides the terminal layout of the monitor.
49 | 
50 | 
51 | Optional
52 | ~~~~~~~~
53 | - `Redis-py `__ client by Andy McCurdy is used to interface with both the Redis and Disque brokers::
54 | 
55 |     $ pip install redis
56 | 
57 | .. _psutil_package:
58 | 
59 | - `Psutil `__ Python system and process utilities module by Giampaolo Rodolà is an optional requirement that adds CPU affinity settings to the cluster::
60 | 
61 |     $ pip install psutil
62 | 
63 | - `Hiredis `__ parser. This C library maintained by the core Redis team is faster than the standard PythonParser during high loads::
64 | 
65 |     $ pip install hiredis
66 | 
67 | - `Boto3 `__ is used for the Amazon SQS broker in favor of the now deprecated boto library::
68 | 
69 |     $ pip install boto3
70 | 
71 | - `Iron-mq `_ is the official Python binding for the IronMQ broker::
72 | 
73 |     $ pip install iron-mq
74 | 
75 | - `Pymongo `__ is needed if you want to use MongoDB as a message broker::
76 | 
77 |     $ pip install pymongo
78 | 
79 | - `Redis `__ server is the default broker for Django Q. It provides the best performance and does not require Django's cache framework for monitoring.
80 | 
81 | - `Disque `__ server is based on Redis by the same author, but focuses on reliable queues. Currently in Alpha, but highly recommended. You can either build it from source or use it on Heroku through the `Tynd `__ beta.
82 | 
83 | - `MongoDB `__ is a highly scalable NoSQL database which makes for a very fast and reliable persistent at-least-once message broker. Usually available on most PaaS providers.
84 | 
85 | - `Pyrollbar `__ is an error notifier for `Rollbar `__ which lets you manage your worker errors in one place. Needs a `Rollbar `__ account and access key::
86 | 
87 |     $ pip install rollbar
88 | 
89 | 
90 | 
91 | 
92 | .. _croniter_package:
93 | 
94 | - `Croniter `__ is an optional package that is used to parse cron expressions for the scheduler::
95 | 
96 |     $ pip install croniter
97 | 
98 | 
99 | 
100 | 
101 | Add-ons
102 | -------
103 | - `django-q-rollbar `__ is a Rollbar error reporter::
104 | 
105 |     $ pip install django-q[rollbar]
106 | 
107 | - `django-q-sentry `__ is a Sentry error reporter::
108 | 
109 |     $ pip install django-q[sentry]
110 | 
111 | - `django-q-email `__ is a compatible Django email backend that will automatically async queue your emails.
112 | 
113 | Compatibility
114 | -------------
115 | Django Q is still a young project. If you do find any incompatibilities, please submit an issue on `github `__.
116 | 
117 | OS X
118 | ~~~~
119 | Running Django Q on OS X should work fine, except for the following known issues:
120 | 
121 | * :meth:`multiprocessing.Queue.qsize()` is not supported. This leads to the monitor not reporting the internal queue size of clusters running under OS X.
122 | * CPU count through :func:`multiprocessing.cpu_count()` does not work. Installing :ref:`psutil` provides Django Q with an alternative way of determining the number of CPUs on your system.
123 | * CPU affinity is provided by :ref:`psutil` which at this time does not support this feature on OS X. The code however is aware of this and will fake the CPU affinity assignment in the logs without actually assigning it. This way you can still develop with this setting.
124 | 
125 | Windows
126 | ~~~~~~~
127 | The cluster and worker multiprocessing code depend on the OS's ability to fork, and unfortunately forking is not supported under Windows.
128 | You should however be able to develop and test without the cluster by setting the ``sync`` option to ``True`` in the configuration; a minimal sketch follows the list below.
129 | This will run all ``async`` calls inline through a single cluster worker without the need for forking.
130 | Other known issues are:
131 | 
132 | * :func:`os.getppid()` is only supported under Windows since Python 3.2. If you use an older version you need to install :ref:`psutil` as an alternative.
133 | * CPU count through :func:`multiprocessing.cpu_count()` occasionally fails on servers. Installing :ref:`psutil` provides Django Q with an alternative way of determining the number of CPUs on your system.
134 | * The monitor and info commands rely on the Curses package which is not officially supported on Windows. There are however some ports available like `this one `__ by Christoph Gohlke.
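135 | 
136 | As an illustration of the ``sync`` option mentioned above, a minimal sketch of the relevant ``Q_CLUSTER`` entry in :file:`settings.py` (the cluster name is a placeholder):
137 | 
138 | .. code-block:: python
139 | 
140 |     # settings.py -- run all task calls inline, without forking
141 |     Q_CLUSTER = {
142 |         'name': 'myproject',  # placeholder cluster name
143 |         'sync': True,         # execute async_task calls synchronously
144 |     }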
145 | 
146 | Python
147 | ~~~~~~
148 | The code is always tested against the latest version of Python 3 and we try to stay compatible with the last two versions of each.
149 | Current tests are performed with 3.7 and 3.8.
150 | If you do encounter any regressions with earlier versions, please submit an issue on `github `__.
151 | 
152 | .. note::
153 | 
154 |     Django releases before 1.11 are not supported on Python 3.6.
155 |     Django releases before 2.0 are not supported on Python 3.7.
156 | 
157 | Open-source packages
158 | ~~~~~~~~~~~~~~~~~~~~
159 | Django Q is always tested with the latest versions of the required and optional Python packages. We try to keep the dependencies as up to date as possible.
160 | You can reference the `requirements `__ file to determine which versions are currently being used for tests and development.
161 | 
162 | Django
163 | ~~~~~~
164 | We strive to be compatible with the last two major versions of Django.
165 | At the moment this means we support the 2.2.x and 3.2.x releases.
166 | 
167 | Since we are no longer supporting Python 2, we also cannot support older versions of Django that do not support Python >= 3.6.
168 | For those you can always use older releases, but they are no longer maintained.
169 | 
170 | 
171 | 
--------------------------------------------------------------------------------
/docs/iterable.rst:
--------------------------------------------------------------------------------
1 | .. py:currentmodule:: django_q
2 | 
3 | Iterable
4 | ========
5 | If you have an iterable object with arguments for a function, you can use :func:`async_iter` to run them asynchronously with a single command::
6 | 
7 |     # Async Iterable example
8 |     from django_q.tasks import async_iter, result
9 | 
10 |     # set up a list of arguments for math.floor
11 |     iter = [i for i in range(100)]
12 | 
13 |     # queue them with async_iter
14 |     id = async_iter('math.floor', iter)
15 | 
16 |     # wait for the collated result for 1 second
17 |     result_list = result(id, wait=1000)
18 | 
19 | This will individually queue 100 tasks to the worker cluster, which will save their results in the cache backend for speed.
20 | Once all 100 results are in the cache, they are collated into a list and saved as a single result in the database. The cache results are then cleared.
21 | 
22 | You can also use an :class:`Iter` instance which can sometimes be more convenient:
23 | 
24 | .. code-block:: python
25 | 
26 |     from django_q.tasks import Iter
27 | 
28 |     i = Iter('math.copysign')
29 | 
30 |     # add some arguments
31 |     i.append(1, -1)
32 |     i.append(2, -1)
33 |     i.append(3, -1)
34 | 
35 |     # run it
36 |     i.run()
37 | 
38 |     # get the results
39 |     print(i.result())
40 | 
41 | .. code-block:: python
42 | 
43 |     [-1.0, -2.0, -3.0]
44 | 
45 | Reference
46 | ---------
47 | 
48 | .. py:function:: async_iter(func, args_iter, **kwargs)
49 | 
50 |    Runs iterable arguments against the cache backend and returns a single collated result.
51 |    Accepts the same options as :func:`async_task` except ``hook``. See also the :class:`Iter` class.
52 | 
53 |    :param object func: The task function to execute
54 |    :param args: An iterable containing arguments for the task function
55 |    :param dict kwargs: Keyword arguments for the task function. Ignores ``hook``.
56 |    :returns: The uuid of the task
57 |    :rtype: str
58 | 
59 | .. py:class:: Iter(func=None, args=None, kwargs=None, cached=Conf.CACHED, sync=Conf.SYNC, broker=None)
60 | 
61 |    An async task with iterable arguments. Serves as a convenient wrapper for :func:`async_iter`.
62 |    You can pass the iterable arguments at construction or you can append individual argument tuples.
63 | 
64 |    :param func: the function to execute
65 |    :param args: an iterable of arguments.
66 |    :param kwargs: the keyword arguments
67 |    :param bool cached: run this against the cache backend
68 |    :param bool sync: execute this inline instead of asynchronously
69 |    :param broker: optional broker instance
70 | 
71 | 
72 |    .. py:method:: append(*args)
73 | 
74 |       Append arguments to the iter set. Returns the current set count.
75 | 
76 |       :param args: the arguments for a single execution
77 |       :return: the current set count
78 |       :rtype: int
79 | 
80 | 
81 |    .. py:method:: run()
82 | 
83 |       Start queueing the tasks to the worker cluster.
84 | 
85 |       :return: the task result id
86 | 
87 | 
88 |    .. py:method:: result(wait=0)
89 | 
90 |       Return the full list of results.
91 | 
92 |       :param int wait: how many milliseconds to wait for a result
93 |       :return: an unsorted list of results
94 | 
95 | 
96 |    .. py:method:: fetch(wait=0)
97 | 
98 |       Get the task result objects.
99 | 
100 |       :param int wait: how many milliseconds to wait for a result
101 |       :return: an unsorted list of task objects
102 | 
103 | 
104 |    .. py:method:: length()
105 | 
106 |       Get the length of the arguments list.
107 | 
108 |       :return: the length of the argument list
109 |       :rtype: int
110 | 
--------------------------------------------------------------------------------
/docs/monitor.rst:
--------------------------------------------------------------------------------
1 | Monitor
2 | =======
3 | .. py:currentmodule:: django_q.monitor
4 | 
5 | The cluster monitor shows live information about all the Q clusters connected to your project.
6 | 
7 | Start the monitor with Django's `manage.py` command::
8 | 
9 |     $ python manage.py qmonitor
10 | 
11 | 
12 | .. image:: _static/monitor.png
13 | 
14 | For all broker types except the Redis broker, the monitor utilizes Django's cache framework to store statistics of running clusters.
15 | This can be any type of cache backend as long as it can be shared among Django instances. For this reason, the local memory backend will not work.
16 | 
17 | 
18 | Legend
19 | ------
20 | 
21 | Host
22 | ~~~~
23 | 
24 | Shows the hostname of the server this cluster is running on.
25 | 
26 | Id
27 | ~~
28 | 
29 | The cluster Id. Same as the cluster process ID or pid.
30 | 
31 | State
32 | ~~~~~
33 | 
34 | Current state of the cluster:
35 | 
36 | - **Starting** The cluster is spawning workers and getting ready.
37 | - **Idle** Everything is ok, but there are no tasks to process.
38 | - **Working** Processing tasks like a good cluster should.
39 | - **Stopping** The cluster does not take on any new tasks and is finishing.
40 | - **Stopped** All tasks have been processed and the cluster is shutting down.
41 | 
42 | Pool
43 | ~~~~
44 | 
45 | The current number of workers in the cluster pool.
46 | 
47 | TQ
48 | ~~
49 | 
50 | **Task Queue** counts the number of tasks in the queue. [#f1]_
51 | 
52 | If this keeps rising it means you are taking on more tasks than your cluster can handle.
53 | You can limit this by setting the :ref:`queue_limit` in your cluster configuration, after which it will turn green when that limit has been reached.
54 | If your task queue is always hitting its limit and you're running out of resources, it may be time to add another cluster.
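55 | 
56 | As a minimal sketch of that ``queue_limit`` setting, the relevant ``Q_CLUSTER`` entry could look like this (the values shown are illustrative placeholders, not recommendations):
57 | 
58 | .. code-block:: python
59 | 
60 |     # settings.py
61 |     Q_CLUSTER = {
62 |         'name': 'myproject',  # placeholder cluster name
63 |         'workers': 4,         # size of the worker pool
64 |         'queue_limit': 50,    # cap on tasks held in the internal queue
65 |     }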
66 | 
67 | RQ
68 | ~~
69 | 
70 | **Result Queue** shows the number of results in the queue. [#f1]_
71 | 
72 | Since results are only saved by a single process which has to access the database, it's normal for the result queue to take slightly longer to clear than the task queue.
73 | 
74 | RC
75 | ~~
76 | 
77 | **Reincarnations** shows the number of processes that have been reincarnated after a recycle, sudden death or timeout.
78 | If this number is unusually high, you are either suffering from repeated task errors or severe timeouts and you should check your logs for details.
79 | 
80 | Up
81 | ~~
82 | 
83 | **Uptime** shows the amount of time that has passed since the cluster was started.
84 | 
85 | 
86 | .. centered:: Press `q` to quit the monitor and return to your terminal.
87 | 
88 | Info
89 | ----
90 | 
91 | If you just want to see a one-off summary of your cluster stats you can use the `qinfo` management command::
92 | 
93 |     $ python manage.py qinfo
94 | 
95 | 
96 | .. image:: _static/info.png
97 | 
98 | All stats are summed over all available clusters.
99 | 
100 | Task rate is calculated over the last 24 hours and will show the number of tasks per second, minute, hour or day depending on the amount.
101 | Average execution time (`Avg time`) is calculated in seconds over the last 24 hours.
102 | 
103 | Since some of these numbers are based on what is available in your tasks database, limiting or disabling the result backend will skew them.
104 | 
105 | Like with the monitor, these statistics come from a Redis server or Django's cache framework, so make sure you have either one configured.
106 | 
107 | To print out the current configuration run::
108 | 
109 |     $ python manage.py qinfo --config
110 | 
111 | 
112 | Status
113 | ------
114 | 
115 | You can check the status of your clusters straight from your code with the :class:`Stat` class:
116 | 
117 | .. code:: python
118 | 
119 |     from django_q.monitor import Stat
120 | 
121 |     for stat in Stat.get_all():
122 |         print(stat.cluster_id, stat.status)
123 | 
124 |     # or if you know the cluster id
125 |     cluster_id = 1234
126 |     stat = Stat.get(cluster_id)
127 |     print(stat.status, stat.workers)
128 | 
129 | Reference
130 | ---------
131 | 
132 | .. py:class:: Stat
133 | 
134 |    Cluster status object.
135 | 
136 |    .. py:attribute:: cluster_id
137 | 
138 |       Id of this cluster. Corresponds with the process id.
139 | 
140 |    .. py:attribute:: tob
141 | 
142 |       Time Of Birth
143 | 
144 |    .. py:method:: uptime
145 | 
146 |       Shows the number of seconds passed since the time of birth.
147 | 
148 |    .. py:attribute:: reincarnations
149 | 
150 |       The number of times the sentinel had to start a new worker process.
151 | 
152 |    .. py:attribute:: status
153 | 
154 |       String representing the current cluster status.
155 | 
156 |    .. py:attribute:: task_q_size
157 | 
158 |       The number of tasks currently in the task queue. [#f1]_
159 | 
160 |    .. py:attribute:: done_q_size
161 | 
162 |       The number of tasks currently in the result queue. [#f1]_
163 | 
164 |    .. py:attribute:: pusher
165 | 
166 |       The pid of the pusher process
167 | 
168 |    .. py:attribute:: monitor
169 | 
170 |       The pid of the monitor process
171 | 
172 |    .. py:attribute:: sentinel
173 | 
174 |       The pid of the sentinel process
175 | 
176 |    .. py:attribute:: workers
177 | 
178 |       A list of process ids of the workers currently in the cluster pool.
179 | 
180 |    .. py:method:: empty_queues
181 | 
182 |       Returns ``True`` if both the task and result queues are empty, ``False`` otherwise.
183 | 
184 |    .. py:classmethod:: get(cluster_id, broker=None)
185 | 
186 |       Gets the current :class:`Stat` for the cluster id. Takes an optional broker connection.
187 | 
188 |    .. py:classmethod:: get_all(broker=None)
189 | 
190 |       Returns a list of :class:`Stat` objects for all active clusters. Takes an optional broker connection.
191 | 
192 | .. rubric:: Footnotes
193 | 
194 | .. [#f1] Uses :meth:`multiprocessing.Queue.qsize()` which is not implemented on OS X and always returns 0.
--------------------------------------------------------------------------------
/docs/schedules.rst:
--------------------------------------------------------------------------------
1 | Schedules
2 | =========
3 | .. py:currentmodule:: django_q
4 | 
5 | Schedule
6 | --------
7 | 
8 | Schedules are regular Django models.
9 | You can manage them through the :ref:`admin_page` or directly from your code with the :func:`schedule` function or the :class:`Schedule` model:
10 | 
11 | .. code:: python
12 | 
13 |     # Use the schedule wrapper
14 |     from django_q.tasks import schedule
15 | 
16 |     schedule('math.copysign',
17 |              2, -2,
18 |              hook='hooks.print_result',
19 |              schedule_type='D')
20 | 
21 |     # Or create the object directly
22 |     from django_q.models import Schedule
23 | 
24 |     Schedule.objects.create(func='math.copysign',
25 |                             hook='hooks.print_result',
26 |                             args='2,-2',
27 |                             schedule_type=Schedule.DAILY
28 |                             )
29 | 
30 |     # In case you want to use q_options
31 |     # Specify the broker by using the property broker_name in q_options
32 |     schedule('math.sqrt',
33 |              9,
34 |              hook='hooks.print_result',
35 |              q_options={'timeout': 30, 'broker_name': 'broker_1'},
36 |              schedule_type=Schedule.HOURLY)
37 | 
38 |     # Run a schedule every 5 minutes, starting at 6 pm (18:00 UTC) today,
39 |     # for 2 hours
40 |     import arrow
41 | 
42 |     schedule('math.hypot',
43 |              3, 4,
44 |              schedule_type=Schedule.MINUTES,
45 |              minutes=5,
46 |              repeats=24,
47 |              next_run=arrow.utcnow().replace(hour=18, minute=0))
48 | 
49 |     # Use a cron expression
50 |     schedule('math.hypot',
51 |              3, 4,
52 |              schedule_type=Schedule.CRON,
53 |              cron='0 22 * * 1-5')
54 | 
55 | 
56 |     # Restrain a schedule to a specific cluster
57 |     schedule('math.hypot',
58 |              3, 4,
59 |              schedule_type=Schedule.DAILY,
60 |              cluster='my_cluster')
61 | 
62 | 
63 | Missed schedules
64 | ----------------
65 | If your cluster has not run for a while, the default behavior for the scheduler is to play catch up with the schedules and keep executing them until they are up to date.
66 | In practical terms this means the scheduler will execute tasks in the past, reschedule them in the past and immediately execute them again until the schedule is set in the future.
67 | This default behavior is intended to facilitate schedules that poll or gather statistics, but might not be suitable for your particular situation.
68 | You can change this by setting the :ref:`catch_up` configuration setting to ``False``; a minimal sketch follows this section.
69 | The scheduler will then skip execution of scheduled events in the past.
70 | Instead those tasks will run once when the cluster starts again and the scheduler will find the next available slot in the future according to the original schedule parameters.
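71 | 
72 | As a minimal sketch of that ``catch_up`` setting (the cluster name is a placeholder):
73 | 
74 | .. code-block:: python
75 | 
76 |     # settings.py -- skip missed schedule executions instead of replaying them
77 |     Q_CLUSTER = {
78 |         'name': 'myproject',  # placeholder cluster name
79 |         'catch_up': False,    # do not play catch up with past schedules
80 |     }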
81 | 
82 | Management Commands
83 | -------------------
84 | 
85 | If you want to schedule regular Django management commands, you can use the :mod:`django.core.management` module to call them directly:
86 | 
87 | .. code-block:: python
88 | 
89 |     from django_q.tasks import schedule
90 | 
91 |     # run `manage.py clearsessions` every hour
92 |     schedule('django.core.management.call_command',
93 |              'clearsessions',
94 |              schedule_type='H')
95 | 
96 | Or you can make a wrapper function which you can then schedule in Django Q:
97 | 
98 | .. code-block:: python
99 | 
100 |     # tasks.py
101 |     from django.core import management
102 | 
103 |     # wrapping `manage.py clearsessions`
104 |     def clear_sessions_command():
105 |         return management.call_command('clearsessions')
106 | 
107 |     # now you can schedule it to run every hour
108 |     from django_q.tasks import schedule
109 | 
110 |     schedule('tasks.clear_sessions_command', schedule_type='H')
111 | 
112 | 
113 | Check out the :ref:`shell` examples if you want to schedule regular shell commands.
114 | 
115 | .. note::
116 | 
117 |     Schedules need the optional :ref:`Croniter` package installed to parse cron expressions.
118 | 
119 | Reference
120 | ---------
121 | 
122 | .. py:function:: schedule(func, *args, name=None, hook=None, schedule_type='O', minutes=None, repeats=-1, next_run=now(), q_options=None, **kwargs)
123 | 
124 |    Creates a schedule
125 | 
126 |    :param str func: the function to schedule. Dotted strings only.
127 |    :param args: arguments for the scheduled function.
128 |    :param str name: An optional name for your schedule.
129 |    :param str hook: optional result hook function. Dotted strings only.
130 |    :param str schedule_type: (O)nce, M(I)nutes, (H)ourly, (D)aily, (W)eekly, (M)onthly, (Q)uarterly, (Y)early or (C)ron :attr:`Schedule.TYPE`
131 |    :param int minutes: Number of minutes for the Minutes type.
132 |    :param str cron: Cron expression for the Cron type.
133 |    :param int repeats: Number of times to repeat the schedule. -1=Always, 0=Never, n=n.
134 |    :param datetime next_run: Next or first scheduled execution datetime.
135 |    :param str cluster: optional cluster name. Task will be executed only on a cluster with a matching :ref:`name`.
136 |    :param dict q_options: options passed to async_task for this schedule
137 |    :param kwargs: optional keyword arguments for the scheduled function.
138 | 
139 |    .. note::
140 | 
141 |        ``q_options`` does not accept the ``'broker'`` key with a broker instance, but accepts a ``'broker_name'`` key instead. This can be used to specify the broker connection name to assign the task. If a broker with the specified name does not exist or is not running at the moment the task is placed in the queue, it falls back to the broker/queue that handled the schedule.
142 | 
143 | 
144 | .. class:: Schedule
145 | 
146 |    A database model for task schedules.
147 | 
148 |    .. py:attribute:: id
149 | 
150 |       Primary key
151 | 
152 |    .. py:attribute:: name
153 | 
154 |       A name for your schedule. Tasks created by this schedule will assume this or the primary key as their group id.
155 | 
156 |    .. py:attribute:: func
157 | 
158 |       The function to be scheduled
159 | 
160 |    .. py:attribute:: hook
161 | 
162 |       Optional hook function to be called after execution.
163 | 
164 |    .. py:attribute:: args
165 | 
166 |       Positional arguments for the function.
167 | 
168 |    .. py:attribute:: kwargs
169 | 
170 |       Keyword arguments for the function
171 | 
172 |    .. py:attribute:: schedule_type
173 | 
174 |       The type of schedule. Follows :attr:`Schedule.TYPE`
175 | 
176 |    .. py:attribute:: TYPE
177 | 
178 |       :attr:`ONCE`, :attr:`MINUTES`, :attr:`HOURLY`, :attr:`DAILY`, :attr:`WEEKLY`, :attr:`MONTHLY`, :attr:`QUARTERLY`, :attr:`YEARLY`, :attr:`CRON`
179 | 
180 |    .. py:attribute:: minutes
181 | 
182 |       The number of minutes the :attr:`MINUTES` schedule should use.
183 |       Is ignored for other schedule types.
184 | 
185 |    .. py:attribute:: cron
186 | 
187 |       A cron string describing the schedule. You need the optional `croniter` package installed for this.
188 | 
189 |    .. py:attribute:: repeats
190 | 
191 |       Number of times to repeat the schedule. -1=Always, 0=Never, n=n.
192 |       When set to -1, this will keep counting down.
193 | 
194 |    .. py:attribute:: cluster
195 | 
196 |       Task will be executed only on a cluster with a matching :ref:`name`.
197 | 
198 |    .. py:attribute:: next_run
199 | 
200 |       Datetime of the next scheduled execution.
201 | 
202 |    .. py:attribute:: task
203 | 
204 |       Id of the last task generated by this schedule.
205 | 
206 |    .. py:method:: last_run()
207 | 
208 |       Admin link to the last executed task.
209 | 
210 |    .. py:method:: success()
211 | 
212 |       Returns the success status of the last executed task.
213 | 
214 |    .. py:attribute:: ONCE
215 | 
216 |       `'O'` the schedule will only run once.
217 |       If it has a negative :attr:`repeats` it will be deleted after it has run.
218 |       If you want to keep the result, set :attr:`repeats` to a positive number.
219 | 
220 |    .. py:attribute:: MINUTES
221 | 
222 |       `'I'` will run every :attr:`minutes` after its first run.
223 | 
224 |    .. py:attribute:: HOURLY
225 | 
226 |       `'H'` the scheduled task will run every hour after its first run.
227 | 
228 |    .. py:attribute:: DAILY
229 | 
230 |       `'D'` the scheduled task will run every day at the time of its first run.
231 | 
232 |    .. py:attribute:: WEEKLY
233 | 
234 |       `'W'` the task will run every week on the day and time of the first run.
235 | 
236 |    .. py:attribute:: MONTHLY
237 | 
238 |       `'M'` the task runs every month on the day and time of the last run.
239 | 
240 |       .. note::
241 | 
242 |           Months are tricky. If you schedule something on the 31st of the month and the next month has only 30 days or less, the task will run on the last day of the next month.
243 |           It will however continue to run on that day, e.g. the 28th, in subsequent months.
244 | 
245 |    .. py:attribute:: QUARTERLY
246 | 
247 |       `'Q'` this task runs once every 3 months on the day and time of the last run.
248 | 
249 |    .. py:attribute:: YEARLY
250 | 
251 |       `'Y'` only runs once a year. The same caution as with months applies:
252 |       if you set this to February 29th, it will run on February 28th in the following years.
253 | 
254 |    .. py:attribute:: CRON
255 | 
256 |       `'C'` uses the optional `croniter` package to determine a schedule based on a cron expression.
257 | 
258 | 
--------------------------------------------------------------------------------
/docs/signals.rst:
--------------------------------------------------------------------------------
1 | Signals
2 | =======
3 | .. py:currentmodule:: django_q
4 | 
5 | Available signals
6 | -----------------
7 | 
8 | Django Q emits the following signals during its lifecycle.
9 | 
10 | Before enqueuing a task
11 | """""""""""""""""""""""
12 | 
13 | The ``django_q.signals.pre_enqueue`` signal is emitted before a task is
14 | enqueued. The task dictionary is given as the ``task`` argument.
15 | 
16 | Before executing a task
17 | """""""""""""""""""""""
18 | 
19 | The ``django_q.signals.pre_execute`` signal is emitted before a task is
20 | executed by a worker. This signal provides two arguments:
21 | 
22 | - ``task``: the task dictionary.
23 | - ``func``: the actual function that will be executed. Even if the task was
24 |   created with a dotted function path, this argument will be the resolved
25 |   callable.
26 | 
27 | After executing a task
28 | """"""""""""""""""""""
29 | The ``django_q.signals.post_execute`` signal is emitted after a task is
30 | executed by a worker and processed by the monitor. It includes the ``task`` dictionary with the result.
31 | 
32 | 
33 | Subscribing to a signal
34 | -----------------------
35 | 
36 | Connecting to a Django Q signal is done the same way as any other Django
37 | signal::
38 | 
39 |     from django.dispatch import receiver
40 |     from django_q.signals import pre_enqueue, pre_execute, post_execute
41 | 
42 |     @receiver(pre_enqueue)
43 |     def my_pre_enqueue_callback(sender, task, **kwargs):
44 |         print(f"Task {task['name']} will be queued")
45 | 
46 |     @receiver(pre_execute)
47 |     def my_pre_execute_callback(sender, func, task, **kwargs):
48 |         print(f"Task {task['name']} will be executed by calling {func}")
49 | 
50 |     @receiver(post_execute)
51 |     def my_post_execute_callback(sender, task, **kwargs):
52 |         print(f"Task {task['name']} was executed with result {task['result']}")
53 | 
54 | 
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "django-q"
3 | version = "1.3.9"
4 | description = "A multiprocessing distributed task queue for Django"
5 | authors = ["Ilan Steemers "]
6 | maintainers = ["Ilan Steemers "]
7 | license = "MIT"
8 | readme = 'README.rst'
9 | 
10 | repository = "https://github.com/koed00/django-q"
11 | homepage = "https://django-q.readthedocs.org"
12 | documentation = "https://django-q.readthedocs.org"
13 | 
14 | keywords = ["django", "distributed", "multiprocessing", "queue", "scheduler"]
15 | 
16 | classifiers = [
17 |     'Development Status :: 5 - Production/Stable',
18 |     'Environment :: Web Environment',
19 |     'Framework :: Django',
20 |     'Intended Audience :: Developers',
21 |     'License :: OSI Approved :: MIT License',
22 |     'Operating System :: POSIX',
23 |     'Operating System :: MacOS',
24 |     'Programming Language :: Python',
25 |     'Programming Language :: Python :: 3',
26 |     'Programming Language :: Python :: 3.6',
27 |     'Programming Language :: Python :: 3.7',
28 |     'Programming Language :: Python :: 3.8',
29 |     'Programming Language :: Python :: 3.9',
30 |     'Topic :: Internet :: WWW/HTTP',
31 |     'Topic :: System :: Distributed Computing',
32 |     'Topic :: Software Development :: Libraries :: Python Modules',
33 | ]
34 | include = ['CHANGELOG.md']
35 | 
36 | [tool.poetry.plugins] # Optional super table
37 | 
38 | [tool.poetry.plugins."djangoq.errorreporters"]
39 | "rollbar" = "django_q_rollbar:Rollbar"
40 | "sentry" = "django_q_sentry:Sentry"
41 | 
42 | 
43 | [tool.poetry.dependencies]
44 | python = ">=3.6.2, <4"
45 | django = ">=2.2"
46 | blessed = "^1.17.6"
47 | arrow = "^1.1.0"
48 | django-picklefield = "^3.0.1"
49 | 
50 | hiredis = { version = "^1.0.1", optional = true }
51 | redis = { version = "^3.5.3", optional = true }
52 | psutil = { version = "^5.7.0", optional = true }
53 | django-redis = { version = "^4.12.1", optional = true }
54 | iron-mq = { version = "^0.9", optional = true }
55 | boto3 = { version = "^1.14.12", optional = true }
56 | pymongo = { version = "^3.10.1", optional = true }
57 | croniter = { version = "^0.3.34", optional = true }
58 | django-q-rollbar = {version = ">=0.1", optional = true}
59 | django-q-sentry = {version = ">=0.1", optional = true}
60 | 
61 | 
62 | [tool.poetry.dev-dependencies]
63 | pytest = "^5.4.2"
64 | pytest-django = "^3.9.0"
65 | Sphinx = "^4.0.2"
66 | pytest-cov = "^2.12.0"
67 | black = { version = "^21.5b1", allow-prereleases = true }
68 | isort = {extras = ["requirements_deprecated_finder"], version = "^5.8.0"}
69 | 
70 | [tool.poetry.extras]
["poetry.core.masonry.api"] 73 | testing = ["django-redis", "croniter", "hiredis", "psutil", "iron-mq", "boto3", "pymongo"] 74 | rollbar = ["django-q-rollbar"] 75 | sentry = ["django-q-sentry"] 76 | 77 | [tool.isort] 78 | profile = "black" 79 | multi_line_output = 3 -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | DJANGO_SETTINGS_MODULE=django_q.tests.settings -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansicon==1.89.0; platform_system == "Windows" and python_version >= "2.7" \ 2 | --hash=sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec \ 3 | --hash=sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1 4 | arrow==1.1.1; python_version >= "3.6" \ 5 | --hash=sha256:77a60a4db5766d900a2085ce9074c5c7b8e2c99afeaa98ad627637ff6f292510 \ 6 | --hash=sha256:dee7602f6c60e3ec510095b5e301441bc56288cb8f51def14dcb3079f623823a 7 | asgiref==3.3.4; python_version >= "3.6" \ 8 | --hash=sha256:92906c611ce6c967347bbfea733f13d6313901d54dcca88195eaeb52b2a8e8ee \ 9 | --hash=sha256:d1216dfbdfb63826470995d31caed36225dcaf34f182e0fa257a4dd9e86f1b78 10 | blessed==1.18.1; python_version >= "2.7" \ 11 | --hash=sha256:dd7c0d33db9a2e7f597b446996484d0ed46e1586239db064fb5025008937dcae \ 12 | --hash=sha256:8b09936def6bc06583db99b65636b980075733e13550cb6af262ce724a55da23 13 | django-picklefield==3.0.1; python_version >= "3" \ 14 | --hash=sha256:15ccba592ca953b9edf9532e64640329cd47b136b7f8f10f2939caa5f9ce4287 \ 15 | --hash=sha256:3c702a54fde2d322fe5b2f39b8f78d9f655b8f77944ab26f703be6c0ed335a35 16 | django==3.2.4; python_version >= "3.6" \ 17 | --hash=sha256:ea735cbbbb3b2fba6d4da4784a0043d84c67c92f1fdf15ad6db69900e792c10f \ 18 | --hash=sha256:66c9d8db8cc6fe938a28b7887c1596e42d522e27618562517cc8929eb7e7f296 19 | jinxed==1.1.0; platform_system == "Windows" and python_version >= "2.7" \ 20 | --hash=sha256:6a61ccf963c16aa885304f27e6e5693783676897cea0c7f223270c8b8e78baf8 \ 21 | --hash=sha256:d8f1731f134e9e6b04d95095845ae6c10eb15cb223a5f0cabdea87d4a279c305 22 | python-dateutil==2.8.1; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6" \ 23 | --hash=sha256:73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c \ 24 | --hash=sha256:75bb3f31ea686f1197762692a9ee6a7550b59fc6ca3a1f4b5d7e32fb98e2da2a 25 | pytz==2021.1; python_version >= "3.6" \ 26 | --hash=sha256:eb10ce3e7736052ed3623d49975ce333bcd712c7bb19a58b9e2089d4057d0798 \ 27 | --hash=sha256:83a4a90894bf38e243cf052c8b58f381bfe9a7a483f6a9cab140bc7f702ac4da 28 | six==1.16.0; python_version >= "3.6" and python_full_version < "3.0.0" or python_full_version >= "3.3.0" and python_version >= "3.6" \ 29 | --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 \ 30 | --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 31 | sqlparse==0.4.1; python_version >= "3.6" \ 32 | --hash=sha256:017cde379adbd6a1f15a61873f43e8274179378e95ef3fede90b5aa64d304ed0 \ 33 | --hash=sha256:0f91fd2e829c44362cbcfab3e9ae12e22badaa8a29ad5ff599f9ec109f0454e8 34 | typing-extensions==3.10.0.0; python_version < "3.8" and python_version >= "3.6" \ 35 | --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \ 36 | 
37 |     --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342
38 | wcwidth==0.2.5; python_version >= "2.7" \
39 |     --hash=sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784 \
40 |     --hash=sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83
--------------------------------------------------------------------------------
/test-services-docker-compose.yaml:
--------------------------------------------------------------------------------
1 | version: '2.2'
2 | 
3 | services:
4 |   disque:
5 |     image: efrecon/disque:1.0-rc1
6 |     ports:
7 |       - '7711:7711/tcp'
8 | 
9 |   redis:
10 |     image: redis:latest
11 |     ports:
12 |       - '6379:6379/tcp'
13 | 
14 |   mongo:
15 |     image: mongo:4
16 |     ports:
17 |       - '27017:27017/tcp'
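18 | 
19 | # As a usage illustration (assuming the docker-compose CLI is installed),
20 | # these test services can be started in the background with:
21 | #
22 | #   docker-compose -f test-services-docker-compose.yaml up -d
23 | #
24 | # Redis then listens on port 6379, Disque on 7711 and MongoDB on 27017.
--------------------------------------------------------------------------------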