├── streaq ├── py.typed ├── __main__.py ├── __init__.py ├── constants.py ├── ui │ ├── templates │ │ ├── table.j2 │ │ ├── base.j2 │ │ ├── task.j2 │ │ └── queue.j2 │ ├── deps.py │ ├── __init__.py │ └── tasks.py ├── types.py ├── cli.py ├── utils.py ├── lua │ └── streaq.lua └── task.py ├── tests ├── __init__.py ├── test_utils.py ├── failure.py ├── conftest.py ├── test_web.py ├── test_cli.py ├── test_worker.py └── test_task.py ├── .python-version ├── .github ├── FUNDING.yml ├── pull_request_template.md ├── dependabot.yml └── workflows │ ├── python-publish-test.yml │ ├── python-publish.yml │ └── python-app.yml ├── docs ├── api │ ├── task.rst │ ├── types.rst │ ├── utils.rst │ └── worker.rst ├── Makefile ├── installation.rst ├── make.bat ├── contributing.rst ├── middleware.rst ├── cli.rst ├── getting-started.rst ├── index.rst ├── conf.py ├── integrations.rst ├── worker.rst └── task.rst ├── .readthedocs.yaml ├── Makefile ├── benchmarks ├── bench_streaq.py ├── bench_taskiq.py ├── bench_arq.py ├── bench_saq.py └── README.md ├── example.py ├── coverage.svg ├── LICENSE ├── .gitignore ├── pyproject.toml ├── README.md └── docker-compose.yml /streaq/py.typed: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.10 2 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: tastyware 2 | -------------------------------------------------------------------------------- /streaq/__main__.py: -------------------------------------------------------------------------------- 1 | from .cli import cli 2 | 3 | if __name__ == "__main__": 4 | cli() 5 | -------------------------------------------------------------------------------- /docs/api/task.rst: -------------------------------------------------------------------------------- 1 | streaq.task 2 | =========== 3 | 4 | .. automodule:: streaq.task 5 | :members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /docs/api/types.rst: -------------------------------------------------------------------------------- 1 | streaq.types 2 | ============ 3 | 4 | .. automodule:: streaq.types 5 | :members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /docs/api/utils.rst: -------------------------------------------------------------------------------- 1 | streaq.utils 2 | ============ 3 | 4 | .. automodule:: streaq.utils 5 | :members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /docs/api/worker.rst: -------------------------------------------------------------------------------- 1 | streaq.worker 2 | ============= 3 | 4 | .. automodule:: streaq.worker 5 | :members: 6 | :show-inheritance: 7 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Description 2 | 3 | ## Related issue(s) 4 | Fixes ... 
5 | 6 | ## Pre-merge checklist 7 | - [ ] Code formatted correctly (check with `make lint`) 8 | - [ ] Passing tests locally (check with `make test`) 9 | - [ ] New tests added (if applicable) 10 | - [ ] Docs updated (if applicable) 11 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-22.04" 5 | tools: 6 | python: "3.10" 7 | commands: 8 | - asdf plugin add uv 9 | - asdf install uv latest 10 | - asdf global uv latest 11 | - uv sync --dev 12 | - uv run -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs $READTHEDOCS_OUTPUT/html 13 | sphinx: 14 | configuration: docs/conf.py 15 | 16 | formats: all 17 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from streaq.utils import gather, import_string 4 | 5 | pytestmark = pytest.mark.anyio 6 | 7 | 8 | def test_bad_path(): 9 | with pytest.raises(ImportError): 10 | _ = import_string("asdf") 11 | 12 | 13 | def test_bad_worker_name(): 14 | with pytest.raises(ImportError): 15 | _ = import_string("example:asdf") 16 | 17 | 18 | async def test_useless_gather(): 19 | assert not (await gather()) 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: install lint test docs 2 | 3 | install: 4 | uv sync --all-extras 5 | 6 | lint: 7 | uv run ruff check --select I --fix 8 | uv run ruff format streaq/ tests/ 9 | uv run ruff check streaq/ tests/ example.py 10 | uv run pyright streaq/ tests/ example.py 11 | 12 | test: 13 | UV_PYTHON=3.10 docker compose run --rm tests uv run --locked --all-extras --dev pytest -n auto --dist=loadgroup --cov=streaq tests/ 14 | 15 | docs: 16 | uv run -m sphinx -T -b html -d docs/_build/doctrees -D language=en docs/ docs/_build/ 17 | 18 | cleanup: 19 | docker compose down --remove-orphans 20 | -------------------------------------------------------------------------------- /streaq/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import coredis 4 | 5 | VERSION = "6.0.0" 6 | __version__ = VERSION 7 | 8 | logger = logging.getLogger(__name__) 9 | logger.addHandler(logging.NullHandler()) 10 | 11 | # disable some runtime checks 12 | coredis.Config.optimized = True 13 | 14 | # ruff: noqa: E402 15 | 16 | from .task import StreaqRetry, TaskStatus 17 | from .types import TaskContext 18 | from .utils import StreaqError 19 | from .worker import Worker 20 | 21 | __all__ = [ 22 | "StreaqError", 23 | "StreaqRetry", 24 | "TaskContext", 25 | "TaskStatus", 26 | "Worker", 27 | ] 28 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "uv" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "github-actions" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /streaq/constants.py: -------------------------------------------------------------------------------- 1 | DEFAULT_QUEUE_NAME = "default" 2 | REDIS_ABORT = ":aborted" 3 | REDIS_CHANNEL = ":channels:" 4 | REDIS_CRON = ":cron:" 5 | REDIS_DEPENDENCIES = ":task:dependencies:" 6 | REDIS_DEPENDENTS = ":task:dependents:" 7 | REDIS_GROUP = "workers" 8 | REDIS_HEALTH = ":health" 9 | REDIS_PREFIX = "streaq:" 10 | REDIS_PREVIOUS = ":task:previous:" 11 | REDIS_RESULT = ":task:results:" 12 | REDIS_RETRY = ":task:retry:" 13 | REDIS_RUNNING = ":task:running:" 14 | REDIS_QUEUE = ":queues:delayed:" 15 | REDIS_STREAM = ":queues:" 16 | REDIS_TASK = ":task:data:" 17 | REDIS_TIMEOUT = ":queues:timeout:" 18 | REDIS_UNIQUE = ":task:lock:" 19 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /benchmarks/bench_streaq.py: -------------------------------------------------------------------------------- 1 | import anyio 2 | import typer 3 | 4 | from streaq import Worker 5 | 6 | worker = Worker(concurrency=32) 7 | N_TASKS = 20_000 8 | 9 | 10 | @worker.task() 11 | async def sleeper(time: int) -> None: 12 | if time: 13 | await anyio.sleep(time) 14 | 15 | 16 | async def main(time: int): 17 | start = anyio.current_time() 18 | tasks = [sleeper.enqueue(time) for _ in range(N_TASKS)] 19 | async with worker: 20 | await worker.enqueue_many(tasks) 21 | end = anyio.current_time() 22 | print(f"enqueued {N_TASKS} tasks in {end - start:.2f}s") 23 | 24 | 25 | def run(time: int = 0): 26 | anyio.run(main, time) 27 | 28 | 29 | if __name__ == "__main__": 30 | typer.run(run) 31 | -------------------------------------------------------------------------------- /.github/workflows/python-publish-test.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python distribution to TestPyPI 2 | 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | build: 8 | name: Build distribution 9 | runs-on: ubuntu-latest 10 | permissions: 11 | id-token: write 12 | 13 | steps: 14 | - uses: actions/checkout@v6 15 | - name: Set up Python 16 | uses: actions/setup-python@v6 17 | with: 18 | python-version-file: ".python-version" 19 | - name: Install uv 20 | uses: astral-sh/setup-uv@v7 21 | with: 22 | enable-cache: true 23 | - name: Build package 24 | run: uv build 25 | - name: Publish to TestPyPI 26 | run: uv publish --publish-url https://test.pypi.org/legacy/ 27 | -------------------------------------------------------------------------------- /streaq/ui/templates/table.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | {% for task in tasks %} 12 | 13 | 14 | 15 | 16 | 17 | 18 | {% endfor %} 19 | 20 |
<tr><th>Updated time</th><th>Function name</th><th>Status</th><th>Task ID</th></tr>
<tr><td>{{ task.enqueue_time }}</td><td>{{ task.fn_name }}</td><td>{{ task.status.value }}</td><td>{{ task.task_id }}</td></tr>
21 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python distribution to PyPI 2 | 3 | on: 4 | release: 5 | types: [created] 6 | workflow_dispatch: 7 | 8 | jobs: 9 | build: 10 | name: Build distribution 11 | runs-on: ubuntu-latest 12 | environment: 13 | name: pypi 14 | permissions: 15 | id-token: write 16 | 17 | steps: 18 | - uses: actions/checkout@v6 19 | - name: Set up Python 20 | uses: actions/setup-python@v6 21 | with: 22 | python-version-file: ".python-version" 23 | - name: Install uv 24 | uses: astral-sh/setup-uv@v7 25 | with: 26 | enable-cache: true 27 | - name: Build package 28 | run: uv build 29 | - name: Publish to PyPI 30 | run: uv publish 31 | -------------------------------------------------------------------------------- /tests/failure.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import pytest 4 | from anyio import create_task_group, run, sleep 5 | 6 | from streaq import Worker 7 | 8 | 9 | @pytest.mark.anyio 10 | async def test_reclaim_idle_task(redis_url: str, task_id: str): 11 | worker1 = Worker(redis_url=redis_url, queue_name="reclaim", idle_timeout=3) 12 | 13 | @worker1.task(name="foo") 14 | async def foo() -> None: 15 | await sleep(2) 16 | 17 | async with create_task_group() as tg: 18 | await tg.start(worker1.run_async) 19 | task = foo.enqueue() 20 | task.id = task_id 21 | await task 22 | 23 | 24 | if __name__ == "__main__": 25 | redis_url = sys.argv[1] 26 | task_id = sys.argv[2] 27 | run(test_reclaim_idle_task, redis_url, task_id) 28 | -------------------------------------------------------------------------------- /example.py: -------------------------------------------------------------------------------- 1 | from anyio import run, sleep 2 | 3 | from streaq import Worker 4 | 5 | worker = Worker(redis_url="redis://localhost:6379") 6 | 7 | 8 | @worker.task() 9 | async def sleeper(time: int) -> int: 10 | await sleep(time) 11 | return time 12 | 13 | 14 | @worker.cron("* * * * mon-fri") # every minute on weekdays 15 | async def cronjob() -> None: 16 | print("Nobody respects the spammish repetition!") 17 | 18 | 19 | async def main() -> None: 20 | async with worker: 21 | await sleeper.enqueue(3) 22 | # enqueue returns a task object that can be used to get results/info 23 | task = await sleeper.enqueue(1).start(delay=3) 24 | print(await task.info()) 25 | print(await task.result(timeout=5)) 26 | 27 | 28 | if __name__ == "__main__": 29 | run(main) 30 | -------------------------------------------------------------------------------- /streaq/ui/deps.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from traceback import format_exception 3 | from typing import Any, Callable 4 | 5 | from fastapi import HTTPException, status 6 | from fastapi.templating import Jinja2Templates 7 | 8 | from streaq import Worker 9 | 10 | BASE_DIR = Path(__file__).parent / "templates" 11 | templates = Jinja2Templates(directory=str(BASE_DIR)) 12 | 13 | 14 | def get_worker() -> Worker[Any]: 15 | raise HTTPException( 16 | status_code=status.HTTP_412_PRECONDITION_FAILED, 17 | detail="get_worker dependency not implemented!", 18 | ) 19 | 20 | 21 | def get_result_formatter() -> Callable[[Any], str]: 22 | return str 23 | 24 | 25 | def get_exception_formatter() -> Callable[[BaseException], str]: 26 | def 
_format_exc(exc: BaseException) -> str: 27 | return "".join(format_exception(exc)) 28 | 29 | return _format_exc 30 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | Via pypi 5 | -------- 6 | 7 | The easiest way to install streaQ is using pip: 8 | 9 | :: 10 | 11 | $ pip install streaq 12 | 13 | From source 14 | ----------- 15 | 16 | You can also install from source. 17 | Make sure you have `uv `_ installed beforehand. 18 | 19 | :: 20 | 21 | $ git clone https://github.com/tastyware/streaq.git 22 | $ cd streaq 23 | $ make install 24 | 25 | If you're contributing, you'll want to run tests on your changes locally: 26 | 27 | :: 28 | 29 | $ make lint 30 | $ make test 31 | 32 | If you want to build the documentation (usually not necessary): 33 | 34 | :: 35 | 36 | $ make docs 37 | 38 | Windows 39 | ------- 40 | 41 | Use on Windows is not recommended as you lose access to signal handling, uvloop, and Make. Consider using WSL instead. 42 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /streaq/ui/templates/base.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | streaQ | {{ title }} 8 | 9 | 10 | 12 | 13 | 14 | 15 | 16 | {% block content %} 17 | {% endblock %} 18 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /coverage.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | coverage 17 | coverage 18 | 100% 19 | 100% 20 | 21 | 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 tastyware 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | from uuid import uuid4 3 | 4 | from pytest import fixture 5 | 6 | from streaq import Worker 7 | 8 | 9 | @fixture(scope="session") 10 | def redis_url() -> str: 11 | return "redis://redis-master:6379" 12 | 13 | 14 | @fixture(scope="function") 15 | def sentinel_worker(anyio_backend: Literal["asyncio", "trio"]) -> Worker: 16 | return Worker( 17 | sentinel_nodes=[ 18 | ("sentinel-1", 26379), 19 | ("sentinel-2", 26379), 20 | ("sentinel-3", 26379), 21 | ], 22 | sentinel_master="mymaster", 23 | queue_name=uuid4().hex, 24 | anyio_backend=anyio_backend, 25 | ) 26 | 27 | 28 | @fixture(scope="function") 29 | def normal_worker(anyio_backend: Literal["asyncio", "trio"], redis_url: str) -> Worker: 30 | return Worker( 31 | redis_url=redis_url, queue_name=uuid4().hex, anyio_backend=anyio_backend 32 | ) 33 | 34 | 35 | @fixture(params=["direct", "sentinel"], ids=["redis", "sentinel"]) 36 | def worker(request, normal_worker: Worker, sentinel_worker: Worker) -> Worker: 37 | return normal_worker if request.param == "direct" else sentinel_worker 38 | -------------------------------------------------------------------------------- /streaq/ui/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from contextlib import asynccontextmanager 4 | from typing import Any, AsyncGenerator, cast 5 | 6 | from streaq import Worker 7 | from streaq.ui.deps import get_exception_formatter, get_result_formatter, get_worker 8 | from streaq.ui.tasks import router 9 | from streaq.utils import import_string 10 | 11 | __all__ = [ 12 | "get_worker", 13 | "get_result_formatter", 14 | "get_exception_formatter", 15 | "router", 16 | ] 17 | 18 | 19 | def run_web(host: str, port: int, worker_path: str) -> None: # pragma: no cover 20 | import uvicorn 21 | from fastapi import FastAPI 22 | 23 | sys.path.append(os.getcwd()) 24 | worker = cast(Worker[Any], import_string(worker_path)) 25 | 26 | async def _get_worker() -> AsyncGenerator[Worker[Any], None]: 27 | yield worker 28 | 29 | @asynccontextmanager 30 | async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: 31 | async with worker: 32 | yield 33 | 34 | app = FastAPI(lifespan=lifespan) 35 | app.dependency_overrides[get_worker] = _get_worker 36 | app.include_router(router) 37 | uvicorn.run(app, host=host, port=port) 38 | -------------------------------------------------------------------------------- /.github/workflows/python-app.yml: -------------------------------------------------------------------------------- 1 | name: Python application 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: 15 | - "3.10" 16 | - "3.11" 17 | - "3.12" 18 | - "3.13" 19 | env: 20 | TERM: 'dumb' 21 | steps: 22 | - uses: actions/checkout@v6 23 | with: 24 | fetch-depth: 0 # Fetch all history for all branches 25 | 26 | - name: Install uv and Python 27 | uses: astral-sh/setup-uv@v7 28 | with: 29 | enable-cache: true 30 | python-version: ${{ matrix.python-version }} 31 | 32 | - name: Setup uv venv 33 | run: uv sync --locked --all-extras --dev 34 | 35 | - name: Lint with ruff 36 | run: uv run ruff check streaq/ tests/ example.py 37 | 38 | - name: Type check with pyright 39 | run: uv 
run pyright streaq/ tests/ example.py 40 | 41 | - name: Test with pytest 42 | run: | 43 | docker compose run --rm tests uv run --locked --all-extras --dev pytest -n auto --dist=loadgroup --cov=streaq tests/ 44 | -------------------------------------------------------------------------------- /benchmarks/bench_taskiq.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Awaitable 3 | 4 | import typer 5 | from taskiq_redis import RedisAsyncResultBackend, RedisStreamBroker 6 | 7 | broker = RedisStreamBroker( 8 | "redis://localhost:6379", xread_count=32 9 | ).with_result_backend( 10 | RedisAsyncResultBackend("redis://localhost:6379", result_ex_time=1) 11 | ) 12 | 13 | N_TASKS = 20_000 14 | sem = asyncio.Semaphore(32) 15 | 16 | 17 | # control the number of simultaneous connections to Redis 18 | async def sem_task(task: Awaitable): 19 | async with sem: 20 | return await task 21 | 22 | 23 | @broker.task 24 | async def sleeper(time: int) -> None: 25 | if time: 26 | await asyncio.sleep(time) 27 | 28 | 29 | async def main(time: int): 30 | await broker.startup() 31 | await asyncio.gather( 32 | *[asyncio.create_task(sem_task(sleeper.kiq(time))) for _ in range(N_TASKS)] 33 | ) 34 | 35 | 36 | def run(time: int = 0): 37 | loop = asyncio.get_event_loop() 38 | start = loop.time() 39 | loop.run_until_complete(main(time)) 40 | end = loop.time() 41 | print(f"enqueued {N_TASKS} tasks in {end - start:.2f}s") 42 | 43 | 44 | if __name__ == "__main__": 45 | typer.run(run) 46 | -------------------------------------------------------------------------------- /benchmarks/bench_arq.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Awaitable 3 | 4 | import typer 5 | from arq import create_pool 6 | from arq.connections import RedisSettings 7 | 8 | N_TASKS = 20_000 9 | sem = asyncio.Semaphore(32) 10 | settings = RedisSettings() 11 | 12 | 13 | # control the number of simultaneous connections to Redis 14 | async def sem_task(task: Awaitable): 15 | async with sem: 16 | return await task 17 | 18 | 19 | async def sleeper(ctx, time: int) -> None: 20 | if time: 21 | await asyncio.sleep(time) 22 | 23 | 24 | class WorkerSettings: 25 | functions = [sleeper] 26 | redis_settings = settings 27 | max_jobs = 32 28 | burst = True 29 | 30 | 31 | async def main(time: int): 32 | queue = await create_pool() 33 | await asyncio.gather( 34 | *[ 35 | asyncio.create_task(sem_task(queue.enqueue_job("sleeper", time))) 36 | for _ in range(N_TASKS) 37 | ] 38 | ) 39 | 40 | 41 | def run(time: int = 0): 42 | loop = asyncio.get_event_loop() 43 | start = loop.time() 44 | loop.run_until_complete(main(time)) 45 | end = loop.time() 46 | print(f"enqueued {N_TASKS} tasks in {end - start:.2f}s") 47 | 48 | 49 | if __name__ == "__main__": 50 | typer.run(run) 51 | -------------------------------------------------------------------------------- /benchmarks/bench_saq.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Awaitable 3 | 4 | import typer 5 | from saq import Queue 6 | 7 | from streaq.utils import now_ms 8 | 9 | N_TASKS = 20_000 10 | sem = asyncio.Semaphore(32) 11 | queue = Queue.from_url("redis://localhost:6379") 12 | 13 | 14 | # control the number of simultaneous connections to Redis 15 | async def sem_task(task: Awaitable): 16 | async with sem: 17 | return await task 18 | 19 | 20 | async def sleeper(ctx, time: int) -> None: 21 | if 
time: 22 | await asyncio.sleep(time) 23 | 24 | 25 | async def startup(ctx): 26 | ctx["start_time"] = now_ms() 27 | 28 | 29 | async def shutdown(ctx): 30 | run_time = now_ms() - ctx["start_time"] 31 | print(f"finished in {run_time}ms") 32 | 33 | 34 | settings = { 35 | "functions": [sleeper], 36 | "queue": queue, 37 | "concurrency": 32, 38 | "burst": True, 39 | "startup": startup, 40 | "shutdown": shutdown, 41 | "dequeue_timeout": 1, 42 | } 43 | 44 | 45 | async def main(time: int): 46 | await asyncio.gather( 47 | *[ 48 | asyncio.create_task(sem_task(queue.enqueue("sleeper", time=time))) 49 | for _ in range(N_TASKS) 50 | ] 51 | ) 52 | 53 | 54 | def run(time: int = 0): 55 | loop = asyncio.get_event_loop() 56 | start = loop.time() 57 | loop.run_until_complete(main(time)) 58 | end = loop.time() 59 | print(f"enqueued {N_TASKS} tasks in {end - start:.2f}s") 60 | 61 | 62 | if __name__ == "__main__": 63 | typer.run(run) 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | tmp.py 7 | 8 | # C extensions 9 | *.so 10 | data/ 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | *.ipynb 78 | 79 | # celery beat schedule file 80 | celerybeat-schedule 81 | 82 | # SageMath parsed files 83 | *.sage.py 84 | 85 | # Environments 86 | .env 87 | .venv 88 | env/ 89 | venv/ 90 | ENV/ 91 | env.bak/ 92 | venv.bak/ 93 | 94 | # Spyder project settings 95 | .spyderproject 96 | .spyproject 97 | 98 | # Rope project settings 99 | .ropeproject 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # idea 105 | .idea 106 | *.iml 107 | *.ipr 108 | 109 | .vscode/ 110 | 111 | main.py 112 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | Contributing 2 | ============ 3 | 4 | Development 5 | ----------- 6 | 7 | Contributions to streaQ are always welcome! Most development tasks are in the included ``Makefile``: 8 | 9 | - ``make install``: set up the linting environment 10 | - ``make lint``: run ruff to check formatting and pyright to check types 11 | - ``make test``: use the included ``docker-compose.yml`` file to spin up Redis and Sentinel containers, then run test suite. This uses caching so it's faster after the first run. 
You'll need Docker and compose installed. 12 | - ``make docs``: build the documentation pages with Sphinx 13 | - ``make cleanup``: tear down running Docker containers 14 | 15 | If you need to test individual tests instead of the entire suite, you can do this: 16 | 17 | .. code-block:: bash 18 | 19 | UV_PYTHON=3.10 docker compose run --rm tests uv run --locked --all-extras --dev pytest -sk 'test_name' 20 | 21 | Benchmarks 22 | ---------- 23 | 24 | If you want to run the benchmarks yourself, first install the dependencies: 25 | 26 | .. code-block:: bash 27 | 28 | uv add streaq[benchmark] 29 | 30 | You can enqueue jobs like so: 31 | 32 | .. code-block:: bash 33 | 34 | python benchmarks/bench_streaq.py --time 1 35 | 36 | Here, ``time`` is the number of seconds to sleep per task. 37 | 38 | You can run a worker with one of these commands, adjusting the number of workers as desired: 39 | 40 | .. code-block:: bash 41 | 42 | arq --workers ? --burst bench_arq.WorkerSettings 43 | saq --quiet bench_saq.settings --workers ? 44 | streaq --burst --workers ? bench_streaq.worker 45 | taskiq worker --workers ? --max-async-tasks 32 bench_taskiq:broker --max-prefetch 32 46 | 47 | Donating 48 | -------- 49 | 50 | If you're interested in supporting the ongoing development of this project, donations are welcome! You can do so through GitHub: https://github.com/sponsors/tastyware 51 | -------------------------------------------------------------------------------- /streaq/types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass 4 | from datetime import timedelta 5 | from typing import ( 6 | TYPE_CHECKING, 7 | Any, 8 | Callable, 9 | Coroutine, 10 | Generic, 11 | Optional, 12 | ParamSpec, 13 | Protocol, 14 | TypeAlias, 15 | TypeVar, 16 | overload, 17 | ) 18 | 19 | if TYPE_CHECKING: # pragma: no cover 20 | from streaq.task import RegisteredTask 21 | 22 | C = TypeVar("C", bound=Optional[object]) 23 | P = ParamSpec("P") 24 | POther = ParamSpec("POther") 25 | R = TypeVar("R", bound=Optional[object]) 26 | ROther = TypeVar("ROther", bound=Optional[object]) 27 | 28 | 29 | @dataclass(frozen=True) 30 | class StreamMessage: 31 | """ 32 | Dataclass wrapping data stored in the Redis stream. 33 | """ 34 | 35 | message_id: str 36 | task_id: str 37 | priority: str 38 | enqueue_time: int 39 | 40 | 41 | @dataclass(frozen=True) 42 | class TaskContext: 43 | """ 44 | Dataclass containing task-specific information like the try count. 45 | """ 46 | 47 | fn_name: str 48 | task_id: str 49 | timeout: timedelta | int | None 50 | tries: int 51 | ttl: timedelta | int | None 52 | 53 | 54 | AnyCoroutine: TypeAlias = Coroutine[Any, Any, Any] 55 | ReturnCoroutine: TypeAlias = Callable[..., AnyCoroutine] 56 | TypedCoroutine: TypeAlias = Coroutine[Any, Any, R] 57 | 58 | Middleware: TypeAlias = Callable[[ReturnCoroutine], ReturnCoroutine] 59 | 60 | AsyncCron: TypeAlias = Callable[[], TypedCoroutine[R]] 61 | SyncCron: TypeAlias = Callable[[], R] 62 | AsyncTask: TypeAlias = Callable[P, TypedCoroutine[R]] 63 | SyncTask: TypeAlias = Callable[P, R] 64 | 65 | 66 | class CronDefinition(Protocol, Generic[C]): 67 | @overload 68 | def __call__(self, fn: AsyncCron[R]) -> RegisteredTask[C, [], R]: ... 69 | 70 | @overload 71 | def __call__(self, fn: SyncCron[R]) -> RegisteredTask[C, [], R]: ... 
# type: ignore 72 | 73 | 74 | class TaskDefinition(Protocol, Generic[C]): 75 | @overload 76 | def __call__(self, fn: AsyncTask[P, R]) -> RegisteredTask[C, P, R]: ... 77 | 78 | @overload 79 | def __call__(self, fn: SyncTask[P, R]) -> RegisteredTask[C, P, R]: ... # type: ignore 80 | -------------------------------------------------------------------------------- /docs/middleware.rst: -------------------------------------------------------------------------------- 1 | Middleware 2 | ========== 3 | 4 | Creating middleware 5 | ------------------- 6 | 7 | You can define middleware to wrap task execution. This has a host of potential applications, like observability and exception handling. Here's an example which times function execution: 8 | 9 | .. code-block:: python 10 | 11 | import time 12 | from streaq.types import ReturnCoroutine 13 | from typing import Any 14 | 15 | @worker.middleware 16 | def timer(task: ReturnCoroutine) -> ReturnCoroutine: 17 | async def wrapper(*args, **kwargs) -> Any: 18 | start_time = time.perf_counter() 19 | result = await task(*args, **kwargs) 20 | tid = worker.task_context().task_id 21 | print(f"Executed task {tid} in {time.perf_counter() - start_time:.3f}s") 22 | return result 23 | 24 | return wrapper 25 | 26 | Middleware are structured as wrapped functions for maximum flexibility--not only can you run code before/after execution, you can also access and even modify the arguments or results. 27 | 28 | Stacking middleware 29 | ------------------- 30 | 31 | You can register as many middleware as you like to a worker, which will run them in the same order they were registered. 32 | 33 | .. code-block:: python 34 | 35 | from streaq import StreaqRetry 36 | 37 | @worker.middleware 38 | def timer(task: ReturnCoroutine) -> ReturnCoroutine: 39 | async def wrapper(*args, **kwargs) -> Any: 40 | start_time = time.perf_counter() 41 | result = await task(*args, **kwargs) 42 | tid = worker.task_context().task_id 43 | print(f"Executed task {tid} in {time.perf_counter() - start_time:.3f}s") 44 | return result 45 | 46 | return wrapper 47 | 48 | # retry all exceptions up to a max of 3 tries 49 | @worker.middleware 50 | def retry(task: ReturnCoroutine) -> ReturnCoroutine: 51 | async def wrapper(*args, **kwargs) -> Any: 52 | try: 53 | return await task(*args, **kwargs) 54 | except Exception as e: 55 | try_count = worker.task_context().tries 56 | if try_count < 3: 57 | raise StreaqRetry("Retrying on error!") from e 58 | else: 59 | raise e 60 | 61 | return wrapper 62 | -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarks 2 | 3 | streaQ's performance significantly improves upon [arq](https://github.com/python-arq/arq), and is on-par with [SAQ](https://github.com/tobymao/saq) and [taskiq](https://github.com/taskiq-python/taskiq). If you want to run these tests yourself, first install the dependencies: 4 | ``` 5 | $ pip install streaq[benchmark] 6 | ``` 7 | 8 | You can enqueue jobs like so: 9 | ``` 10 | $ python benchmarks/bench_streaq.py --time 1 11 | ``` 12 | 13 | And run a worker with one of these commands, adjusting the number of workers as desired: 14 | ``` 15 | $ arq --workers ? --burst bench_arq.WorkerSettings 16 | $ saq --quiet bench_saq.settings --workers ? 17 | $ streaq --burst --workers ? bench_streaq.worker 18 | $ taskiq worker --workers ? 
--max-async-tasks 32 bench_taskiq:broker --max-prefetch 32 19 | ``` 20 | 21 | These benchmarks were run with streaQ v6.0.0 on an M4 Mac Mini using asyncio + uvloop. Trio performance is slightly worse. 22 | 23 | ## Benchmark 1: No-op 24 | 25 | This benchmark evaluates the performance when tasks do nothing, representing negligible amounts of work. 26 | These results are with 20,000 tasks enqueued, a concurrency of `32`, and a variable number of workers. 27 | 28 | | library | enqueuing | 1 worker | 10 workers | 20 workers | 40 workers | 29 | | -------- | --------- | -------- | ---------- | ---------- | ---------- | 30 | | streaq | 0.45s | 8.50s | 3.84s | 4.19s | 5.18s | 31 | | SAQ | 1.67s | 9.86s | 3.46s | 3.45s | 3.93s | 32 | | taskiq | 1.68s | 6.36s | 3.26s | 3.38s | 6.43s | 33 | | arq | 2.31s | 62.66s | 28.10s | 43.33s | ☠️ | 34 | 35 | ## Benchmark 2: Sleep 36 | 37 | This benchmark evaluates the performance when tasks sleep for 1 second, representing a small amount of work. 38 | These results are with 20,000 tasks enqueued, a concurrency of `32`, and a variable number of workers. 39 | 40 | | library | enqueuing | 10 workers | 20 workers | 40 workers | 41 | | -------- | --------- | ---------- | ---------- | ---------- | 42 | | streaq | 0.44s | 63.89s | 33.02s | 17.33s | 43 | | SAQ | 1.69s | 64.51s | 33.56s | 17.74s | 44 | | taskiq | 1.68s | 67.53s | 34.42s | 18.55s | 45 | | arq | 2.27s | 176.87s | 169.47s | ☠️ | 46 | -------------------------------------------------------------------------------- /docs/cli.rst: -------------------------------------------------------------------------------- 1 | Command-line interface 2 | ====================== 3 | 4 | Assuming you have a file called ``example.py`` which defines an instance of ``streaq.Worker`` called ``worker``, you can run a worker process like so: 5 | 6 | .. code-block:: 7 | 8 | $ streaq example.worker 9 | 10 | You can always run ``streaq --help`` to see the help page: 11 | 12 | .. code-block:: text 13 | 14 | Usage: streaq [OPTIONS] WORKER_PATH 15 | 16 | ╭─ Arguments ─────────────────────────────────────────────────────────────╮ 17 | │ * worker_path TEXT [default: None] [required] │ 18 | ╰─────────────────────────────────────────────────────────────────────────╯ 19 | ╭─ Options ───────────────────────────────────────────────────────────────╮ 20 | │ --workers -w INTEGER Number of worker processes to │ 21 | │ spin up │ 22 | │ [default: 1] │ 23 | │ --burst -b Whether to shut down worker when │ 24 | │ the queue is empty │ 25 | │ --reload -r Whether to reload the worker │ 26 | │ upon changes detected │ 27 | │ --verbose -v Whether to use logging.DEBUG │ 28 | │ instead of logging.INFO │ 29 | │ --version Show installed version │ 30 | │ --web Run a web UI for monitoring │ 31 | │ tasks in a separate process. │ 32 | │ --host -h TEXT Host for the web UI server. │ 33 | │ [default: 0.0.0.0] │ 34 | │ --port -p INTEGER Port for the web UI server. │ 35 | │ [default: 8000] │ 36 | │ --install-completion Install completion for the │ 37 | │ current shell. │ 38 | │ --show-completion Show completion for the current │ 39 | │ shell, to copy it or customize │ 40 | │ the installation. │ 41 | │ --help Show this message and exit. 
│ 42 | ╰─────────────────────────────────────────────────────────────────────────╯ 43 | -------------------------------------------------------------------------------- /docs/getting-started.rst: -------------------------------------------------------------------------------- 1 | Getting started 2 | =============== 3 | 4 | To start, you'll need to create a ``Worker`` object. At worker creation, you can provide an async context manager "lifespan" which will initialize any global dependencies you want to have access to in your tasks: 5 | 6 | .. code-block:: python 7 | 8 | from contextlib import asynccontextmanager 9 | from dataclasses import dataclass 10 | from typing import AsyncGenerator 11 | from httpx import AsyncClient 12 | from streaq import Worker 13 | 14 | @dataclass 15 | class WorkerContext: 16 | """ 17 | Type safe way of defining the dependencies of your tasks. 18 | e.g. HTTP client, database connection, settings. 19 | """ 20 | http_client: AsyncClient 21 | 22 | @asynccontextmanager 23 | async def lifespan() -> AsyncGenerator[WorkerContext]: 24 | """ 25 | Here, we initialize the worker's dependencies. 26 | You can also do any startup/shutdown work here 27 | """ 28 | async with AsyncClient() as http_client: 29 | yield WorkerContext(http_client) 30 | 31 | worker = Worker(redis_url="redis://localhost:6379", lifespan=lifespan) 32 | 33 | You can then register async tasks with the worker like this: 34 | 35 | .. code-block:: python 36 | 37 | @worker.task(timeout=5) 38 | async def fetch(url: str) -> int: 39 | # worker.context here is of type WorkerContext 40 | res = await worker.context.http_client.get(url) 41 | return len(res.text) 42 | 43 | Finally, let's queue up some tasks via the worker's async context manager: 44 | 45 | .. code-block:: python 46 | 47 | async with worker: 48 | await fetch.enqueue("https://tastyware.dev/") 49 | # enqueue returns a task object that can be used to get results/info 50 | task = await fetch.enqueue("https://github.com/tastyware/streaq").start(delay=3) 51 | print(await task.info()) 52 | print(await task.result(timeout=5)) 53 | 54 | Put this all together in a script and spin up a worker: 55 | 56 | .. code-block:: bash 57 | 58 | $ streaq script:worker 59 | 60 | and queue up some tasks like so: 61 | 62 | .. code-block:: bash 63 | 64 | $ python script.py 65 | 66 | Let's see what the output looks like: 67 | 68 | .. code-block:: 69 | 70 | [INFO] 2025-09-23 07:19:48: task fetch □ 45d7ff032e6d42239e9f479a2fc4b70e → worker 12195ce1 71 | [INFO] 2025-09-23 07:19:48: task fetch ■ 45d7ff032e6d42239e9f479a2fc4b70e ← 15 72 | [INFO] 2025-09-23 07:19:51: task fetch □ 65e687f9ba644a1fbe23096fa246dfe1 → worker 12195ce1 73 | [INFO] 2025-09-23 07:19:52: task fetch ■ 65e687f9ba644a1fbe23096fa246dfe1 ← 303659 74 | 75 | .. code-block:: python 76 | 77 | TaskInfo(fn_name='fetch', enqueue_time=1756365588232, tries=0, scheduled=datetime.datetime(2025, 8, 28, 7, 19, 51, 232000, tzinfo=datetime.timezone.utc), dependencies=set(), dependents=set()) 78 | TaskResult(fn_name='fetch', enqueue_time=1756365588232, success=True, result=303659, start_time=1756365591327, finish_time=1756365592081, tries=1, worker_id='12195ce1') 79 | -------------------------------------------------------------------------------- /streaq/ui/templates/task.j2: -------------------------------------------------------------------------------- 1 | {% extends "base.j2" %} 2 | 3 | {% block content %} 4 |
<div>
  <h1>Task</h1>
  <table>
    <tr><th>Function</th><td>{{ function }}</td></tr>
    <tr><th>Status</th><td>{{ status }}</td></tr>
    <tr><th>Created time</th><td>{{ created_time }}</td></tr>
    <tr><th>Scheduled time</th><td>{{ scheduled }}</td></tr>
    {% if not is_done %}
    <tr><th>Try count</th><td>{{ task_try }}</td></tr>
    <tr><th>Dependencies</th><td>{{ dependencies }}</td></tr>
    <tr><th>Dependents</th><td>{{ dependents }}</td></tr>
    {% else %}
    <tr><th>Success</th><td>{{ success }}</td></tr>
    <tr><th>Result</th><td><pre>{{- result|trim -}}</pre></td></tr>
    <tr><th>Enqueued time</th><td>{{ enqueue_time }}</td></tr>
    <tr><th>Start time</th><td>{{ start_time }}</td></tr>
    <tr><th>End time</th><td>{{ finish_time }}</td></tr>
    <tr><th>Worker</th><td>{{ worker_id }}</td></tr>
    {% endif %}
  </table>
  {% if not is_done %}
  <!-- control to abort the pending task -->
  {% endif %}
</div>
87 | {% endblock %} 88 | -------------------------------------------------------------------------------- /tests/test_web.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from anyio import create_task_group, sleep 3 | from fastapi import FastAPI, HTTPException 4 | from httpx import ASGITransport, AsyncClient 5 | 6 | from streaq import TaskStatus, Worker 7 | from streaq.ui import router 8 | from streaq.ui.deps import get_worker 9 | from streaq.utils import gather 10 | 11 | pytestmark = pytest.mark.anyio 12 | 13 | 14 | async def test_no_override(): 15 | with pytest.raises(HTTPException): 16 | _ = get_worker() 17 | 18 | 19 | async def test_get_pages(worker: Worker): 20 | app = FastAPI() 21 | prefix = "/streaq" 22 | app.include_router(router, prefix=prefix) 23 | worker.concurrency = 1 24 | worker.prefetch = 1 25 | 26 | @worker.task() 27 | async def sleeper(time: int) -> None: 28 | await sleep(time) 29 | 30 | @worker.task() 31 | async def fails() -> None: 32 | raise Exception("Oh no!") 33 | 34 | async def _get_worker(): 35 | yield worker 36 | 37 | app.dependency_overrides[get_worker] = _get_worker 38 | async with create_task_group() as tg: 39 | await tg.start(worker.run_async) 40 | # queue up some tasks 41 | failed = fails.enqueue() 42 | scheduled = sleeper.enqueue(10).start(delay=5) 43 | done = sleeper.enqueue(0) 44 | running = sleeper.enqueue(10) 45 | queued = sleeper.enqueue(10) 46 | await worker.enqueue_many([failed, scheduled, done, running, queued]) 47 | await gather(done.result(2), failed.result(2)) # make sure tasks are done 48 | while await running.status() != TaskStatus.RUNNING: 49 | await sleep(1) 50 | async with AsyncClient( 51 | transport=ASGITransport(app=app), base_url="http://test" 52 | ) as client: 53 | # endpoints 54 | res = await client.get(f"{prefix}/") 55 | assert res.status_code == 303 56 | res = await client.get(f"{prefix}/queue") 57 | assert res.status_code == 200 58 | res = await client.patch( 59 | f"{prefix}/queue", 60 | data={ 61 | "functions": ["redis_health_check"], 62 | "statuses": ["queued", "running", "done"], 63 | "sort": "desc", 64 | }, 65 | ) 66 | assert res.status_code == 200 67 | # test fetching tasks in various statuses 68 | res = await gather( 69 | *[ 70 | client.get(f"{prefix}/task/{done.id}"), 71 | client.get(f"{prefix}/task/{running.id}"), 72 | client.get(f"{prefix}/task/{queued.id}"), 73 | client.get(f"{prefix}/task/{scheduled.id}"), 74 | client.get(f"{prefix}/task/{failed.id}"), 75 | ] 76 | ) 77 | assert all(r.status_code == 200 for r in res) 78 | # test aborting a task manually, redirect, and bad ID 79 | res = await client.delete(f"{prefix}/task/{scheduled.id}") 80 | assert res.status_code == 200 81 | assert res.headers["HX-Redirect"] == f"{prefix}/queue" 82 | res = await client.get(f"{prefix}/task/nonexistent") 83 | assert res.status_code == 404 84 | # cleanup worker 85 | tg.cancel_scope.cancel() 86 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. image:: https://readthedocs.org/projects/streaq/badge/?version=latest 2 | :target: https://streaq.readthedocs.io/en/latest/?badge=latest 3 | :alt: Documentation Status 4 | 5 | .. image:: https://img.shields.io/pypi/v/streaq 6 | :target: https://pypi.org/project/streaq 7 | :alt: PyPI Package 8 | 9 | .. 
image:: https://static.pepy.tech/badge/streaq 10 | :target: https://pepy.tech/project/streaq 11 | :alt: PyPI Downloads 12 | 13 | .. image:: https://img.shields.io/github/v/release/tastyware/streaq?label=release%20notes 14 | :target: https://github.com/tastyware/streaq/releases 15 | :alt: Release 16 | 17 | streaQ documentation 18 | ==================== 19 | 20 | Fast, async, type-safe job queuing with Redis streams 21 | 22 | +----------------------------+--------+-----+-----+--------+ 23 | | Feature comparison | taskiq | arq | SAQ | streaQ | 24 | +============================+========+=====+=====+========+ 25 | | Startup/shutdown hooks | ✅ | ✅ | ✅ | ✅ | 26 | +----------------------------+--------+-----+-----+--------+ 27 | | Task scheduling/cron jobs | ✅ | ✅ | ✅ | ✅ | 28 | +----------------------------+--------+-----+-----+--------+ 29 | | Task middleware | ✅ | ✅ | ✅ | ✅ | 30 | +----------------------------+--------+-----+-----+--------+ 31 | | Web UI available | ✅ | ✅ | ✅ | ✅ | 32 | +----------------------------+--------+-----+-----+--------+ 33 | | Actively maintained | ✅ | ❌ | ✅ | ✅ | 34 | +----------------------------+--------+-----+-----+--------+ 35 | | Custom serializers | ✅ | ✅ | ❌ | ✅ | 36 | +----------------------------+--------+-----+-----+--------+ 37 | | Type safe | ✅ | ❌ | ❌ | ✅ | 38 | +----------------------------+--------+-----+-----+--------+ 39 | | Extensive documentation | ✅ | ❌ | ❌ | ✅ | 40 | +----------------------------+--------+-----+-----+--------+ 41 | | Task abortion | ❌ | ✅ | ✅ | ✅ | 42 | +----------------------------+--------+-----+-----+--------+ 43 | | Synchronous tasks | ✅ | ❌ | ❌ | ✅ | 44 | +----------------------------+--------+-----+-----+--------+ 45 | | Task dependency graph | ✅ | ❌ | ❌ | ✅ | 46 | +----------------------------+--------+-----+-----+--------+ 47 | | Priority queues | ❌ | ❌ | ❌ | ✅ | 48 | +----------------------------+--------+-----+-----+--------+ 49 | | Multiple backends | ✅ | ❌ | ❌ | ❌ | 50 | +----------------------------+--------+-----+-----+--------+ 51 | | Redis Sentinel support | ✅ | ❌ | ❌ | ✅ | 52 | +----------------------------+--------+-----+-----+--------+ 53 | | Structured concurrency | ❌ | ❌ | ❌ | ✅ | 54 | +----------------------------+--------+-----+-----+--------+ 55 | | Trio support | ❌ | ❌ | ❌ | ✅ | 56 | +----------------------------+--------+-----+-----+--------+ 57 | 58 | .. toctree:: 59 | :maxdepth: 2 60 | :caption: Documentation 61 | :hidden: 62 | 63 | installation 64 | getting-started 65 | worker 66 | task 67 | middleware 68 | cli 69 | integrations 70 | contributing 71 | 72 | .. 
toctree:: 73 | :maxdepth: 2 74 | :caption: API Reference 75 | :hidden: 76 | 77 | api/task 78 | api/types 79 | api/utils 80 | api/worker 81 | 82 | Indices and tables 83 | ================== 84 | 85 | * :ref:`genindex` 86 | * :ref:`modindex` 87 | * :ref:`search` 88 | -------------------------------------------------------------------------------- /streaq/cli.py: -------------------------------------------------------------------------------- 1 | import logging.config 2 | import os 3 | import sys 4 | from multiprocessing import Process 5 | from typing import Annotated, Any, cast 6 | 7 | from typer import Exit, Option, Typer 8 | from watchfiles import run_process 9 | 10 | from streaq import VERSION 11 | from streaq.utils import StreaqError, default_log_config, import_string 12 | from streaq.worker import Worker 13 | 14 | cli = Typer(no_args_is_help=True, pretty_exceptions_show_locals=False) 15 | 16 | 17 | def version_callback(value: bool) -> None: 18 | if value: 19 | print(f"streaQ v{VERSION}") 20 | raise Exit() 21 | 22 | 23 | @cli.command() 24 | def main( 25 | worker_path: str, 26 | workers: Annotated[ 27 | int, Option("--workers", "-w", help="Number of worker processes to spin up") 28 | ] = 1, 29 | burst: Annotated[ 30 | bool, 31 | Option( 32 | "--burst", "-b", help="Whether to shut down worker when the queue is empty" 33 | ), 34 | ] = False, 35 | reload: Annotated[ 36 | bool, 37 | Option( 38 | "--reload", "-r", help="Whether to reload the worker upon changes detected" 39 | ), 40 | ] = False, 41 | verbose: Annotated[ 42 | bool, 43 | Option( 44 | "--verbose", 45 | "-v", 46 | help="Whether to use logging.DEBUG instead of logging.INFO", 47 | ), 48 | ] = False, 49 | version: Annotated[ 50 | bool, 51 | Option("--version", callback=version_callback, help="Show installed version"), 52 | ] = False, 53 | web: Annotated[ 54 | bool, 55 | Option( 56 | "--web", help="Run a web UI for monitoring tasks in a separate process." 57 | ), 58 | ] = False, 59 | host: Annotated[ 60 | str, Option("--host", "-h", help="Host for the web UI server.") 61 | ] = "0.0.0.0", 62 | port: Annotated[ 63 | int, Option("--port", "-p", help="Port for the web UI server.") 64 | ] = 8000, 65 | ) -> None: 66 | web_process: Process | None = None 67 | if web: 68 | try: 69 | from streaq.ui import run_web 70 | except ModuleNotFoundError as e: # pragma: no cover 71 | raise StreaqError( 72 | "web module not installed, try `pip install streaq[web]`" 73 | ) from e 74 | web_process = Process( 75 | target=run_web, 76 | args=(host, port, worker_path), 77 | ) 78 | web_process.start() 79 | for _ in range(workers - 1): 80 | Process( 81 | target=run_worker, 82 | args=(worker_path, burst, reload, verbose), 83 | ).start() 84 | try: 85 | run_worker(worker_path, burst, reload, verbose) 86 | finally: 87 | if web_process and web_process.is_alive(): 88 | web_process.terminate() 89 | web_process.join() 90 | 91 | 92 | def run_worker(path: str, burst: bool, watch: bool, verbose: bool) -> None: 93 | """ 94 | Run a worker with the given options. 
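    Args:
        path: import path to the Worker instance, e.g. ``example.worker``
        burst: whether to shut down the worker when the queue is empty
        watch: whether to reload the worker upon changes detected
        verbose: whether to use logging.DEBUG instead of logging.INFO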
95 | """ 96 | if watch: 97 | run_process( 98 | ".", 99 | target=_run_worker, 100 | args=(path, burst, verbose), 101 | callback=lambda _: print("changes detected, reloading..."), 102 | ) 103 | else: 104 | _run_worker(path, burst, verbose) 105 | 106 | 107 | def _run_worker(path: str, burst: bool, verbose: bool) -> None: 108 | sys.path.append(os.getcwd()) 109 | worker = cast(Worker[Any], import_string(path)) 110 | logging.config.dictConfig(default_log_config(worker.tz, verbose)) 111 | worker.burst = burst 112 | worker.run_sync() 113 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [tool.hatch.version] 6 | path = "streaq/__init__.py" 7 | 8 | [tool.hatch.build.targets.wheel] 9 | include = ["streaq/**/*"] 10 | exclude = ["**/__pycache__/**"] 11 | 12 | [tool.hatch.build.targets.sdist] 13 | include = ["streaq/**/*"] 14 | exclude = ["**/__pycache__/**"] 15 | 16 | [tool.hatch.metadata] 17 | allow-direct-references = true 18 | 19 | [project] 20 | name = "streaq" 21 | description = "Fast, async, fully-typed distributed task queue via Redis streams" 22 | readme = "README.md" 23 | classifiers = [ 24 | "Development Status :: 5 - Production/Stable", 25 | "Environment :: Console", 26 | "Framework :: AsyncIO", 27 | "Framework :: AnyIO", 28 | "Framework :: Django", 29 | "Framework :: FastAPI", 30 | "Framework :: Flask", 31 | "Framework :: Trio", 32 | "Intended Audience :: Developers", 33 | "Intended Audience :: Information Technology", 34 | "Intended Audience :: System Administrators", 35 | "License :: OSI Approved :: MIT License", 36 | "Operating System :: MacOS :: MacOS X", 37 | "Operating System :: Microsoft :: Windows", 38 | "Operating System :: Unix", 39 | "Operating System :: POSIX :: Linux", 40 | "Programming Language :: Python", 41 | "Programming Language :: Python :: 3", 42 | "Programming Language :: Python :: 3 :: Only", 43 | "Programming Language :: Python :: 3.10", 44 | "Programming Language :: Python :: 3.11", 45 | "Programming Language :: Python :: 3.12", 46 | "Programming Language :: Python :: 3.13", 47 | "Programming Language :: Python :: 3.14", 48 | "Topic :: Software Development :: Libraries :: Python Modules", 49 | "Topic :: System :: Clustering", 50 | "Topic :: System :: Distributed Computing", 51 | "Topic :: System :: Monitoring", 52 | "Topic :: System :: Systems Administration", 53 | "Typing :: Typed", 54 | ] 55 | requires-python = ">=3.10" 56 | license = {file = "LICENSE"} 57 | authors = [ 58 | { name = "Graeme Holliday", email = "graeme@tastyware.dev" } 59 | ] 60 | dependencies = [ 61 | "anyio>=4.11.0", 62 | "coredis @ git+https://github.com/Graeme22/coredis.git@anyio", 63 | "crontab>=1.0.5", 64 | "typer>=0.19.2", 65 | "uvloop>=0.21.0; sys_platform != 'win32'", 66 | "watchfiles>=1.1.0", 67 | ] 68 | dynamic = ["version"] 69 | 70 | [project.optional-dependencies] 71 | benchmark = [ 72 | "arq @ git+https://github.com/Graeme22/arq.git", 73 | "saq[hiredis]==0.25.2", 74 | "taskiq-redis==1.1.0", 75 | ] 76 | web = [ 77 | "fastapi>=0.117.1", 78 | "jinja2>=3.1.6", 79 | "python-multipart>=0.0.20", 80 | "uvicorn>=0.37.0", 81 | ] 82 | 83 | [project.scripts] 84 | streaq = "streaq.__main__:cli" 85 | 86 | [project.urls] 87 | Homepage = "https://github.com/tastyware/streaq" 88 | Documentation = "https://streaq.rtfd.io" 89 | Funding = "https://github.com/sponsors/tastyware" 90 | 
Source = "https://github.com/tastyware/streaq" 91 | Changelog = "https://github.com/tastyware/streaq/releases" 92 | 93 | [dependency-groups] 94 | dev = [ 95 | "enum-tools[sphinx]>=0.13.0", 96 | "httpx>=0.28.1", 97 | "pyright>=1.1.406", 98 | "pytest>=8.4.2", 99 | "pytest-cov>=7.0.0", 100 | "pytest-xdist>=3.8.0", 101 | "ruff>=0.13.1", 102 | "sphinx>=8.1.3", 103 | "sphinx-immaterial>=0.13.6", 104 | "trio>=0.30.0", 105 | ] 106 | 107 | [tool.pytest.ini_options] 108 | testpaths = "tests" 109 | 110 | [tool.ruff.lint] 111 | select = ["E", "F", "I"] 112 | 113 | [tool.pyright] 114 | strict = ["streaq/"] 115 | 116 | [tool.coverage.run] 117 | source = ["streaq"] 118 | concurrency = ["multiprocessing", "thread"] 119 | sigterm = true 120 | patch = ["subprocess"] 121 | 122 | [tool.coverage.report] 123 | show_missing = true 124 | fail_under = 100 125 | 126 | [tool.coverage.paths] 127 | streaq = ["streaq", "/app/streaq"] 128 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | import os 6 | import sys 7 | 8 | from streaq import VERSION 9 | 10 | sys.path.insert(0, os.path.abspath("..")) 11 | 12 | # -- Project information ----------------------------------------------------- 13 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 14 | 15 | project = "streaQ" 16 | copyright = "2025, tastyware" 17 | author = "Graeme Holliday" 18 | release = VERSION 19 | 20 | # -- General configuration --------------------------------------------------- 21 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 22 | 23 | extensions = [ 24 | "sphinx.ext.duration", 25 | "sphinx.ext.doctest", 26 | "sphinx.ext.autodoc", 27 | "sphinx.ext.autosummary", 28 | "sphinx.ext.intersphinx", 29 | "enum_tools.autoenum", 30 | "sphinx_immaterial", 31 | ] 32 | 33 | intersphinx_mapping = { 34 | "rtd": ("https://docs.readthedocs.io/en/stable/", None), 35 | "python": ("https://docs.python.org/3/", None), 36 | "sphinx": ("https://www.sphinx-doc.org/en/master/", None), 37 | } 38 | intersphinx_disabled_domains = ["std"] 39 | 40 | templates_path = ["_templates"] 41 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 42 | 43 | 44 | # -- Options for HTML output ------------------------------------------------- 45 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 46 | 47 | html_theme = "sphinx_immaterial" 48 | html_theme_options = { 49 | "icon": { 50 | "repo": "fontawesome/brands/github", 51 | "edit": "material/file-edit-outline", 52 | }, 53 | "site_url": "https://streaq.rtfd.io", 54 | "repo_url": "https://github.com/tastyware/streaq", 55 | "repo_name": "streaq", 56 | "edit_uri": "blob/main/docs", 57 | "globaltoc_collapse": True, 58 | "features": [ 59 | "navigation.expand", 60 | # "navigation.tabs", 61 | # "navigation.tabs.sticky", 62 | # "toc.integrate", 63 | "navigation.sections", 64 | # "navigation.instant", 65 | # "header.autohide", 66 | "navigation.top", 67 | "navigation.footer", 68 | # "navigation.tracking", 69 | # "search.highlight", 70 | "search.share", 71 | "search.suggest", 72 | "toc.follow", 73 | "toc.sticky", 74 | "content.tabs.link", 75 | "content.code.copy", 76 | "content.action.edit", 77 | 
"content.action.view", 78 | "content.tooltips", 79 | "announce.dismiss", 80 | ], 81 | "palette": [ 82 | { 83 | "media": "(prefers-color-scheme: light)", 84 | "scheme": "default", 85 | "primary": "green", 86 | "accent": "light-blue", 87 | "toggle": { 88 | "icon": "material/lightbulb", 89 | "name": "Switch to dark mode", 90 | }, 91 | }, 92 | { 93 | "media": "(prefers-color-scheme: dark)", 94 | "scheme": "slate", 95 | "primary": "deep-orange", 96 | "accent": "lime", 97 | "toggle": { 98 | "icon": "material/lightbulb-outline", 99 | "name": "Switch to system preference", 100 | }, 101 | }, 102 | ], 103 | "toc_title_is_page_title": True, 104 | # BEGIN: social icons 105 | "social": [ 106 | { 107 | "icon": "fontawesome/brands/github", 108 | "link": "https://github.com/tastyware/streaq", 109 | "name": "Source code", 110 | }, 111 | { 112 | "icon": "fontawesome/brands/python", 113 | "link": "https://pypi.org/project/streaq", 114 | }, 115 | ], 116 | } 117 | html_static_path = ["_static"] 118 | -------------------------------------------------------------------------------- /tests/test_cli.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | from pathlib import Path 4 | 5 | import pytest 6 | from anyio import sleep 7 | from httpx import AsyncClient, ConnectError 8 | from typer.testing import CliRunner 9 | 10 | from streaq import VERSION, Worker 11 | from streaq.cli import cli 12 | 13 | pytestmark = pytest.mark.anyio 14 | runner = CliRunner() 15 | test_module = sys.modules["tests.test_cli"] 16 | 17 | 18 | def test_burst(worker: Worker): 19 | setattr(test_module, "test_worker", worker) 20 | result = runner.invoke(cli, ["tests.test_cli:test_worker", "--burst"]) 21 | assert result.exit_code == 0 22 | 23 | 24 | def test_multiple_workers(worker: Worker): 25 | setattr(test_module, "test_worker", worker) 26 | result = runner.invoke( 27 | cli, ["--burst", "--workers", "2", "tests.test_cli:test_worker"] 28 | ) 29 | assert result.exit_code == 0 30 | 31 | 32 | def test_verbose(worker: Worker): 33 | setattr(test_module, "test_worker", worker) 34 | result = runner.invoke(cli, ["tests.test_cli:test_worker", "--burst", "--verbose"]) 35 | assert result.exit_code == 0 36 | assert "established" in result.stderr 37 | 38 | 39 | def test_version(worker: Worker): 40 | setattr(test_module, "test_worker", worker) 41 | result = runner.invoke(cli, ["--version"]) 42 | assert result.exit_code == 0 43 | assert VERSION in result.stdout 44 | 45 | 46 | def test_help(worker: Worker): 47 | setattr(test_module, "test_worker", worker) 48 | result = runner.invoke(cli, ["--help"]) 49 | assert result.exit_code == 0 50 | assert "--help" in result.stdout 51 | 52 | 53 | def test_main_entry_point(): 54 | result = subprocess.run( 55 | [sys.executable, "-m", "streaq", "--help"], 56 | capture_output=True, 57 | text=True, 58 | check=False, 59 | ) 60 | assert result.returncode == 0 61 | assert "--help" in result.stdout 62 | 63 | 64 | async def test_web_cli(redis_url: str, free_tcp_port: int, tmp_path: Path): 65 | file = tmp_path / "web.py" 66 | file.write_text( 67 | f"""from uuid import uuid4 68 | from streaq import Worker 69 | worker = Worker(redis_url="{redis_url}", queue_name=uuid4().hex)""" 70 | ) 71 | p = subprocess.Popen( 72 | [ 73 | sys.executable, 74 | "-m", 75 | "streaq", 76 | "web:worker", 77 | "--web", 78 | "--port", 79 | str(free_tcp_port), 80 | ], 81 | cwd=str(tmp_path), 82 | stdout=subprocess.PIPE, 83 | stderr=subprocess.PIPE, 84 | text=True, 85 | ) 86 | 87 | async 
with AsyncClient() as client: 88 | for _ in range(5): 89 | try: 90 | res = await client.get(f"http://localhost:{free_tcp_port}/") 91 | assert res.status_code == 303 92 | break 93 | except ConnectError: 94 | await sleep(1) 95 | else: 96 | pytest.fail("Web CLI never listened on port!") 97 | p.terminate() 98 | out, err = p.communicate(timeout=3) 99 | text = (out + err).lower() 100 | assert "uvicorn" in text 101 | 102 | 103 | async def test_watch_subprocess(redis_url: str, tmp_path: Path): 104 | file = tmp_path / "watch.py" 105 | file.write_text( 106 | f"""from uuid import uuid4 107 | from streaq import Worker 108 | worker = Worker(redis_url="{redis_url}", queue_name=uuid4().hex)""" 109 | ) 110 | p = subprocess.Popen( 111 | [sys.executable, "-m", "streaq", "watch:worker", "--reload"], 112 | cwd=str(tmp_path), 113 | stdout=subprocess.PIPE, 114 | stderr=subprocess.PIPE, 115 | text=True, 116 | ) 117 | 118 | await sleep(2) 119 | with open(file, "a") as f: # make change 120 | f.write(" # change from test") 121 | await sleep(1) 122 | 123 | p.terminate() 124 | out, err = p.communicate(timeout=3) 125 | text = (out + err).lower() 126 | assert "reloading" in text 127 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Docs](https://readthedocs.org/projects/streaq/badge/?version=latest)](https://streaq.readthedocs.io/en/latest/?badge=latest) 2 | [![PyPI](https://img.shields.io/pypi/v/streaq)](https://pypi.org/project/streaq) 3 | [![Downloads](https://static.pepy.tech/badge/streaq)](https://pepy.tech/project/streaq) 4 | [![Release](https://img.shields.io/github/v/release/tastyware/streaq?label=release%20notes)](https://github.com/tastyware/streaq/releases) 5 | ![Coverage](https://raw.githubusercontent.com/tastyware/streaq/master/coverage.svg) 6 | [![Human](https://img.shields.io/badge/human-coded-green?logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHdpZHRoPSIyNCIgaGVpZ2h0PSIyNCIgdmlld0JveD0iMCAwIDI0IDI0IiBmaWxsPSJub25lIiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIiBjbGFzcz0ibHVjaWRlIGx1Y2lkZS1wZXJzb24tc3RhbmRpbmctaWNvbiBsdWNpZGUtcGVyc29uLXN0YW5kaW5nIj48Y2lyY2xlIGN4PSIxMiIgY3k9IjUiIHI9IjEiLz48cGF0aCBkPSJtOSAyMCAzLTYgMyA2Ii8+PHBhdGggZD0ibTYgOCA2IDIgNi0yIi8+PHBhdGggZD0iTTEyIDEwdjQiLz48L3N2Zz4=)](#) 7 | 8 | # streaQ 9 | 10 | Fast, async, fully-typed distributed task queue via Redis streams 11 | 12 | ## Features 13 | 14 | - Up to [5x faster](https://github.com/tastyware/streaq/tree/master/benchmarks) than `arq` 15 | - Fully typed 16 | - Comprehensive documentation 17 | - Support for delayed/scheduled tasks 18 | - Cron jobs 19 | - Task middleware 20 | - Task dependency graph 21 | - Pipelining 22 | - Priority queues 23 | - Support for synchronous tasks (run in separate threads) 24 | - Redis Sentinel support for production 25 | - Built-in web UI for monitoring tasks 26 | - Built with structured concurrency on `anyio`, supports both `asyncio` and `trio` 27 | 28 | ## Installation 29 | 30 | ```console 31 | $ pip install streaq 32 | ``` 33 | 34 | ## Getting started 35 | 36 | To start, you'll need to create a `Worker` object: 37 | 38 | ```python 39 | from streaq import Worker 40 | 41 | worker = Worker(redis_url="redis://localhost:6379") 42 | ``` 43 | 44 | You can then register async tasks with the worker like this: 45 | 46 | ```python 47 | import asyncio 48 | 49 | @worker.task() 50 | async 
def sleeper(time: int) -> int: 51 | await asyncio.sleep(time) 52 | return time 53 | 54 | @worker.cron("* * * * mon-fri") # every minute on weekdays 55 | async def cronjob() -> None: 56 | print("Nobody respects the spammish repetition!") 57 | ``` 58 | 59 | Finally, let's initialize the worker and queue up some tasks: 60 | 61 | ```python 62 | async with worker: 63 | await sleeper.enqueue(3) 64 | # enqueue returns a task object that can be used to get results/info 65 | task = await sleeper.enqueue(1).start(delay=3) 66 | print(await task.info()) 67 | print(await task.result(timeout=5)) 68 | ``` 69 | 70 | Putting this all together gives us [example.py](https://github.com/tastyware/streaq/blob/master/example.py). Let's spin up a worker: 71 | ``` 72 | $ streaq example:worker 73 | ``` 74 | and queue up some tasks like so: 75 | ``` 76 | $ python example.py 77 | ``` 78 | 79 | Let's see what the output looks like: 80 | 81 | ``` 82 | [INFO] 2025-09-23 02:14:30: starting worker 3265311d for 2 functions 83 | [INFO] 2025-09-23 02:14:35: task sleeper □ cf0c55387a214320bd23e8987283a562 → worker 3265311d 84 | [INFO] 2025-09-23 02:14:38: task sleeper ■ cf0c55387a214320bd23e8987283a562 ← 3 85 | [INFO] 2025-09-23 02:14:40: task sleeper □ 1de3f192ee4a40d4884ebf303874681c → worker 3265311d 86 | [INFO] 2025-09-23 02:14:41: task sleeper ■ 1de3f192ee4a40d4884ebf303874681c ← 1 87 | [INFO] 2025-09-23 02:15:00: task cronjob □ 2a4b864e5ecd4fc99979a92f5db3a6e0 → worker 3265311d 88 | Nobody respects the spammish repetition! 89 | [INFO] 2025-09-23 02:15:00: task cronjob ■ 2a4b864e5ecd4fc99979a92f5db3a6e0 ← None 90 | ``` 91 | ```python 92 | TaskInfo(fn_name='sleeper', enqueue_time=1751508876961, tries=0, scheduled=datetime.datetime(2025, 7, 3, 2, 14, 39, 961000, tzinfo=datetime.timezone.utc), dependencies=set(), dependents=set()) 93 | TaskResult(fn_name='sleeper', enqueue_time=1751508876961, success=True, result=1, start_time=1751508880500, finish_time=1751508881503, tries=1, worker_id='ca5bd9eb') 94 | ``` 95 | 96 | For more examples, check out the [documentation](https://streaq.readthedocs.io/en/latest/). 97 | -------------------------------------------------------------------------------- /docs/integrations.rst: -------------------------------------------------------------------------------- 1 | Framework integrations 2 | ====================== 3 | 4 | FastAPI 5 | ------- 6 | 7 | Integration with FastAPI is straightforward: 8 | 9 | .. code-block:: python 10 | 11 | from contextlib import asynccontextmanager 12 | from typing import AsyncGenerator 13 | from fastapi import FastAPI, HTTPException, status 14 | from example import fetch, worker 15 | @asynccontextmanager 16 | async def app_lifespan(app: FastAPI) -> AsyncGenerator[None]: 17 | async with worker: 18 | yield 19 | 20 | app = FastAPI(lifespan=app_lifespan) 21 | 22 | @app.post("/fetch") 23 | async def do_fetch(url: str) -> int: 24 | task = await fetch.enqueue(url) 25 | try: 26 | res = await task.result(5) 27 | except TimeoutError: 28 | raise HTTPException( 29 | status_code=status.HTTP_408_REQUEST_TIMEOUT, detail="Timed out!" 30 | ) 31 | if not res.success: 32 | raise HTTPException( 33 | status_code=status.HTTP_424_FAILED_DEPENDENCY, detail="Task failed!" 34 | ) 35 | return res.result 36 | 37 | Here, we're building off of the ``fetch`` task defined in :doc:`Getting started <getting-started>`. As you can imagine, integrating with other frameworks should be very similar! 38 |
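If you would rather not hold the HTTP request open while the task runs, the same pieces support a fire-and-forget variant. This is a sketch, not part of the original docs: the route paths and response shapes below are made up, but the calls use only the documented ``fetch.enqueue()``, ``Worker.status_by_id``, and ``Worker.result_by_id`` APIs. One endpoint returns the task ID immediately, and a second endpoint polls for the outcome:

.. code-block:: python

    from typing import Any

    from streaq import TaskStatus

    @app.post("/fetch/start")
    async def start_fetch(url: str) -> dict[str, str]:
        # enqueue the task without waiting for it to finish
        task = await fetch.enqueue(url)
        return {"task_id": task.id}

    @app.get("/fetch/{task_id}")
    async def poll_fetch(task_id: str) -> dict[str, Any]:
        task_status = await worker.status_by_id(task_id)
        if task_status != TaskStatus.DONE:
            # still queued, scheduled, or running (or an unknown ID)
            return {"status": task_status.value}
        res = await worker.result_by_id(task_id)
        if not res.success:
            raise HTTPException(
                status_code=status.HTTP_424_FAILED_DEPENDENCY, detail="Task failed!"
            )
        return {"status": task_status.value, "result": res.result}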
39 | Separating enqueuing from task definitions 40 | ------------------------------------------ 41 | 42 | A common scenario is to have separate codebases for the backend and the worker. For example, if your worker is serving a large LLM, you probably don't want to load the LLM in the backend. There are two ways to handle this: 43 | 44 | First, you can simply use type stubs to re-define the task signatures in the backend: 45 | 46 | .. code-block:: python 47 | 48 | from streaq import Worker 49 | 50 | # this worker should have the same Redis URL, serializer/deserializer, signing key, 51 | # and queue name as the worker defined elsewhere 52 | worker = Worker(redis_url="redis://localhost:6379") 53 | 54 | @worker.task() 55 | async def fetch(url: str) -> int: ... 56 | 57 | Now, tasks can be enqueued in the same way as before: 58 | 59 | .. code-block:: python 60 | 61 | async with worker: 62 | await fetch.enqueue("https://github.com/tastyware/streaq") 63 | 64 | .. warning:: 65 | 66 | ``fetch.run()`` will not work here, since ``run()`` skips enqueuing entirely! 67 | 68 | The second way is to use ``Worker.enqueue_unsafe``: 69 | 70 | .. code-block:: python 71 | 72 | from streaq import Worker 73 | 74 | # again, this worker should have the same Redis URL, serializer/deserializer, 75 | # signing key, and queue name as the worker defined elsewhere 76 | worker = Worker(redis_url="redis://localhost:6379") 77 | 78 | async with worker: 79 | await worker.enqueue_unsafe("fetch", "https://tastyware.dev") 80 | 81 | This method is not type-safe, but it doesn't require you to re-define the task signature in the backend. Here, the first parameter is the ``fn_name`` of the task defined elsewhere, and the rest of the args and kwargs can be passed normally. 82 | 83 | Web UI integration 84 | ------------------ 85 | 86 | The web UI is useful for monitoring tasks; however, the information available there (and the ability to cancel tasks) is probably not something you want to make available to all your users. 87 | 88 | With a little work, the UI can be mounted as part of an existing FastAPI application. You just need to override the ``get_worker()`` dependency, then you can integrate the UI into your existing app: 89 | 90 | .. code-block:: python 91 | 92 | from streaq.ui import get_worker, router 93 | 94 | app = FastAPI(lifespan=app_lifespan) # see above, we need the worker to be initialized 95 | app.dependency_overrides[get_worker] = lambda: worker 96 | # here, you can add any auth-related dependencies as well 97 | app.include_router(router, prefix="/streaq", dependencies=[...]) 98 | 99 | If desired, you can add custom formatters in a similar way: 100 | 101 | .. code-block:: python 102 | 103 | from streaq.ui import get_result_formatter, get_exception_formatter 104 | 105 | def my_result_formatter(result: Any) -> str: ... 106 | def my_exception_formatter(exc: BaseException) -> str: ...
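    # Hypothetical bodies for the two stubs above (illustration only, not from streaQ's docs):
    # my_result_formatter might pretty-print anything JSON-serializable, e.g.
    #     json.dumps(result, indent=2, default=str)
    # while my_exception_formatter might render a full traceback, e.g.
    #     "".join(traceback.format_exception(exc))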
107 | 108 | app.dependency_overrides[get_result_formatter] = lambda: my_result_formatter 109 | app.dependency_overrides[get_exception_formatter] = lambda: my_exception_formatter 110 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | redis-master: 3 | image: redis:latest 4 | container_name: redis-master 5 | hostname: redis-master 6 | volumes: 7 | - ./data/master:/data 8 | command: 9 | [ 10 | "redis-server", 11 | "--appendonly", 12 | "yes", 13 | "--repl-diskless-load", 14 | "on-empty-db", 15 | "--protected-mode", 16 | "no" 17 | ] 18 | healthcheck: 19 | test: ["CMD-SHELL", "redis-cli -p 6379 ping | grep -q PONG"] 20 | interval: 2s 21 | timeout: 3s 22 | retries: 15 23 | start_period: 5s 24 | 25 | slave-1: 26 | image: redis:latest 27 | container_name: slave-1 28 | hostname: slave-1 29 | depends_on: 30 | - redis-master 31 | volumes: 32 | - ./data/slave1:/data 33 | command: 34 | [ 35 | "redis-server", 36 | "--appendonly", 37 | "yes", 38 | "--replicaof", 39 | "redis-master", 40 | "6379", 41 | "--repl-diskless-load", 42 | "on-empty-db", 43 | "--protected-mode", 44 | "no" 45 | ] 46 | healthcheck: 47 | test: ["CMD-SHELL", "redis-cli -p 6379 --raw INFO replication | grep -q '^role:slave' && redis-cli -p 6379 --raw INFO replication | grep -q 'master_link_status:up'"] 48 | interval: 3s 49 | timeout: 4s 50 | retries: 15 51 | start_period: 15s 52 | 53 | slave-2: 54 | image: redis:latest 55 | container_name: slave-2 56 | hostname: slave-2 57 | depends_on: 58 | - redis-master 59 | volumes: 60 | - ./data/slave2:/data 61 | command: 62 | [ 63 | "redis-server", 64 | "--appendonly", 65 | "yes", 66 | "--replicaof", 67 | "redis-master", 68 | "6379", 69 | "--repl-diskless-load", 70 | "on-empty-db", 71 | "--protected-mode", 72 | "no" 73 | ] 74 | healthcheck: 75 | test: ["CMD-SHELL", "redis-cli -p 6379 --raw INFO replication | grep -q '^role:slave' && redis-cli -p 6379 --raw INFO replication | grep -q 'master_link_status:up'"] 76 | interval: 3s 77 | timeout: 4s 78 | retries: 15 79 | start_period: 15s 80 | 81 | sentinel-1: 82 | image: redis:latest 83 | container_name: sentinel-1 84 | hostname: sentinel-1 85 | depends_on: 86 | - redis-master 87 | command: > 88 | sh -c 'echo "bind 0.0.0.0" > /etc/sentinel.conf && 89 | echo "sentinel monitor mymaster redis-master 6379 2" >> /etc/sentinel.conf && 90 | echo "sentinel resolve-hostnames yes" >> /etc/sentinel.conf && 91 | echo "sentinel down-after-milliseconds mymaster 10000" >> /etc/sentinel.conf && 92 | echo "sentinel failover-timeout mymaster 10000" >> /etc/sentinel.conf && 93 | echo "sentinel parallel-syncs mymaster 1" >> /etc/sentinel.conf && 94 | redis-sentinel /etc/sentinel.conf' 95 | healthcheck: 96 | test: ["CMD-SHELL", "redis-cli -p 26379 SENTINEL ckquorum mymaster | grep -q ^OK"] 97 | interval: 3s 98 | timeout: 4s 99 | retries: 60 100 | start_period: 10s 101 | 102 | sentinel-2: 103 | image: redis:latest 104 | container_name: sentinel-2 105 | hostname: sentinel-2 106 | depends_on: 107 | - redis-master 108 | command: > 109 | sh -c 'echo "bind 0.0.0.0" > /etc/sentinel.conf && 110 | echo "sentinel monitor mymaster redis-master 6379 2" >> /etc/sentinel.conf && 111 | echo "sentinel resolve-hostnames yes" >> /etc/sentinel.conf && 112 | echo "sentinel down-after-milliseconds mymaster 10000" >> /etc/sentinel.conf && 113 | echo "sentinel failover-timeout mymaster 10000" >> /etc/sentinel.conf && 114 | echo "sentinel 
parallel-syncs mymaster 1" >> /etc/sentinel.conf && 115 | redis-sentinel /etc/sentinel.conf' 116 | healthcheck: 117 | test: ["CMD-SHELL", "redis-cli -p 26379 SENTINEL ckquorum mymaster | grep -q ^OK"] 118 | interval: 3s 119 | timeout: 4s 120 | retries: 60 121 | start_period: 10s 122 | 123 | sentinel-3: 124 | image: redis:latest 125 | container_name: sentinel-3 126 | hostname: sentinel-3 127 | depends_on: 128 | - redis-master 129 | command: > 130 | sh -c 'echo "bind 0.0.0.0" > /etc/sentinel.conf && 131 | echo "sentinel monitor mymaster redis-master 6379 2" >> /etc/sentinel.conf && 132 | echo "sentinel resolve-hostnames yes" >> /etc/sentinel.conf && 133 | echo "sentinel down-after-milliseconds mymaster 10000" >> /etc/sentinel.conf && 134 | echo "sentinel failover-timeout mymaster 10000" >> /etc/sentinel.conf && 135 | echo "sentinel parallel-syncs mymaster 1" >> /etc/sentinel.conf && 136 | redis-sentinel /etc/sentinel.conf' 137 | healthcheck: 138 | test: ["CMD-SHELL", "redis-cli -p 26379 SENTINEL ckquorum mymaster | grep -q ^OK"] 139 | interval: 3s 140 | timeout: 4s 141 | retries: 60 142 | start_period: 10s 143 | 144 | tests: 145 | image: ghcr.io/astral-sh/uv:debian 146 | environment: 147 | - UV_PYTHON=${PYTHON_VERSION} 148 | - UV_LINK_MODE=copy 149 | - UV_PYTHON_CACHE_DIR=/root/.cache/uv/python 150 | working_dir: /app 151 | volumes: 152 | - .:/app 153 | - /app/.venv 154 | - uv-cache:/root/.cache/uv 155 | depends_on: 156 | redis-master: 157 | condition: service_healthy 158 | sentinel-1: 159 | condition: service_healthy 160 | sentinel-2: 161 | condition: service_healthy 162 | sentinel-3: 163 | condition: service_healthy 164 | slave-1: 165 | condition: service_healthy 166 | slave-2: 167 | condition: service_healthy 168 | 169 | volumes: 170 | uv-cache: {} # named volume for uv/pip caches 171 | -------------------------------------------------------------------------------- /streaq/utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | from datetime import datetime, timedelta, tzinfo 3 | from functools import partial, wraps 4 | from importlib import import_module 5 | from logging import Formatter 6 | from typing import Any, Awaitable, Callable, TypeVar, overload 7 | 8 | from anyio import CapacityLimiter, create_task_group 9 | from anyio.to_thread import run_sync 10 | 11 | from streaq.types import P, R, TypedCoroutine 12 | 13 | 14 | class StreaqError(Exception): 15 | pass 16 | 17 | 18 | class StreaqCancelled(StreaqError): 19 | pass 20 | 21 | 22 | class TimezoneFormatter(Formatter): 23 | def __init__( 24 | self, 25 | fmt: str | None = None, 26 | datefmt: str | None = None, 27 | tz: tzinfo | None = None, 28 | **kwargs: Any, 29 | ): 30 | """ 31 | Like a normal formatter, but with a timezone. 32 | """ 33 | super().__init__(fmt, datefmt, **kwargs) 34 | self.tz = tz 35 | 36 | def converter(self, *args: Any) -> time.struct_time: 37 | return datetime.now(self.tz).timetuple() 38 | 39 | 40 | def import_string(dotted_path: str) -> Any: 41 | """ 42 | Taken from pydantic.utils. Import and return the object at a path. 
43 | """ 44 | 45 | try: 46 | module_path, class_name = dotted_path.strip(" ").rsplit(":", 1) 47 | except ValueError as e: 48 | raise ImportError(f"'{dotted_path}' doesn't look like a module path") from e 49 | 50 | module = import_module(module_path) 51 | try: 52 | return getattr(module, class_name) 53 | except AttributeError as e: 54 | raise ImportError( 55 | f"Module '{module_path}' does not define a '{class_name}' attribute" 56 | ) from e 57 | 58 | 59 | def to_seconds(timeout: timedelta | int | None) -> float | None: 60 | if isinstance(timeout, timedelta): 61 | return timeout.total_seconds() 62 | return float(timeout) if timeout is not None else None 63 | 64 | 65 | def to_ms(timeout: timedelta | int | float) -> int: 66 | if isinstance(timeout, timedelta): 67 | return round(timeout.total_seconds() * 1000) 68 | return round(timeout * 1000) 69 | 70 | 71 | def now_ms() -> int: 72 | return round(time.time() * 1000) 73 | 74 | 75 | def datetime_ms(dt: datetime) -> int: 76 | return round(dt.timestamp() * 1000) 77 | 78 | 79 | def to_tuple(val: Any) -> tuple[Any, ...]: 80 | return val if isinstance(val, tuple) else (val,) # type: ignore 81 | 82 | 83 | def default_log_config(tz: tzinfo, verbose: bool) -> dict[str, Any]: 84 | """ 85 | Setup default config. for dictConfig. 86 | 87 | :param tz: timezone for logs 88 | :param verbose: level: DEBUG if True, INFO if False 89 | 90 | :return: dict suitable for ``logging.config.dictConfig`` 91 | """ 92 | log_level = "DEBUG" if verbose else "INFO" 93 | return { 94 | "version": 1, 95 | "disable_existing_loggers": False, 96 | "handlers": { 97 | "streaq.standard": { 98 | "level": log_level, 99 | "class": "logging.StreamHandler", 100 | "formatter": "streaq.standard", 101 | } 102 | }, 103 | "formatters": { 104 | "streaq.standard": { 105 | "()": TimezoneFormatter, 106 | "format": "[%(levelname)s] %(asctime)s: %(message)s", 107 | "datefmt": "%Y-%m-%d %H:%M:%S", 108 | "tz": tz, 109 | } 110 | }, 111 | "loggers": {"streaq": {"handlers": ["streaq.standard"], "level": log_level}}, 112 | } 113 | 114 | 115 | def asyncify( 116 | fn: Callable[P, R], limiter: CapacityLimiter 117 | ) -> Callable[P, TypedCoroutine[R]]: 118 | """ 119 | Taken from asyncer v0.0.8 120 | 121 | Take a blocking function and create an async one that receives the same 122 | positional and keyword arguments, and that when called, calls the original 123 | function in a worker thread using `anyio.to_thread.run_sync()`. 124 | 125 | If the task waiting for its completion is cancelled, the thread will still 126 | run its course but its result will be ignored. 127 | 128 | Example usage:: 129 | 130 | def do_work(arg1, arg2, kwarg1="", kwarg2="") -> str: 131 | return "stuff" 132 | 133 | result = await to_thread.asyncify(do_work)( 134 | "spam", 135 | "ham", 136 | kwarg1="a", 137 | kwarg2="b" 138 | ) 139 | print(result) 140 | 141 | :param fn: a blocking regular callable (e.g. a function) 142 | :param limiter: a CapacityLimiter instance to limit the number of concurrent 143 | threads running the blocking function. 144 | 145 | :return: 146 | An async function that takes the same positional and keyword arguments as the 147 | original one, that when called runs the same original function in a thread 148 | worker and returns the result. 
149 | """ 150 | 151 | @wraps(fn) 152 | async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R: 153 | call = partial(fn, *args, **kwargs) 154 | return await run_sync(call, abandon_on_cancel=True, limiter=limiter) 155 | 156 | return wrapper 157 | 158 | 159 | T1 = TypeVar("T1") 160 | T2 = TypeVar("T2") 161 | T3 = TypeVar("T3") 162 | T4 = TypeVar("T4") 163 | T5 = TypeVar("T5") 164 | 165 | 166 | @overload 167 | async def gather( 168 | awaitable1: Awaitable[T1], awaitable2: Awaitable[T2], / 169 | ) -> tuple[T1, T2]: ... 170 | 171 | 172 | @overload 173 | async def gather( 174 | awaitable1: Awaitable[T1], awaitable2: Awaitable[T2], awaitable3: Awaitable[T3], / 175 | ) -> tuple[T1, T2, T3]: ... 176 | 177 | 178 | @overload 179 | async def gather( 180 | awaitable1: Awaitable[T1], 181 | awaitable2: Awaitable[T2], 182 | awaitable3: Awaitable[T3], 183 | awaitable4: Awaitable[T4], 184 | /, 185 | ) -> tuple[T1, T2, T3, T4]: ... 186 | 187 | 188 | @overload 189 | async def gather( 190 | awaitable1: Awaitable[T1], 191 | awaitable2: Awaitable[T2], 192 | awaitable3: Awaitable[T3], 193 | awaitable4: Awaitable[T4], 194 | awaitable5: Awaitable[T5], 195 | /, 196 | ) -> tuple[T1, T2, T3, T4, T5]: ... 197 | 198 | 199 | @overload 200 | async def gather(*awaitables: Awaitable[T1]) -> tuple[T1, ...]: ... 201 | 202 | 203 | async def gather(*awaitables: Awaitable[Any]) -> tuple[Any, ...]: 204 | if not awaitables: 205 | return () 206 | if len(awaitables) == 1: # optimize for this case 207 | return (await awaitables[0],) 208 | results: list[Any] = [None] * len(awaitables) 209 | 210 | async def runner(awaitable: Awaitable[Any], i: int) -> None: 211 | results[i] = await awaitable 212 | 213 | async with create_task_group() as tg: 214 | for i, awaitable in enumerate(awaitables): 215 | tg.start_soon(runner, awaitable, i) 216 | return tuple(results) 217 | -------------------------------------------------------------------------------- /streaq/lua/streaq.lua: -------------------------------------------------------------------------------- 1 | #!lua name=streaq 2 | 3 | redis.register_function('create_groups', function(keys, argv) 4 | local stream_key = keys[1] 5 | local group_name = keys[2] 6 | 7 | for i=1, #argv do 8 | local stream = stream_key .. argv[i] 9 | -- create group if it doesn't exist 10 | local ok, groups = pcall(redis.call, 'xinfo', 'groups', stream) 11 | if not ok or #groups == 0 then 12 | redis.call('xgroup', 'create', stream, group_name, '0', 'mkstream') 13 | end 14 | end 15 | end) 16 | 17 | redis.register_function('fail_dependents', function(keys, argv) 18 | local dependents_key = keys[1] 19 | local dependencies_key = keys[2] 20 | local task_id = keys[3] 21 | 22 | local visited = {} 23 | local failed = {} 24 | local stack = { task_id } 25 | 26 | -- iterative DFS to traverse DAG 27 | while #stack > 0 do 28 | -- pop off last element 29 | local tid = stack[#stack] 30 | stack[#stack] = nil 31 | if not visited[tid] then 32 | visited[tid] = true 33 | -- push dependents onto the stack 34 | local deps = redis.call('smembers', dependents_key .. tid) 35 | for _, dep_id in ipairs(deps) do 36 | stack[#stack + 1] = dep_id 37 | redis.call('srem', dependencies_key .. dep_id, tid) 38 | end 39 | -- remove dependents set 40 | redis.call('del', dependents_key .. 
tid) 41 | -- add to failed list 42 | if tid ~= task_id then 43 | failed[#failed + 1] = tid 44 | end 45 | end 46 | end 47 | 48 | return failed 49 | end) 50 | 51 | redis.register_function('publish_delayed_tasks', function(keys, argv) 52 | local queue_key = keys[1] 53 | local stream_key = keys[2] 54 | 55 | local current_time = argv[1] 56 | 57 | for i=2, #argv do 58 | local priority = argv[i] 59 | local queue = queue_key .. priority 60 | -- get and delete tasks ready to run from delayed queue 61 | local tids = redis.call('zrange', queue, 0, current_time, 'byscore') 62 | if #tids > 0 then 63 | redis.call('zremrangebyscore', queue, 0, current_time) 64 | 65 | local stream = stream_key .. priority 66 | -- add ready tasks to live queue 67 | for j=1, #tids do 68 | redis.call('xadd', stream, '*', 'task_id', tids[j], 'enqueue_time', current_time) 69 | end 70 | end 71 | end 72 | end) 73 | 74 | redis.register_function('publish_task', function(keys, argv) 75 | local stream_key = keys[1] 76 | local queue_key = keys[2] 77 | local task_key = keys[3] 78 | local dependents_key = keys[4] 79 | local dependencies_key = keys[5] 80 | local results_key = keys[6] 81 | 82 | local task_id = argv[1] 83 | local task_data = argv[2] 84 | local priority = argv[3] 85 | local score = argv[4] 86 | local expire = argv[5] 87 | local current_time = argv[6] 88 | 89 | local args 90 | if expire ~= '0' then 91 | args = {'set', task_key, task_data, 'nx', 'px', expire} 92 | else 93 | args = {'set', task_key, task_data, 'nx'} 94 | end 95 | 96 | if not redis.call(unpack(args)) then return 0 end 97 | 98 | local modified = 0 99 | -- additional args are dependencies for task 100 | for i=7, #argv do 101 | local dep_id = argv[i] 102 | -- update dependency DAG if dependency exists 103 | if redis.call('exists', results_key .. dep_id) ~= 1 then 104 | modified = modified + 1 105 | redis.call('sadd', dependencies_key .. task_id, dep_id) 106 | redis.call('sadd', dependents_key .. dep_id, task_id) 107 | end 108 | end 109 | 110 | -- if there are dependencies don't queue yet 111 | if modified == 0 then 112 | -- delayed queue 113 | if score ~= '0' then 114 | redis.call('zadd', queue_key .. priority, score, task_id) 115 | -- live queue 116 | else 117 | return redis.call('xadd', stream_key .. priority, '*', 'task_id', task_id, 'enqueue_time', current_time) 118 | end 119 | end 120 | 121 | return 1 122 | end) 123 | 124 | redis.register_function('read_streams', function(keys, argv) 125 | local stream_key = keys[1] 126 | local group_name = keys[2] 127 | local consumer_name = keys[3] 128 | 129 | local count = tonumber(argv[1]) 130 | local idle = argv[2] 131 | 132 | local entries = {} 133 | 134 | -- additional arguments are the names of custom priorities 135 | for i = 3, #argv do 136 | local stream = stream_key .. 
argv[i] 137 | local entry_table = {} 138 | -- first, check for idle messages to reclaim 139 | local reclaimed = redis.call('xautoclaim', stream, group_name, consumer_name, idle, '0-0', 'count', count)[2] 140 | -- output format should match XREADGROUP 141 | if #reclaimed > 0 then 142 | for j=1, #reclaimed do entry_table[j] = reclaimed[j] end 143 | count = count - #reclaimed 144 | end 145 | -- next, check for new messages 146 | if count > 0 then 147 | local res = redis.call('xreadgroup', 'group', group_name, consumer_name, 'count', count, 'streams', stream, '>') 148 | local read = res and res[1][2] 149 | if read then 150 | -- this is the table we just created 151 | local len = #entry_table 152 | for j = 1, #read do entry_table[len + j] = read[j] end 153 | count = count - #read 154 | end 155 | end 156 | 157 | if #entry_table > 0 then 158 | table.insert(entries, {stream, entry_table}) 159 | end 160 | if count <= 0 then break end 161 | end 162 | 163 | return entries 164 | end) 165 | 166 | redis.register_function('update_dependents', function(keys, argv) 167 | local dependents_key = keys[1] 168 | local dependencies_key = keys[2] 169 | local task_id = keys[3] 170 | 171 | local runnable = {} 172 | 173 | local deps = redis.call('smembers', dependents_key .. task_id) 174 | for i = 1, #deps do 175 | local dep = deps[i] 176 | redis.call('srem', dependencies_key .. dep, task_id) 177 | -- if no more dependencies are left, it's time to enqueue! 178 | if redis.call('scard', dependencies_key .. dep) == 0 then 179 | table.insert(runnable, dep) 180 | end 181 | end 182 | 183 | redis.call('del', dependents_key .. task_id, dependencies_key .. task_id) 184 | 185 | return runnable 186 | end) 187 | 188 | redis.register_function('refresh_timeout', function(keys, argv) 189 | local stream_key = keys[1] 190 | local group_name = keys[2] 191 | 192 | local consumer = argv[1] 193 | local message_id = argv[2] 194 | 195 | if #redis.call('xpending', stream_key, group_name, message_id, message_id, 1, consumer) > 0 then 196 | redis.call('xclaim', stream_key, group_name, consumer, 0, message_id, 'justid') 197 | return true 198 | end 199 | return false 200 | end) 201 | 202 | redis.register_function('schedule_cron_job', function(keys, argv) 203 | local cron_key = keys[1] 204 | local queue_key = keys[2] 205 | local data_key = keys[3] 206 | local task_key = keys[4] 207 | 208 | local task_id = argv[1] 209 | local score = argv[2] 210 | local member = argv[3] 211 | 212 | -- check if another worker already handled this 213 | if redis.call('zadd', cron_key, 'gt', 'ch', score, member) ~= 0 then 214 | redis.call('zadd', queue_key, score, task_id) 215 | redis.call('copy', data_key, task_key) 216 | end 217 | end) 218 | -------------------------------------------------------------------------------- /docs/worker.rst: -------------------------------------------------------------------------------- 1 | Workers 2 | ======= 3 | 4 | Worker lifespan 5 | --------------- 6 | 7 | Workers accept a ``lifespan`` parameter, which allows you to define task dependencies in a type-safe way, as well as run code at startup/shutdown if desired. 8 | 9 | First, define the dependencies in a custom class: 10 | 11 | .. code-block:: python 12 | 13 | from dataclasses import dataclass 14 | from httpx import AsyncClient 15 | 16 | @dataclass 17 | class WorkerContext: 18 | """ 19 | Type safe way of defining the dependencies of your tasks. 20 | e.g. HTTP client, database connection, settings. 
21 | """ 22 | http_client: AsyncClient 23 | 24 | Now, tasks will be able to access the ``http_client`` in order to use API endpoints. 25 | 26 | Next, create an async context manager to run at worker creation/teardown. Use this to set up and tear down your dependencies, as well as run extra code if needed. 27 | 28 | .. code-block:: python 29 | 30 | from contextlib import asynccontextmanager 31 | from typing import AsyncGenerator 32 | from streaq import Worker 33 | 34 | @asynccontextmanager 35 | async def lifespan() -> AsyncGenerator[WorkerContext]: 36 | # here we run code if desired after worker start up 37 | # yield our dependencies as an instance of the class 38 | async with AsyncClient() as http_client: 39 | yield WorkerContext(http_client) 40 | # here we run code if desired before worker shutdown 41 | 42 | Now, tasks created for the worker will have access to the dependencies like so: 43 | 44 | .. code-block:: python 45 | 46 | worker = Worker(lifespan=lifespan) 47 | 48 | @worker.task() 49 | async def fetch(url: str) -> int: 50 | res = await worker.context.http_client.get(url) 51 | return len(res.text) 52 | 53 | Custom serializer/deserializer 54 | ------------------------------ 55 | 56 | If desired, you can use a custom serializing scheme for speed or security reasons: 57 | 58 | .. code-block:: python 59 | 60 | import json 61 | 62 | worker = Worker(serializer=json.dumps, deserializer=json.loads) 63 | 64 | Signature validation before deserialization 65 | ------------------------------------------- 66 | 67 | Pickle is great for serializing/deserializing Python objects. However, it presents security risks when we're using Redis, as an attacker who gains access to the Redis database would be able to run arbitrary code. You can protect against this attack vector by passing a ``signing_secret`` to the worker. The signing key ensures corrupted data from Redis will not be unpickled. 68 | 69 | .. code-block:: python 70 | 71 | worker = Worker(signing_secret="MY-SECRET-KEY") 72 | 73 | The easiest way to generate a new key is with the ``secrets`` module: 74 | 75 | .. code-block:: python 76 | 77 | import secrets 78 | print(secrets.token_urlsafe(32)) 79 | 80 | Other configuration options 81 | --------------------------- 82 | 83 | ``Worker`` accepts a variety of other configuration options: 84 | 85 | - ``redis_url``: the URI for connecting to your Redis instance 86 | - ``redis_kwargs``: additional arguments for Redis connections 87 | - ``concurrency``: the maximum number of tasks the worker can run concurrently 88 | - ``sync_concurrency``: the maximum number of tasks the worker can run simultaneously in separate threads; defaults to the same as ``concurrency`` 89 | - ``queue_name``: name of the queue in Redis, can be used to create multiple queues at once 90 | - ``priorities``: a list of custom priorities for tasks, ordered from lowest to highest 91 | - ``prefetch``: the number of tasks to pre-fetch from Redis, defaults to ``concurrency``. You can set this to ``0`` to disable prefetching entirely. 
92 | - ``tz``: ``tzinfo`` controlling the time zone for the worker's cron scheduler and logs 93 | - ``handle_signals``: whether to handle signals for graceful shutdown (unavailable on Windows) 94 | - ``health_crontab``: crontab for frequency to store worker health in Redis 95 | - ``idle_timeout``: the amount of time to wait before re-enqueuing idle tasks (either prefetched tasks that don't run, or running tasks that become unresponsive) 96 | - ``anyio_backend``: either trio or asyncio, defaults to asyncio 97 | - ``anyio_kwargs``: extra arguments for anyio, see documentation `here `_ 98 | - ``sentinel_kwargs``: extra arguments to pass to sentinel connections (see below) 99 | - ``id``: a custom worker ID. You must ensure that it is unique for the specified queue name. 100 | 101 | Deploying with Redis Sentinel 102 | ----------------------------- 103 | 104 | In production environments, high availability guarantees are often needed, which is why Redis Sentinel was created. streaQ allows you to use Redis Sentinel easily: 105 | 106 | .. code-block:: python 107 | 108 | worker = Worker( 109 | redis_sentinel_master="mymaster", 110 | redis_sentinel_nodes=[ 111 | ("sentinel-1", 26379), 112 | ("sentinel-2", 26379), 113 | ("sentinel-3", 26379), 114 | ], 115 | ) 116 | 117 | If you pass in the ``redis_sentinel_nodes`` parameter, you no longer need to pass ``redis_url``. For a simple Docker Compose script to get a cluster running, see `here `_. 118 | 119 | Redis Cluster is not supported, since streaQ makes heavy use of Redis pipelines and Lua scripting, which are difficult to support on Redis Cluster. For scaling beyond a single Redis instance, it's recommended to use a separate queue for each instance and assign workers to each queue. 120 | 121 | Modularizing workers 122 | -------------------- 123 | 124 | Sometimes in large apps, having a single ``Worker`` instance is not feasible (or at the very least, cumbersome). streaQ solves this problem by allowing you to "combine" workers together, which copies all tasks and cron jobs from one worker to another: 125 | 126 | .. code-block:: python 127 | :caption: other.py 128 | 129 | from streaq import Worker 130 | 131 | other = Worker() 132 | 133 | @other.task() 134 | async def foobar() -> bool: ... 135 | 136 | .. code-block:: python 137 | :caption: main.py 138 | 139 | from anyio import run 140 | 141 | from other import foobar, other 142 | from streaq import Worker 143 | 144 | worker = Worker() 145 | worker.include(other) 146 | 147 | @worker.task() 148 | async def barfoo() -> bool: ... 149 | 150 | async def main(): 151 | async with worker: 152 | await foobar.enqueue() 153 | await barfoo.enqueue() 154 | 155 | if __name__ == "__main__": 156 | run(main) 157 | 158 | This allows for grouping tasks in whatever way you choose. You can run just ``other`` like this: 159 | 160 | .. code-block:: bash 161 | 162 | $ streaq other:other 163 | 164 | Or the main worker which will be able to run both ``foobar`` and ``barfoo``: 165 | 166 | .. code-block:: bash 167 | 168 | $ streaq main:worker 169 | 170 | Task-related functions 171 | ---------------------- 172 | 173 | Sometimes you'll want to abort tasks, fetch task info, etc. without having access to the original task object. This can be done easily: 174 | 175 | .. 
code-block:: python 176 | 177 | async with worker: 178 | print(await worker.status_by_id(my_task_id)) 179 | print(await worker.result_by_id(my_task_id)) 180 | print(await worker.info_by_id(my_task_id)) 181 | print(await worker.abort_by_id(my_task_id)) 182 | await worker.unschedule_by_id(my_task_id) 183 | -------------------------------------------------------------------------------- /streaq/ui/templates/queue.j2: -------------------------------------------------------------------------------- 1 | {% extends "base.j2" %} 2 | 3 | {% block content %} 4 |
[HTML markup stripped during extraction: the rest of queue.j2 (template lines 4-145) renders four summary cards showing {{ scheduled }} "tasks deferred" (SCHEDULED), {{ queued }} "tasks enqueued" (QUEUED), {{ running }} "tasks in execution" (RUNNING), and {{ finished }} "tasks finished" (DONE), a "streaQ" header with filter and sort controls, and a task table with columns Updated time, Function name, Status, and Task ID rendered inside a {% for task in tasks %} loop.]
146 | {% endblock %} 147 | -------------------------------------------------------------------------------- /streaq/ui/tasks.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from typing import Annotated, Any, Callable 3 | 4 | from fastapi import ( 5 | APIRouter, 6 | Depends, 7 | Form, 8 | HTTPException, 9 | Request, 10 | Response, 11 | ) 12 | from fastapi import ( 13 | status as fast_status, 14 | ) 15 | from fastapi.responses import HTMLResponse, RedirectResponse 16 | from pydantic import BaseModel 17 | 18 | from streaq import TaskStatus, Worker 19 | from streaq.constants import REDIS_RESULT, REDIS_RUNNING, REDIS_TASK 20 | from streaq.ui.deps import ( 21 | get_exception_formatter, 22 | get_result_formatter, 23 | get_worker, 24 | templates, 25 | ) 26 | from streaq.utils import gather 27 | 28 | router = APIRouter() 29 | _fmt = "%Y-%m-%d %H:%M:%S" 30 | 31 | 32 | class TaskData(BaseModel): 33 | color: str 34 | text_color: str 35 | enqueue_time: str 36 | task_id: str 37 | status: TaskStatus 38 | fn_name: str 39 | sort_time: datetime 40 | url: str 41 | 42 | 43 | async def _get_context( 44 | worker: Worker[Any], task_url: str, descending: bool 45 | ) -> dict[str, Any]: 46 | async with worker.redis.pipeline(transaction=False) as pipe: 47 | delayed = [ 48 | pipe.zrange(worker.queue_key + priority, 0, -1) 49 | for priority in worker.priorities 50 | ] 51 | commands = ( 52 | pipe.xread( 53 | {worker.stream_key + p: "0-0" for p in worker.priorities}, 54 | count=1000, 55 | ), 56 | pipe.keys(worker.prefix + REDIS_RESULT + "*"), 57 | pipe.keys(worker.prefix + REDIS_RUNNING + "*"), 58 | pipe.keys(worker.prefix + REDIS_TASK + "*"), 59 | ) 60 | _stream, _results, _running, _data = await gather(*commands) 61 | stream: set[str] = ( 62 | set(t.field_values["task_id"] for v in _stream.values() for t in v) # type: ignore 63 | if _stream 64 | else set() 65 | ) 66 | queue: set[str] = set() 67 | for r in await gather(*delayed): 68 | queue |= set(r) 69 | results = set(r.split(":")[-1] for r in _results) 70 | running = set(r.split(":")[-1] for r in _running) 71 | tasks: list[TaskData] = [] 72 | to_fetch: list[str] = list(_data | _results) 73 | serialized = await worker.redis.mget(to_fetch) if to_fetch else () # type: ignore 74 | for i, entry in enumerate(serialized): 75 | td = worker.deserialize(entry) 76 | task_id = to_fetch[i].split(":")[-1] 77 | if task_id in results: 78 | status = TaskStatus.DONE 79 | color = "success" 80 | text_color = "light" 81 | elif task_id in running: 82 | status = TaskStatus.RUNNING 83 | color = "warning" 84 | text_color = "dark" 85 | elif task_id in queue: 86 | status = TaskStatus.SCHEDULED 87 | color = "secondary" 88 | text_color = "light" 89 | else: 90 | status = TaskStatus.QUEUED 91 | color = "info" 92 | text_color = "dark" 93 | ts = td.get("ft") or td.get("t") or 0 94 | dt = datetime.fromtimestamp(ts / 1000, tz=worker.tz) 95 | tasks.append( 96 | TaskData( 97 | color=color, 98 | text_color=text_color, 99 | enqueue_time=dt.strftime(_fmt), 100 | status=status, 101 | task_id=task_id, 102 | fn_name=td["f"], 103 | sort_time=dt, 104 | url=task_url.format(task_id=task_id), 105 | ) 106 | ) 107 | tasks.sort(key=lambda td: td.sort_time, reverse=descending) 108 | return { 109 | "running": len(running), 110 | "queued": len(stream) - len(running), 111 | "scheduled": len(queue), 112 | "finished": len(results), 113 | "functions": list(worker.registry.keys()), 114 | "tasks": tasks, 115 | "title": worker.queue_name, 116 | } 117 | 
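# get_context() below assembles the template context for the queue views: it resolves
# the task-detail and filter URLs from the incoming request, passes the requested sort
# order through to _get_context() for the Redis queries, applies the optional
# function-name and status filters, and truncates the rendered list to 100 tasks.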
118 | 119 | async def get_context( 120 | request: Request, 121 | worker: Worker[Any], 122 | functions: list[str] | None = None, 123 | statuses: list[TaskStatus] | None = None, 124 | sort: str = "desc", 125 | ) -> dict[str, Any]: 126 | task_url = request.url_for("get_task", task_id="{task_id}").path 127 | tasks_filter_url = request.url_for("filter_tasks").path 128 | 129 | descending = sort == "desc" 130 | context = await _get_context(worker, task_url, descending) 131 | context["tasks_filter_url"] = tasks_filter_url 132 | 133 | if functions: 134 | context["tasks"] = [t for t in context["tasks"] if t.fn_name in functions] 135 | if statuses: 136 | context["tasks"] = [t for t in context["tasks"] if t.status in statuses] 137 | 138 | context["tasks"] = context["tasks"][:100] 139 | return context 140 | 141 | 142 | @router.get("/") 143 | async def get_root(request: Request) -> RedirectResponse: 144 | url = request.url_for("get_tasks").path 145 | return RedirectResponse(url, status_code=fast_status.HTTP_303_SEE_OTHER) 146 | 147 | 148 | @router.get("/queue", response_class=HTMLResponse) 149 | async def get_tasks( 150 | request: Request, 151 | worker: Annotated[Worker[Any], Depends(get_worker)], 152 | ) -> Any: 153 | context = await get_context(request, worker) 154 | return templates.TemplateResponse(request, "queue.j2", context=context) 155 | 156 | 157 | @router.patch("/queue", response_class=HTMLResponse) 158 | async def filter_tasks( 159 | request: Request, 160 | worker: Annotated[Worker[Any], Depends(get_worker)], 161 | sort: Annotated[str, Form()], 162 | functions: Annotated[list[str] | None, Form()] = None, 163 | statuses: Annotated[list[TaskStatus] | None, Form()] = None, 164 | ) -> Any: 165 | context = await get_context(request, worker, functions, statuses, sort) 166 | return templates.TemplateResponse(request, "table.j2", context=context) 167 | 168 | 169 | @router.get("/task/{task_id}", response_class=HTMLResponse) 170 | async def get_task( 171 | request: Request, 172 | worker: Annotated[Worker[Any], Depends(get_worker)], 173 | result_formatter: Annotated[Callable[[Any], str], Depends(get_result_formatter)], 174 | exception_formatter: Annotated[ 175 | Callable[[BaseException], str], Depends(get_exception_formatter) 176 | ], 177 | task_id: str, 178 | ) -> Any: 179 | status = await worker.status_by_id(task_id) 180 | if status == TaskStatus.DONE: 181 | result = await worker.result_by_id(task_id) 182 | function = result.fn_name 183 | created_time = result.created_time 184 | is_done = True 185 | start_dt = datetime.fromtimestamp(result.start_time / 1000, tz=worker.tz) 186 | end_dt = datetime.fromtimestamp(result.finish_time / 1000, tz=worker.tz) 187 | if result.success: 188 | output = result_formatter(result.result) 189 | else: 190 | output = exception_formatter(result.exception) 191 | task_try = result.tries 192 | worker_id = result.worker_id 193 | enqueue_dt = datetime.fromtimestamp(result.enqueue_time / 1000, tz=worker.tz) 194 | extra = { 195 | "enqueue_time": enqueue_dt.strftime(_fmt), 196 | "success": result.success, 197 | "result": output, 198 | "start_time": start_dt.strftime(_fmt), 199 | "finish_time": end_dt.strftime(_fmt), 200 | } 201 | else: 202 | info = await worker.info_by_id(task_id) 203 | if not info: 204 | raise HTTPException( 205 | status_code=fast_status.HTTP_404_NOT_FOUND, detail="Task not found!" 
206 | ) 207 | function = info.fn_name 208 | created_time = info.created_time 209 | worker_id = None 210 | is_done = False 211 | if info.scheduled: 212 | schedule = info.scheduled.strftime(_fmt) 213 | else: 214 | schedule = None 215 | task_try = info.tries 216 | extra = { 217 | "scheduled": schedule, 218 | "dependencies": len(info.dependencies), 219 | "dependents": len(info.dependents), 220 | } 221 | if status == TaskStatus.DONE: 222 | color = "success" 223 | text_color = "light" 224 | elif status == TaskStatus.RUNNING: 225 | color = "warning" 226 | text_color = "dark" 227 | elif status == TaskStatus.SCHEDULED: 228 | color = "secondary" 229 | text_color = "light" 230 | else: 231 | color = "info" 232 | text_color = "dark" 233 | 234 | created_dt = datetime.fromtimestamp(created_time / 1000, tz=worker.tz) 235 | return templates.TemplateResponse( 236 | request, 237 | "task.j2", 238 | context={ 239 | "color": color, 240 | "function": function, 241 | "is_done": is_done, 242 | "created_time": created_dt.strftime(_fmt), 243 | "text_color": text_color, 244 | "title": "task", 245 | "status": status.value, 246 | "task_id": task_id, 247 | "task_abort_url": request.url_for("abort_task", task_id=task_id).path, 248 | "task_try": task_try, 249 | "worker_id": worker_id, 250 | **extra, 251 | }, 252 | ) 253 | 254 | 255 | @router.delete("/task/{task_id}") 256 | async def abort_task( 257 | request: Request, 258 | response: Response, 259 | worker: Annotated[Worker[Any], Depends(get_worker)], 260 | task_id: str, 261 | ) -> None: 262 | await worker.abort_by_id(task_id, timeout=3) 263 | response.headers["HX-Redirect"] = request.url_for("get_tasks").path 264 | -------------------------------------------------------------------------------- /streaq/task.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | from dataclasses import dataclass, field 4 | from datetime import datetime, timedelta 5 | from enum import Enum 6 | from typing import TYPE_CHECKING, Any, Generator, Generic, Iterable 7 | from uuid import uuid4 8 | 9 | from anyio import fail_after 10 | 11 | from streaq.constants import REDIS_TASK 12 | from streaq.types import AsyncTask, C, P, POther, R, ROther 13 | from streaq.utils import StreaqError, datetime_ms, now_ms, to_ms, to_seconds 14 | 15 | if TYPE_CHECKING: # pragma: no cover 16 | from streaq.worker import Worker 17 | 18 | 19 | class StreaqRetry(StreaqError): 20 | """ 21 | An exception you can manually raise in your tasks to make sure the task 22 | is retried. 23 | 24 | :param delay: 25 | amount of time to wait before retrying the task; if None and schedule 26 | is not passed either, will be the number of tries squared, in seconds 27 | :param schedule: specific datetime to retry the task at 28 | """ 29 | 30 | def __init__( 31 | self, 32 | *args: Any, 33 | delay: timedelta | int | None = None, 34 | schedule: datetime | None = None, 35 | ): 36 | super().__init__(*args) 37 | self.delay = delay 38 | self.schedule = schedule 39 | 40 | 41 | class TaskStatus(str, Enum): 42 | """ 43 | Enum of possible task statuses: 44 | """ 45 | 46 | NOT_FOUND = "missing" 47 | QUEUED = "queued" 48 | RUNNING = "running" 49 | SCHEDULED = "scheduled" 50 | DONE = "done" 51 | 52 | 53 | @dataclass(frozen=True) 54 | class TaskInfo: 55 | """ 56 | Dataclass containing information about a running or enqueued task. 
57 | """ 58 | 59 | fn_name: str 60 | created_time: int 61 | tries: int 62 | scheduled: datetime | None 63 | dependencies: set[str] 64 | dependents: set[str] 65 | 66 | 67 | @dataclass(frozen=True) 68 | class TaskResult(Generic[R]): 69 | """ 70 | Dataclass wrapping the result of a task with additional information 71 | like run time and whether execution terminated successfully. 72 | """ 73 | 74 | fn_name: str 75 | created_time: int 76 | enqueue_time: int 77 | success: bool 78 | start_time: int 79 | finish_time: int 80 | tries: int 81 | worker_id: str 82 | _result: R | BaseException 83 | 84 | @property 85 | def result(self) -> R: 86 | if not self.success: 87 | raise StreaqError( 88 | "Can't access result for a failed task, use TaskResult.exception " 89 | "instead!" 90 | ) 91 | return self._result # type: ignore 92 | 93 | @property 94 | def exception(self) -> BaseException: 95 | if self.success: 96 | raise StreaqError( 97 | "Can't access exception for a successful task, use TaskResult.result " 98 | "instead!" 99 | ) 100 | return self._result # type: ignore 101 | 102 | 103 | @dataclass 104 | class Task(Generic[R]): 105 | """ 106 | Represents a task that has been enqueued or scheduled. 107 | 108 | Awaiting the object directly will enqueue it. 109 | """ 110 | 111 | args: tuple[Any, ...] 112 | kwargs: dict[str, Any] 113 | parent: RegisteredTask[Any, Any, R] 114 | id: str = field(default_factory=lambda: uuid4().hex) 115 | _after: Task[Any] | None = None 116 | after: list[str] = field(default_factory=lambda: []) 117 | delay: timedelta | int | None = None 118 | schedule: datetime | str | None = None 119 | priority: str | None = None 120 | _triggers: Task[Any] | None = None 121 | 122 | def start( 123 | self, 124 | after: str | Iterable[str] | None = None, 125 | delay: timedelta | int | None = None, 126 | schedule: datetime | str | None = None, 127 | priority: str | None = None, 128 | ) -> Task[R]: 129 | """ 130 | Configure the task to modify schedule, queue, or dependencies. 131 | 132 | :param after: task ID(s) to wait for before running this task 133 | :param delay: duration to wait before running the task 134 | :param schedule: 135 | datetime at which to run the task, or crontab for repeated scheduling, 136 | follows the specification 137 | `here `_. 138 | :param priority: priority queue to insert the task 139 | 140 | :return: self 141 | """ 142 | # merge _after and after into a single list[str] 143 | if isinstance(after, str): 144 | self.after.append(after) 145 | elif after: 146 | self.after.extend(after) 147 | self.delay = delay 148 | self.schedule = schedule 149 | self.priority = priority 150 | if (delay and schedule) or (delay and after) or (schedule and after): 151 | raise StreaqError( 152 | "Use one of 'delay', 'schedule', or 'after' when enqueuing tasks, not " 153 | "multiple!" 154 | ) 155 | return self 156 | 157 | async def _enqueue(self) -> Task[R]: 158 | """ 159 | This is called when the task is awaited. 
160 | """ 161 | if self._after: 162 | self.after.append(self._after.id) 163 | enqueue_time = now_ms() 164 | data = self.serialize(enqueue_time) 165 | self.priority = self.priority or self.parent.worker.priorities[-1] 166 | expire = to_ms(self.parent.expire or 0) 167 | if self.schedule: 168 | if isinstance(self.schedule, str): 169 | score = self.parent.worker.next_run(self.schedule) 170 | # add to cron registry 171 | async with self.parent.worker.redis.pipeline(transaction=False) as pipe: 172 | pipe.set(self.parent.worker.cron_data_key + self.id, data) 173 | pipe.hset( 174 | self.parent.worker.cron_registry_key, {self.id: self.schedule} 175 | ) 176 | pipe.zadd(self.parent.worker.cron_schedule_key, {self.id: score}) 177 | pipe.fcall( 178 | "publish_task", 179 | keys=[ 180 | self.parent.worker.stream_key, 181 | self.parent.worker.queue_key, 182 | self.task_key(REDIS_TASK), 183 | self.parent.worker.dependents_key, 184 | self.parent.worker.dependencies_key, 185 | self.parent.worker.results_key, 186 | ], 187 | args=[self.id, data, self.priority, score, expire, enqueue_time] 188 | + self.after, 189 | ) 190 | return self 191 | score = datetime_ms(self.schedule) 192 | elif self.delay is not None: 193 | score = enqueue_time + to_ms(self.delay) 194 | else: 195 | score = 0 196 | await self.parent.worker.redis.fcall( 197 | "publish_task", 198 | keys=[ 199 | self.parent.worker.stream_key, 200 | self.parent.worker.queue_key, 201 | self.task_key(REDIS_TASK), 202 | self.parent.worker.dependents_key, 203 | self.parent.worker.dependencies_key, 204 | self.parent.worker.results_key, 205 | ], 206 | args=[self.id, data, self.priority, score, expire, enqueue_time] 207 | + self.after, 208 | ) 209 | return self 210 | 211 | def then( 212 | self, task: RegisteredTask[C, POther, ROther], **kwargs: Any 213 | ) -> Task[ROther]: 214 | """ 215 | Enqueues the given task as a dependent of this one. Positional arguments will 216 | come from the previous task's output (tuple outputs will be unpacked), and any 217 | additional arguments can be passed as kwargs. 218 | 219 | :param task: task to feed output to 220 | 221 | :return: task object for newly created, dependent task 222 | """ 223 | self._triggers = Task((), kwargs, task) 224 | self._triggers._after = self 225 | return self._triggers 226 | 227 | async def _chain(self) -> Task[R]: 228 | # traverse backwards 229 | if self._after: 230 | await self._after 231 | return await self._enqueue() 232 | 233 | def __hash__(self) -> int: 234 | return hash(self.id) 235 | 236 | def __await__(self) -> Generator[Any, None, Task[R]]: 237 | return self._chain().__await__() 238 | 239 | def __or__(self, other: RegisteredTask[C, POther, ROther]) -> Task[ROther]: 240 | self._triggers = Task((), {}, other) 241 | self._triggers._after = self 242 | return self._triggers 243 | 244 | def task_key(self, mid: str) -> str: 245 | return self.parent.worker.prefix + mid + self.id 246 | 247 | def serialize(self, enqueue_time: int) -> Any: 248 | """ 249 | Serializes the task data for sending to the queue. 
250 | 251 | :param enqueue_time: the time at which the task was enqueued 252 | 253 | :return: serialized task data 254 | """ 255 | try: 256 | data = { 257 | "f": self.parent.fn_name, 258 | "a": self.args, 259 | "k": self.kwargs, 260 | "t": enqueue_time, 261 | } 262 | if self._after: 263 | data["A"] = self._after.id 264 | if self._triggers: 265 | data["T"] = self._triggers.id 266 | return self.parent.worker.serialize(data) 267 | except Exception as e: 268 | raise StreaqError(f"Unable to serialize task {self.parent.fn_name}!") from e 269 | 270 | async def status(self) -> TaskStatus: 271 | """ 272 | Fetch the current status of the task. 273 | 274 | :return: current task status 275 | """ 276 | return await self.parent.worker.status_by_id(self.id) 277 | 278 | async def result(self, timeout: timedelta | int | None = None) -> TaskResult[R]: 279 | """ 280 | Wait for and return the task's result, optionally with a timeout. 281 | 282 | :param timeout: amount of time to wait before raising a `TimeoutError` 283 | 284 | :return: wrapped result object 285 | """ 286 | return await self.parent.worker.result_by_id(self.id, timeout=timeout) 287 | 288 | async def abort(self, timeout: timedelta | int = 5) -> bool: 289 | """ 290 | Notify workers that the task should be aborted. 291 | 292 | :param timeout: how long to wait to confirm the abort was successful 293 | 294 | :return: whether the task was aborted successfully 295 | """ 296 | return await self.parent.worker.abort_by_id(self.id, timeout=timeout) 297 | 298 | async def info(self) -> TaskInfo | None: 299 | """ 300 | Fetch info about a previously enqueued task. 301 | 302 | :return: task info, unless task has finished or doesn't exist 303 | """ 304 | return await self.parent.worker.info_by_id(self.id) 305 | 306 | async def unschedule(self) -> None: 307 | """ 308 | Stop scheduling the repeating task if registered. 309 | """ 310 | await self.parent.worker.unschedule_by_id(self.id) 311 | 312 | 313 | @dataclass(frozen=True) 314 | class RegisteredTask(Generic[C, P, R]): 315 | fn: AsyncTask[P, R] 316 | expire: timedelta | int | None 317 | max_tries: int | None 318 | silent: bool 319 | timeout: timedelta | int | None 320 | ttl: timedelta | int | None 321 | unique: bool 322 | fn_name: str 323 | worker: Worker[C] 324 | crontab: str | None = None 325 | 326 | def enqueue( 327 | self, 328 | *args: P.args, 329 | **kwargs: P.kwargs, 330 | ) -> Task[R]: 331 | """ 332 | Create a task to be sent to the queue for later execution by an 333 | active worker. Though this method isn't async, the ``Task`` object it 334 | returns should be awaited, since awaiting it is what actually enqueues it. 335 | """ 336 | return Task(args, kwargs, self) 337 | 338 | async def run(self, *args: P.args, **kwargs: P.kwargs) -> R: 339 | """ 340 | Run the task in the local event loop with the given params and return the 341 | result. This skips enqueuing and result storing in Redis. 
342 | """ 343 | with fail_after(to_seconds(self.timeout)): 344 | return await self.fn(*args, **kwargs) 345 | -------------------------------------------------------------------------------- /tests/test_worker.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import pickle 4 | import secrets 5 | import signal 6 | import subprocess 7 | import sys 8 | from contextlib import asynccontextmanager 9 | from dataclasses import dataclass 10 | from typing import Any, AsyncIterator 11 | from uuid import uuid4 12 | 13 | import pytest 14 | from anyio import create_task_group, sleep 15 | 16 | from streaq.constants import REDIS_TASK 17 | from streaq.utils import StreaqError, gather 18 | from streaq.worker import Worker 19 | 20 | NAME_STR = "Freddy" 21 | pytestmark = pytest.mark.anyio 22 | 23 | 24 | async def test_worker_redis(worker: Worker): 25 | async with worker: 26 | await worker.redis.ping() 27 | 28 | 29 | @dataclass 30 | class WorkerContext: 31 | name: str 32 | 33 | 34 | @asynccontextmanager 35 | async def deps() -> AsyncIterator[WorkerContext]: 36 | yield WorkerContext(NAME_STR) 37 | 38 | 39 | async def test_lifespan(redis_url: str): 40 | worker = Worker(redis_url=redis_url, lifespan=deps, queue_name=uuid4().hex) 41 | 42 | @worker.task() 43 | async def foobar() -> str: 44 | return worker.context.name 45 | 46 | async with create_task_group() as tg: 47 | await tg.start(worker.run_async) 48 | task = await foobar.enqueue() 49 | res = await task.result(3) 50 | assert res.success and res.result == NAME_STR 51 | tg.cancel_scope.cancel() 52 | 53 | 54 | async def test_health_check(redis_url: str): 55 | worker = Worker( 56 | redis_url=redis_url, 57 | redis_kwargs={"decode_responses": True}, 58 | health_crontab="* * * * * * *", 59 | queue_name=uuid4().hex, 60 | ) 61 | async with create_task_group() as tg: 62 | await tg.start(worker.run_async) 63 | await sleep(2) 64 | worker_health = await worker.redis.get(f"{worker._health_key}:{worker.id}") 65 | redis_health = await worker.redis.get(worker._health_key + ":redis") 66 | assert worker_health is not None 67 | assert redis_health is not None 68 | tg.cancel_scope.cancel() 69 | 70 | 71 | async def test_queue_size(worker: Worker): 72 | async with worker: 73 | assert await worker.queue_size() == 0 74 | 75 | 76 | def raise_error(*arg, **kwargs) -> Any: 77 | raise Exception("Couldn't serialize/deserialize!") 78 | 79 | 80 | async def test_bad_serializer(redis_url: str): 81 | worker = Worker(redis_url=redis_url, serializer=raise_error, queue_name=uuid4().hex) 82 | 83 | @worker.task() 84 | async def foobar() -> None: 85 | print("This can't print!") 86 | 87 | async with worker: 88 | with pytest.raises(StreaqError): 89 | await foobar.enqueue() 90 | 91 | 92 | async def test_bad_deserializer(redis_url: str): 93 | worker = Worker( 94 | redis_url=redis_url, deserializer=raise_error, queue_name=uuid4().hex 95 | ) 96 | 97 | @worker.task() 98 | async def foobar() -> None: 99 | print("This can't print!") 100 | 101 | worker.burst = True 102 | async with create_task_group() as tg: 103 | await tg.start(worker.run_async) 104 | task = await foobar.enqueue() 105 | with pytest.raises(StreaqError): 106 | await task.result(3) 107 | 108 | 109 | async def test_custom_serializer(worker: Worker): 110 | worker.serializer = json.dumps 111 | worker.deserializer = json.loads 112 | 113 | @worker.task() 114 | async def foobar() -> None: 115 | pass 116 | 117 | async with create_task_group() as tg: 118 | await 
tg.start(worker.run_async) 119 | task = await foobar.enqueue() 120 | assert (await task.result(3)).success 121 | tg.cancel_scope.cancel() 122 | 123 | 124 | async def test_uninitialized_worker(worker: Worker): 125 | @worker.task() 126 | async def foobar() -> None: 127 | print(worker.context) 128 | 129 | with pytest.raises(StreaqError): 130 | await foobar.run() 131 | with pytest.raises(StreaqError): 132 | await worker.redis.ping() 133 | 134 | 135 | async def test_active_tasks(worker: Worker): 136 | @worker.task() 137 | async def foo() -> None: 138 | await sleep(10) 139 | 140 | n_tasks = 5 141 | tasks = [foo.enqueue() for _ in range(n_tasks)] 142 | async with create_task_group() as tg: 143 | await tg.start(worker.run_async) 144 | await worker.enqueue_many(tasks) 145 | await sleep(3) 146 | assert len(worker) >= n_tasks 147 | tg.cancel_scope.cancel() 148 | 149 | 150 | async def test_handle_signal(worker: Worker): 151 | @worker.task() 152 | async def foo() -> None: 153 | await sleep(3) 154 | 155 | async with create_task_group() as tg: 156 | await tg.start(worker.run_async) 157 | await foo.enqueue() 158 | await sleep(1) 159 | assert len(worker) > 0 160 | os.kill(os.getpid(), signal.SIGINT) 161 | await sleep(1) 162 | assert len(worker) == 0 163 | 164 | 165 | async def test_reclaim_backed_up(redis_url: str): 166 | queue_name = uuid4().hex 167 | worker = Worker( 168 | concurrency=2, redis_url=redis_url, queue_name=queue_name, idle_timeout=1 169 | ) 170 | worker2 = Worker(redis_url=redis_url, queue_name=queue_name, idle_timeout=1) 171 | 172 | async def foo() -> None: 173 | await sleep(3) 174 | 175 | registered = worker.task()(foo) 176 | worker2.task()(foo) 177 | 178 | # enqueue tasks 179 | tasks = [registered.enqueue() for _ in range(4)] 180 | async with create_task_group() as tg: 181 | # run first worker which will pick up all tasks 182 | await tg.start(worker.run_async) 183 | await worker.enqueue_many(tasks) 184 | # run second worker which will pick up prefetched tasks 185 | await tg.start(worker2.run_async) 186 | 187 | results = await gather(*[t.result(5) for t in tasks]) 188 | assert any(r.worker_id == worker2.id for r in results) 189 | tg.cancel_scope.cancel() 190 | 191 | 192 | async def test_reclaim_idle_task(redis_url: str): 193 | worker2 = Worker(redis_url=redis_url, queue_name="reclaim", idle_timeout=3) 194 | 195 | @worker2.task(name="foo") 196 | async def foo() -> None: 197 | await sleep(2) 198 | 199 | # enqueue task 200 | task = foo.enqueue() 201 | # run separate worker which will pick up task 202 | worker = subprocess.Popen([sys.executable, "tests/failure.py", redis_url, task.id]) 203 | await sleep(1) 204 | # kill worker abruptly to disallow cleanup 205 | os.kill(worker.pid, signal.SIGKILL) 206 | worker.wait() 207 | 208 | async with create_task_group() as tg: 209 | await tg.start(worker2.run_async) 210 | assert (await task.result(8)).success 211 | tg.cancel_scope.cancel() 212 | 213 | 214 | async def test_change_cron_schedule(redis_url: str): 215 | async def foo() -> None: 216 | pass 217 | 218 | worker = Worker(redis_url=redis_url, queue_name=uuid4().hex) 219 | foo1 = worker.cron("0 0 1 1 *")(foo) 220 | async with create_task_group() as tg: 221 | await tg.start(worker.run_async) 222 | await sleep(2) 223 | assert worker.next_run(foo1.crontab) == int( # type: ignore 224 | (await worker.redis.zscore(worker.cron_schedule_key, foo1.fn_name)) or 0 225 | ) 226 | tg.cancel_scope.cancel() 227 | 228 | worker2 = Worker(redis_url=redis_url, queue_name=worker.queue_name) 229 | worker2.cron("1 0 1 1 
*")(foo) # 1 minute later 230 | async with create_task_group() as tg: 231 | await tg.start(worker2.run_async) 232 | await sleep(2) 233 | assert worker.next_run(foo1.crontab) != int( # type: ignore 234 | (await worker2.redis.zscore(worker.cron_schedule_key, foo1.fn_name)) or 0 235 | ) 236 | tg.cancel_scope.cancel() 237 | 238 | 239 | async def test_signed_data(redis_url: str): 240 | worker = Worker( 241 | redis_url=redis_url, 242 | queue_name=uuid4().hex, 243 | signing_secret=secrets.token_urlsafe(32), 244 | ) 245 | 246 | @worker.task() 247 | async def foo() -> str: 248 | return "bar" 249 | 250 | async with create_task_group() as tg: 251 | await tg.start(worker.run_async) 252 | task = await foo.enqueue() 253 | res = await task.result(3) 254 | assert res.success and res.result == "bar" 255 | tg.cancel_scope.cancel() 256 | 257 | 258 | async def test_sign_non_binary_data(redis_url: str): 259 | worker = Worker( 260 | redis_url=redis_url, 261 | queue_name=uuid4().hex, 262 | signing_secret=secrets.token_urlsafe(32), 263 | serializer=json.dumps, 264 | ) 265 | 266 | @worker.task() 267 | async def foo() -> str: 268 | return "bar" 269 | 270 | async with worker: 271 | with pytest.raises(StreaqError): 272 | await foo.enqueue() 273 | 274 | 275 | async def test_corrupt_signed_data(redis_url: str): 276 | worker = Worker( 277 | redis_url=redis_url, 278 | queue_name=uuid4().hex, 279 | handle_signals=False, 280 | signing_secret=secrets.token_urlsafe(32), 281 | ) 282 | 283 | @worker.task() 284 | async def foo() -> str: 285 | return "bar" 286 | 287 | async with worker: 288 | task = await foo.enqueue() 289 | await worker.redis.set( 290 | task.task_key(REDIS_TASK), pickle.dumps({"f": "This is an attack!"}) 291 | ) 292 | 293 | async with create_task_group() as tg: 294 | await tg.start(worker.run_async) 295 | res = await task.result(5) 296 | assert not res.success and isinstance(res.exception, StreaqError) 297 | tg.cancel_scope.cancel() 298 | 299 | 300 | async def test_enqueue_many(worker: Worker): 301 | @worker.task() 302 | async def foobar(val: int) -> int: 303 | await sleep(1) 304 | return val 305 | 306 | async with worker: 307 | tasks = [foobar.enqueue(i) for i in range(10)] 308 | delayed = foobar.enqueue(1).start(delay=1) 309 | depends = foobar.enqueue(1).start(after=delayed.id) 310 | cron = foobar.enqueue(1).start(schedule="0 0 1 1 *") 311 | tasks.extend([delayed, depends, cron]) 312 | await worker.enqueue_many(tasks) 313 | assert await worker.queue_size() >= 10 314 | 315 | 316 | async def test_invalid_task_context(worker: Worker): 317 | with pytest.raises(StreaqError): 318 | worker.task_context() 319 | 320 | 321 | async def test_custom_worker_id(redis_url: str): 322 | worker_id = uuid4().hex 323 | worker = Worker(redis_url=redis_url, queue_name=uuid4().hex, id=worker_id) 324 | 325 | assert worker.id == worker_id 326 | 327 | 328 | async def test_include_worker(redis_url: str, worker: Worker): 329 | if worker._sentinel: 330 | worker2 = Worker( 331 | sentinel_nodes=[ 332 | ("sentinel-1", 26379), 333 | ("sentinel-2", 26379), 334 | ("sentinel-3", 26379), 335 | ], 336 | sentinel_master="mymaster", 337 | queue_name=worker.queue_name, 338 | anyio_backend=worker.anyio_backend, # type: ignore 339 | ) 340 | else: 341 | worker2 = Worker( 342 | redis_url=redis_url, 343 | queue_name=worker.queue_name, 344 | anyio_backend=worker.anyio_backend, # type: ignore 345 | ) 346 | 347 | @worker2.task() 348 | async def foobar() -> None: 349 | await sleep(0) 350 | 351 | worker.include(worker2) 352 | async with create_task_group() as 
tg: 353 | await tg.start(worker.run_async) 354 | task = await foobar.enqueue() 355 | res = await task.result(3) 356 | assert res.success 357 | tg.cancel_scope.cancel() 358 | 359 | 360 | async def test_bad_include(redis_url: str, worker: Worker): 361 | if worker._sentinel: 362 | worker2 = Worker( 363 | sentinel_nodes=[ 364 | ("sentinel-1", 26379), 365 | ("sentinel-2", 26379), 366 | ("sentinel-3", 26379), 367 | ], 368 | sentinel_master="mymaster", 369 | queue_name=worker.queue_name, 370 | anyio_backend=worker.anyio_backend, # type: ignore 371 | ) 372 | else: 373 | worker2 = Worker( 374 | redis_url=redis_url, 375 | queue_name=worker.queue_name, 376 | anyio_backend=worker.anyio_backend, # type: ignore 377 | ) 378 | 379 | @worker.task(name="foobar") 380 | async def foobar() -> None: 381 | await sleep(0) 382 | 383 | @worker2.task(name="foobar") 384 | async def barfoo() -> None: 385 | await sleep(0) 386 | 387 | with pytest.raises(StreaqError): 388 | worker.include(worker2) 389 | 390 | 391 | async def test_bad_include_cron(redis_url: str, worker: Worker): 392 | if worker._sentinel: 393 | worker2 = Worker( 394 | sentinel_nodes=[ 395 | ("sentinel-1", 26379), 396 | ("sentinel-2", 26379), 397 | ("sentinel-3", 26379), 398 | ], 399 | sentinel_master="mymaster", 400 | queue_name=worker.queue_name, 401 | anyio_backend=worker.anyio_backend, # type: ignore 402 | ) 403 | else: 404 | worker2 = Worker( 405 | redis_url=redis_url, 406 | queue_name=worker.queue_name, 407 | anyio_backend=worker.anyio_backend, # type: ignore 408 | ) 409 | 410 | @worker.cron("* * * * *", name="foobar") 411 | async def foobar() -> None: 412 | await sleep(0) 413 | 414 | @worker2.cron("* * * * *", name="foobar") 415 | async def barfoo() -> None: 416 | await sleep(0) 417 | 418 | with pytest.raises(StreaqError): 419 | worker.include(worker2) 420 | 421 | 422 | async def test_include_different_queues(redis_url: str, worker: Worker): 423 | if worker._sentinel: 424 | worker2 = Worker( 425 | sentinel_nodes=[ 426 | ("sentinel-1", 26379), 427 | ("sentinel-2", 26379), 428 | ("sentinel-3", 26379), 429 | ], 430 | sentinel_master="mymaster", 431 | queue_name="other", 432 | anyio_backend=worker.anyio_backend, # type: ignore 433 | ) 434 | else: 435 | worker2 = Worker( 436 | redis_url=redis_url, 437 | queue_name="other", 438 | anyio_backend=worker.anyio_backend, # type: ignore 439 | ) 440 | 441 | @worker2.task() 442 | async def foobar() -> None: 443 | await sleep(0) 444 | 445 | with pytest.raises(StreaqError): 446 | worker.include(worker2) 447 | -------------------------------------------------------------------------------- /docs/task.rst: -------------------------------------------------------------------------------- 1 | Tasks 2 | ===== 3 | 4 | Task execution 5 | -------------- 6 | 7 | streaQ preserves arq's task execution model called "pessimistic execution": tasks aren’t removed from the queue until they’ve either succeeded or failed. If the worker shuts down, the task will remain in the queue to be picked up by another worker. ``Worker.idle_timeout`` controls how often task liveness is updated (and consequently, how quickly failed tasks can be retried). 8 | 9 | All streaQ tasks should therefore be designed to cope with being called repeatedly if they’re cancelled. If necessary, use database transactions, idempotency keys or Redis to mark when non-repeatable work has completed to avoid doing it twice. Alternatively, you can opt-out of this behavior on a per-task basis by passing ``max_tries=1`` to the task constructor. 10 | 11 | .. 
note:: 12 | Idempotency is super easy with Redis, see `here `_! 13 | 14 | streaQ handles exceptions in the following manner: 15 | 16 | * ``StreaqRetry`` exceptions result in retrying the task, sometimes after a delay (see below). 17 | * ``asyncio.CancelledError`` or ``trio.Cancelled`` exceptions result in the task failing if the task was aborted by the user, or being retried if the worker was shut down unexpectedly. 18 | * ``TimeoutError`` exceptions result in the task failing if the task took too long to run. 19 | * Any other ``Exception`` will result in the task failing. 20 | 21 | Registering tasks 22 | ----------------- 23 | 24 | In order to run tasks, they must first be registered with the worker. Let's assume we have a worker that looks like this: 25 | 26 | .. code-block:: python 27 | 28 | from streaq import Worker 29 | worker = Worker(redis_url="redis://localhost:6379") 30 | 31 | We can now register async functions with the worker: 32 | 33 | .. code-block:: python 34 | 35 | from anyio import sleep # you can just as well use asyncio or trio 36 | 37 | @worker.task() 38 | async def sleeper(time: int) -> int: 39 | await sleep(time) 40 | return time 41 | 42 | The ``task`` decorator has several optional arguments that can be used to customize behavior: 43 | 44 | - ``expire``: time after which to dequeue the task, if ``None`` will never be dequeued 45 | - ``max_tries``: maximum number of attempts before giving up if task is retried; defaults to ``3`` 46 | - ``name``: use a custom name for the task instead of the function name 47 | - ``silent``: whether to silence task startup/shutdown logs and task success/failure tracking; defaults to False 48 | - ``timeout``: amount of time to run the task before raising ``TimeoutError``; ``None`` (the default) means never timeout 49 | - ``ttl``: amount of time to store task result in Redis; defaults to 5 minutes. ``None`` means never delete results, ``0`` means never store results 50 | - ``unique``: whether to prevent more than one instance of the task running simultaneously; defaults to ``False`` for normal tasks and ``True`` for cron jobs. (Note that more than one instance may be queued, but two running at once will cause the second to fail.) 51 | 52 | Enqueuing tasks 53 | --------------- 54 | 55 | Once registered, tasks can then be queued up for execution by worker processes (with full type safety!) using the worker's async context manager: 56 | 57 | .. code-block:: python 58 | 59 | async with worker: 60 | # these two are equivalent 61 | await sleeper.enqueue(5) 62 | await sleeper.enqueue(5).start() 63 | 64 | We can also defer task execution to a later time: 65 | 66 | .. code-block:: python 67 | 68 | from datetime import datetime 69 | 70 | async with worker: 71 | await sleeper.enqueue(3).start(delay=10) # start after 10 seconds 72 | await sleeper.enqueue(3).start(schedule=datetime(...)) # start at a specific time 73 | 74 | Tasks can depend on other tasks, meaning they won't be enqueued until their dependencies have finished successfully. If the dependency fails, the dependent task will not be enqueued. 75 | 76 | .. code-block:: python 77 | 78 | async with worker: 79 | task1 = await sleeper.enqueue(1) 80 | task2 = await sleeper.enqueue(2).start(after=task1.id) 81 | task3 = await sleeper.enqueue(3).start(after=[task1.id, task2.id]) 82 | 83 | .. note:: 84 | ``Task.enqueue()`` is actually a sync function that returns a ``Task`` object. Since ``Task`` is awaitable, it gets enqueued when awaited. 
Therefore, you should always use await even though ``Task.enqueue()`` is sync, unless you're enqueuing by batch (see below). 85 | 86 | Task priorities 87 | --------------- 88 | 89 | Sometimes, certain critical tasks should "skip the line" and receive priority over other tasks. streaQ supports this by allowing you to specify a priority when enqueuing tasks. If a low priority queue is backed up, you can use a high priority queue to ensure that critical tasks are executed quickly. 90 | 91 | By passing the ``priorities`` argument on worker creation, you can create an arbitrary number of queues with your priority ordering. (Please take into account that there will be a slight performance penalty per additional queue.) 92 | 93 | .. code-block:: python 94 | 95 | # this list should be ordered from lowest to highest 96 | worker = Worker(priorities=["low", "high"]) 97 | 98 | async with worker: 99 | await sleeper.enqueue(3).start(priority="low") 100 | 101 | Here's an example that demonstrates how priorities work. Note that the low priority task is enqueued first, but the high priority task is executed first. (Make sure to run this *before* starting the worker!) 102 | 103 | .. code-block:: python 104 | 105 | worker = Worker(concurrency=1) # max 1 task running at a time for demo 106 | 107 | @worker.task() 108 | async def low() -> None: 109 | print("Low priority task") 110 | 111 | @worker.task() 112 | async def high() -> None: 113 | print("High priority task") 114 | 115 | async with worker: 116 | await low.enqueue().start(priority="low") 117 | await high.enqueue().start(priority="high") 118 | 119 | Enqueuing by batch 120 | ------------------ 121 | 122 | For most cases, the above method of enqueuing tasks is sufficient. However, streaQ also provides a way to enqueue a group of tasks together in order to maximize efficiency: 123 | 124 | .. code-block:: python 125 | 126 | # importantly, we're not using `await` here 127 | tasks = [sleeper.enqueue(i) for i in range(10)] 128 | async with worker: 129 | await worker.enqueue_many(tasks) 130 | 131 | Running tasks locally 132 | --------------------- 133 | 134 | Sometimes, you may wish to run a task's underlying function directly and skip enqueuing entirely. This can be done easily: 135 | 136 | .. code-block:: python 137 | 138 | await sleeper.run(3) 139 | 140 | Note that tasks that require access to ``Worker.task_context`` or ``Worker.context`` will fail when run this way as context is initialized upon worker startup. 141 | 142 | Task status & results 143 | --------------------- 144 | 145 | Enqueued tasks return a ``Task`` object which can be used to wait for task results or view the task's status: 146 | 147 | .. code-block:: python 148 | 149 | from datetime import timedelta 150 | 151 | async with worker: 152 | task = await sleeper.enqueue(3).start(delay=timedelta(seconds=5)) 153 | print(await task.status()) 154 | print(await task.result()) 155 | print(await task.status()) 156 | 157 | .. code-block:: python 158 | 159 | TaskStatus.SCHEDULED 160 | TaskResult(fn_name='sleeper', enqueue_time=1740763800091, success=True, result=3, start_time=1740763805099, finish_time=1740763808102, tries=1, worker_id='ca5bd9eb') 161 | TaskStatus.DONE 162 | 163 | The ``TaskResult`` object contains information about the task, such as start/end time. The ``success`` flag will tell you whether the object stored in ``result`` is the result of task execution (if ``True``) or an exception raised during execution (if ``False``). 
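Since the timestamp fields in ``TaskResult`` are millisecond epoch values (as in the output above), you can derive simple timing metrics straight from the result object. A minimal sketch, reusing the ``sleeper`` task from earlier:

.. code-block:: python

    async with worker:
        task = await sleeper.enqueue(3)
        res = await task.result()
        # enqueue_time, start_time and finish_time are milliseconds since the epoch
        queue_latency = (res.start_time - res.enqueue_time) / 1000
        run_time = (res.finish_time - res.start_time) / 1000
        print(f"waited {queue_latency:.2f}s, ran {run_time:.2f}s on worker {res.worker_id}")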
164 | 165 | Task exceptions 166 | --------------- 167 | 168 | If an exception occurs while performing the task, the result.success flag will be set to ``False``. The exception object itself will be available in the ``exception`` property of ``TaskResult``. 169 | 170 | .. code-block:: python 171 | 172 | async with worker: 173 | result = await task.result() 174 | 175 | if not result.success: 176 | print(result.exception) 177 | 178 | .. important:: 179 | 180 | If you're using the default serialization (pickle), the exception object won't contain traceback information, since pickle doesn't natively support serializing traceback objects — this information will be lost during serialization and deserialization. 181 | 182 | To keep the full traceback details for exceptions, you can use the `python-tblib `_ package. This package makes it easy to serialize traceback objects with pickle. In most cases just two lines of code are needed to add this support: 183 | 184 | .. code-block:: python 185 | 186 | from tblib import pickling_support 187 | 188 | # Declare your own custom Exceptions 189 | ... 190 | 191 | # Finally, install tblib 192 | pickling_support.install() 193 | 194 | 195 | Task context 196 | ------------ 197 | 198 | As we've already seen, tasks can access the worker context via ``Worker.context`` on a per-worker basis. In addition to this, streaQ provides a per-task context, ``Worker.task_context()``, with task-specific information such as the try count: 199 | 200 | .. code-block:: python 201 | 202 | @worker.task() 203 | async def get_id() -> str: 204 | ctx = worker.task_context() 205 | return ctx.task_id 206 | 207 | Calls to ``Worker.task_context()`` anywhere outside of a task or a middleware will result in an error. 208 | 209 | Retrying tasks 210 | -------------- 211 | 212 | streaQ provides a special exception that you can raise manually inside of your tasks to make sure that they're retried (as long as ``tries <= max_tries`` for that task): 213 | 214 | .. code-block:: python 215 | 216 | from streaq.task import StreaqRetry 217 | 218 | @worker.task() 219 | async def try_thrice() -> bool: 220 | if worker.task_context().tries < 3: 221 | raise StreaqRetry("Retrying!") 222 | return True 223 | 224 | By default, the retries will use an exponential backoff, where each retry happens after a ``try**2`` second delay. To change this behavior, you can pass the ``delay`` or ``schedule`` parameters to the ``StreaqRetry`` exception. 225 | 226 | Cancelling tasks 227 | ---------------- 228 | 229 | Tasks that are running or enqueued can be aborted manually: 230 | 231 | .. code-block:: python 232 | 233 | task = await sleeper.enqueue(3) 234 | await task.abort() 235 | 236 | Here, the result of the ``abort`` call will be a boolean representing whether the task was successfully cancelled. 237 | 238 | Cron jobs 239 | --------- 240 | 241 | streaQ also includes cron jobs, which allow you to run code at regular, scheduled intervals. You can register a cron job like this: 242 | 243 | .. code-block:: python 244 | 245 | # 9:30 on weekdays 246 | @worker.cron("30 9 * * mon-fri") 247 | async def cron() -> None: 248 | print("Itsa me, Mario!") 249 | 250 | The ``cron`` decorator has one required parameter, the crontab to use which follows the format specified `here `_, as well as the same optional parameters as the ``task`` decorator. 251 | 252 | The timezone used for the scheduler can be controlled via the worker's ``tz`` parameter. 
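Because cron jobs accept the same keyword arguments as regular tasks, a schedule can be combined with options like ``timeout`` or ``silent``. A brief sketch (the job name and crontab here are only illustrative):

.. code-block:: python

    # midnight on the first of every month; give up after 60 seconds
    # and skip the usual start/finish logging
    @worker.cron("0 0 1 * *", timeout=60, silent=True)
    async def monthly_cleanup() -> None:
        ...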
253 | 254 | Dynamic cron jobs 255 | ----------------- 256 | 257 | Aside from defining cron jobs with the decorator, you can also schedule tasks dynamically: 258 | 259 | .. code-block:: python 260 | 261 | task = await sleeper.enqueue(1).start(schedule="*/5 * * * *") # every 5 minutes 262 | 263 | This causes the task to be run repeatedly with the given arguments at the given schedule. To stop scheduling a repeating task, you can use: 264 | 265 | .. code-block:: python 266 | 267 | await task.unschedule() 268 | # OR 269 | await worker.unschedule_by_id(task.id) 270 | 271 | Synchronous functions 272 | --------------------- 273 | 274 | streaQ also supports synchronous functions as second-class citizens for use with mixed codebases. Sync functions will be run in a separate thread, so they won't block the event loop. 275 | 276 | Note that if the task waiting for its completion is cancelled, the thread will still run its course but its return value (or any raised exception) will be ignored. 277 | 278 | .. code-block:: python 279 | 280 | import time 281 | 282 | @worker.task() 283 | def sync_sleep(seconds: int) -> int: 284 | time.sleep(seconds) 285 | return seconds 286 | 287 | # here we use await, the wrapper does the magic for us! 288 | async with worker: 289 | task = await sync_sleep.enqueue(1) 290 | print(await task.result(3)) 291 | 292 | Task dependency graph 293 | --------------------- 294 | 295 | streaQ supports chaining tasks together in a dependency graph. This means that tasks depending on other tasks won't be enqueued until their dependencies have finished successfully. If the dependency fails, the dependent task will fail as well. 296 | 297 | Dependencies can be specified using the ``after`` parameter of the ``Task.start`` function: 298 | 299 | .. code-block:: python 300 | 301 | async with worker: 302 | task1 = await sleeper.enqueue(1) 303 | task2 = await sleeper.enqueue(2).start(after=task1.id) 304 | task3 = await sleeper.enqueue(3).start(after=[task1.id, task2.id]) 305 | 306 | And the dependency failing will cause dependent tasks to fail as well: 307 | 308 | .. code-block:: python 309 | 310 | @worker.task() 311 | async def foobar() -> None: 312 | raise Exception("Oh no!") 313 | 314 | @worker.task() 315 | async def do_nothing() -> None: 316 | pass 317 | 318 | async with worker: 319 | task = await foobar.enqueue().start() 320 | dep = await do_nothing.enqueue().start(after=task.id) 321 | print(await dep.result(3)) 322 | 323 | Task pipelining 324 | --------------- 325 | 326 | streaQ also supports task pipelining via the dependency graph, allowing you to directly feed the results of one task to another. Let's build on the ``fetch`` task defined earlier: 327 | 328 | .. code-block:: python 329 | 330 | @worker.task(timeout=5) 331 | async def fetch(url: str) -> int: 332 | res = await worker.context.http_client.get(url) 333 | return len(res.text) 334 | 335 | @worker.task() 336 | async def double(val: int) -> int: 337 | return val * 2 338 | 339 | @worker.task() 340 | async def is_even(val: int) -> bool: 341 | return val % 2 == 0 342 | 343 | async with worker: 344 | task = await fetch.enqueue("https://tastyware.dev").then(double).then(is_even) 345 | print(await task.result(3)) 346 | 347 | .. 
code-block:: python 348 | 349 | TaskResult(fn_name='is_even', enqueue_time=1743469913601, success=True, result=True, start_time=1743469913901, finish_time=1743469913902, tries=1, worker_id='ca5bd9eb') 350 | 351 | This is useful for ETL pipelines or similar tasks, where each task builds upon the result of the previous one. With a little work, you can build common pipelining utilities from these building blocks: 352 | 353 | .. code-block:: python 354 | 355 | from typing import Any, Sequence 356 | from streaq.utils import gather, to_tuple 357 | 358 | @worker.task() 359 | async def map(data: Sequence[Any], to: str) -> list[Any]: 360 | task = worker.registry[to] 361 | coros = [task.enqueue(*to_tuple(d)).start() for d in data] 362 | tasks = await gather(*coros) 363 | results = await gather(*[t.result(3) for t in tasks]) 364 | return [r.result for r in results] 365 | 366 | @worker.task() 367 | async def filter(data: Sequence[Any], by: str) -> list[Any]: 368 | task = worker.registry[by] 369 | coros = [task.enqueue(*to_tuple(d)).start() for d in data] 370 | tasks = await gather(*coros) 371 | results = await gather(*[t.result(5) for t in tasks]) 372 | return [data[i] for i in range(len(data)) if results[i].result] 373 | 374 | async with worker: 375 | data = [0, 1, 2, 3] 376 | t1 = await map.enqueue(data, to=double.fn_name).then(filter, by=is_even.fn_name) 377 | print(await t1.result()) 378 | t2 = await filter.enqueue(data, by=is_even.fn_name).then(map, to=double.fn_name) 379 | print(await t2.result()) 380 | 381 | .. code-block:: python 382 | 383 | TaskResult(fn_name='filter', enqueue_time=1751712228859, success=True, result=[0, 2, 4, 6], start_time=1751712228895, finish_time=1751712228919, tries=1, worker_id='ca5bd9eb') 384 | TaskResult(fn_name='map', enqueue_time=1751712228923, success=True, result=[0, 4], start_time=1751712228951, finish_time=1751712228966, tries=1, worker_id='ca5bd9eb') 385 | 386 | .. warning:: 387 | For pipelined tasks, positional arguments must all come from the previous task (tuple outputs will be unpacked), and any additional arguments can be passed as kwargs to ``then()``. 388 | 389 | If you don't need to pass additional arguments, tasks can be pipelined using the ``|`` operator as a convenience: 390 | 391 | .. 
code-block:: python 392 | 393 | async with worker: 394 | await (fetch.enqueue("https://tastyware.dev") | double | is_even) 395 | -------------------------------------------------------------------------------- /tests/test_task.py: -------------------------------------------------------------------------------- 1 | import time 2 | from datetime import datetime, timedelta 3 | from typing import Any 4 | from uuid import uuid4 5 | 6 | import pytest 7 | from anyio import create_task_group, sleep 8 | 9 | from streaq import StreaqError, Worker 10 | from streaq.constants import REDIS_UNIQUE 11 | from streaq.task import StreaqRetry, TaskStatus 12 | from streaq.types import ReturnCoroutine 13 | from streaq.utils import gather 14 | 15 | pytestmark = pytest.mark.anyio 16 | 17 | 18 | async def test_result_timeout(worker: Worker): 19 | @worker.task() 20 | async def foobar() -> None: 21 | await sleep(5) 22 | 23 | async with create_task_group() as tg: 24 | await tg.start(worker.run_async) 25 | task = await foobar.enqueue() 26 | with pytest.raises(TimeoutError): 27 | await task.result(3) 28 | tg.cancel_scope.cancel() 29 | 30 | 31 | async def test_run_local(worker: Worker): 32 | @worker.task(timeout=3) 33 | async def foobar() -> bool: 34 | return True 35 | 36 | assert await foobar.run() 37 | 38 | 39 | async def test_task_timeout(worker: Worker): 40 | @worker.task(timeout=timedelta(seconds=1)) 41 | async def foobar() -> None: 42 | await sleep(5) 43 | 44 | async with create_task_group() as tg: 45 | await tg.start(worker.run_async) 46 | task = await foobar.enqueue() 47 | res = await task.result(3) 48 | assert not res.success 49 | assert isinstance(res.exception, TimeoutError) 50 | tg.cancel_scope.cancel() 51 | 52 | 53 | async def test_task_status(worker: Worker): 54 | @worker.task() 55 | async def foobar() -> None: 56 | await sleep(2) 57 | 58 | async with worker: 59 | task = foobar.enqueue() 60 | assert await task.status() == TaskStatus.NOT_FOUND 61 | await task.start() 62 | assert await task.status() == TaskStatus.QUEUED 63 | task2 = await foobar.enqueue().start(delay=5) 64 | 65 | async with create_task_group() as tg: 66 | await tg.start(worker.run_async) 67 | await sleep(1) 68 | assert await task.status() == TaskStatus.RUNNING 69 | await task.result(3) 70 | assert await task.status() == TaskStatus.DONE 71 | assert await task2.status() == TaskStatus.SCHEDULED 72 | tg.cancel_scope.cancel() 73 | 74 | 75 | async def test_task_cron(worker: Worker): 76 | @worker.cron("30 9 1 1 *") 77 | async def cron1() -> bool: 78 | return True 79 | 80 | @worker.cron("* * * * * * *") # once/second 81 | async def cron2() -> None: 82 | await sleep(5) 83 | 84 | schedule = worker._next_datetime(cron1.crontab) # type: ignore 85 | assert schedule.day == 1 and schedule.month == 1 86 | assert await cron1.run() 87 | async with create_task_group() as tg: 88 | await tg.start(worker.run_async) 89 | await sleep(2) 90 | # this will be set if task is running 91 | assert await worker.redis.get(worker.prefix + REDIS_UNIQUE + cron2.fn_name) 92 | tg.cancel_scope.cancel() 93 | 94 | with pytest.raises(StreaqError): 95 | 96 | @worker.cron("* * * * *", timeout=None) 97 | async def cron3() -> None: 98 | await sleep(0) 99 | 100 | 101 | async def test_task_info(worker: Worker): 102 | @worker.task() 103 | async def foobar() -> None: 104 | pass 105 | 106 | async with worker: 107 | task = await foobar.enqueue().start(delay=5) 108 | task2 = await foobar.enqueue() 109 | info = await task.info() 110 | info2 = await task2.info() 111 | assert info and 
info.scheduled is not None 112 | assert info2 and info2.scheduled is None 113 | task.id = "fake" 114 | info3 = await task.info() 115 | assert info3 is None 116 | 117 | 118 | async def test_task_retry(worker: Worker): 119 | @worker.task() 120 | async def foobar() -> int: 121 | ctx = worker.task_context() 122 | if ctx.tries < 3: 123 | raise StreaqRetry("Retrying!") 124 | return ctx.tries 125 | 126 | async with create_task_group() as tg: 127 | await tg.start(worker.run_async) 128 | task = await foobar.enqueue() 129 | res = await task.result(10) 130 | assert res.success 131 | assert res.result == 3 132 | tg.cancel_scope.cancel() 133 | 134 | 135 | async def test_task_retry_with_delay(worker: Worker): 136 | @worker.task() 137 | async def foobar() -> int: 138 | ctx = worker.task_context() 139 | if ctx.tries == 1: 140 | raise StreaqRetry("Retrying!", delay=timedelta(seconds=3)) 141 | return ctx.tries 142 | 143 | async with create_task_group() as tg: 144 | await tg.start(worker.run_async) 145 | task = await foobar.enqueue() 146 | res = await task.result(5) 147 | assert res is not None 148 | assert res.success 149 | assert res.result == 2 150 | tg.cancel_scope.cancel() 151 | 152 | 153 | async def test_task_retry_with_schedule(worker: Worker): 154 | @worker.task() 155 | async def foobar() -> int: 156 | ctx = worker.task_context() 157 | if ctx.tries == 1: 158 | raise StreaqRetry( 159 | "Retrying!", schedule=datetime.now() + timedelta(seconds=2) 160 | ) 161 | return ctx.tries 162 | 163 | async with create_task_group() as tg: 164 | await tg.start(worker.run_async) 165 | task = await foobar.enqueue() 166 | res = await task.result(6) 167 | assert res is not None 168 | assert res.success 169 | assert res.result == 2 170 | tg.cancel_scope.cancel() 171 | 172 | 173 | async def test_task_failure(worker: Worker): 174 | @worker.task() 175 | async def foobar() -> None: 176 | raise Exception("That wasn't supposed to happen!") 177 | 178 | async with create_task_group() as tg: 179 | await tg.start(worker.run_async) 180 | task = await foobar.enqueue() 181 | res = await task.result(3) 182 | assert not res.success 183 | assert isinstance(res.exception, Exception) 184 | with pytest.raises(StreaqError): 185 | _ = res.result 186 | tg.cancel_scope.cancel() 187 | 188 | 189 | async def test_task_retry_no_delay(worker: Worker): 190 | @worker.task() 191 | async def foobar() -> bool: 192 | if worker.task_context().tries == 1: 193 | raise StreaqRetry("Retrying!", delay=0) 194 | return True 195 | 196 | async with create_task_group() as tg: 197 | await tg.start(worker.run_async) 198 | task = await foobar.enqueue() 199 | res = await task.result(3) 200 | assert res is not None 201 | assert res.success 202 | assert res.result 203 | tg.cancel_scope.cancel() 204 | 205 | 206 | async def test_task_max_retries(worker: Worker): 207 | @worker.task() 208 | async def foobar() -> None: 209 | raise StreaqRetry("Retrying!", delay=0) 210 | 211 | async with create_task_group() as tg: 212 | await tg.start(worker.run_async) 213 | task = await foobar.enqueue() 214 | res = await task.result(3) 215 | assert res is not None 216 | assert not res.success 217 | assert isinstance(res.exception, StreaqError) 218 | tg.cancel_scope.cancel() 219 | 220 | 221 | async def test_task_failed_abort(worker: Worker): 222 | @worker.task() 223 | async def foobar() -> bool: 224 | return True 225 | 226 | worker.burst = True 227 | async with create_task_group() as tg: 228 | await tg.start(worker.run_async) 229 | task = await foobar.enqueue() 230 | result = await 
task.result(3) 231 | assert result.success 232 | assert result.result 233 | assert not await task.abort() 234 | tg.cancel_scope.cancel() 235 | 236 | 237 | async def test_task_nonexistent_or_finished_dependency(worker: Worker): 238 | @worker.task() 239 | async def foobar() -> None: 240 | pass 241 | 242 | async with create_task_group() as tg: 243 | await tg.start(worker.run_async) 244 | task = await foobar.enqueue().start(after="nonexistent") 245 | with pytest.raises(TimeoutError): 246 | await task.result(3) 247 | tg.cancel_scope.cancel() 248 | 249 | 250 | async def test_task_dependency(worker: Worker): 251 | @worker.task() 252 | async def foobar() -> None: 253 | await sleep(1) 254 | 255 | async with create_task_group() as tg: 256 | await tg.start(worker.run_async) 257 | task = await foobar.enqueue().start(delay=1) 258 | task2 = await foobar.enqueue().start(after=task.id) 259 | assert await task2.status() == TaskStatus.SCHEDULED 260 | await task.result(3) 261 | result = await task2.result(3) 262 | assert result.success 263 | with pytest.raises(StreaqError): 264 | _ = result.exception 265 | tg.cancel_scope.cancel() 266 | 267 | 268 | async def test_task_dependency_multiple(worker: Worker): 269 | @worker.task() 270 | async def foobar() -> None: 271 | await sleep(1) 272 | 273 | async with create_task_group() as tg: 274 | await tg.start(worker.run_async) 275 | task = await foobar.enqueue().start() 276 | task2 = await foobar.enqueue().start(after=task.id) 277 | task3 = await foobar.enqueue().start(after=[task.id, task2.id]) 278 | assert await task2.status() == TaskStatus.SCHEDULED 279 | assert await task3.status() == TaskStatus.SCHEDULED 280 | res1 = await task.result(3) 281 | assert res1.success 282 | assert await task3.status() == TaskStatus.SCHEDULED 283 | res2 = await task2.result(3) 284 | assert res2.success 285 | res3 = await task3.result(3) 286 | assert res3.success 287 | tg.cancel_scope.cancel() 288 | 289 | 290 | async def test_task_dependency_failed(worker: Worker): 291 | @worker.task() 292 | async def foobar() -> None: 293 | raise Exception("Oh no!") 294 | 295 | @worker.task() 296 | async def do_nothing() -> None: 297 | pass 298 | 299 | async with create_task_group() as tg: 300 | await tg.start(worker.run_async) 301 | task = await foobar.enqueue().start() 302 | dep = await do_nothing.enqueue().start(after=task.id) 303 | res = await dep.result(3) 304 | assert not res.success 305 | assert isinstance(res.exception, StreaqError) 306 | tg.cancel_scope.cancel() 307 | 308 | 309 | async def test_task_dependency_aborted(worker: Worker): 310 | @worker.task() 311 | async def foobar() -> None: 312 | pass 313 | 314 | async with create_task_group() as tg: 315 | async with worker: 316 | dep = await foobar.enqueue() 317 | task = await foobar.enqueue().start(after=dep.id) 318 | await dep.abort(timeout=0) 319 | await tg.start(worker.run_async) 320 | res = await task.result(3) 321 | assert not res.success 322 | assert isinstance(res.exception, StreaqError) 323 | tg.cancel_scope.cancel() 324 | 325 | 326 | async def test_sync_task(worker: Worker): 327 | @worker.task() 328 | def foobar() -> None: 329 | time.sleep(2) 330 | 331 | async with create_task_group() as tg: 332 | await tg.start(worker.run_async) 333 | task = await foobar.enqueue() 334 | task2 = await foobar.enqueue() 335 | # this would time out if these were running sequentially 336 | results = await gather(task.result(3), task2.result(3)) 337 | assert all(res.success for res in results) 338 | tg.cancel_scope.cancel() 339 | 340 | 341 | async def 
test_unsafe_enqueue(worker: Worker): 342 | @worker.task() 343 | async def foobar(ret: int) -> int: 344 | return ret 345 | 346 | async with create_task_group() as tg: 347 | await tg.start(worker.run_async) 348 | task = await worker.enqueue_unsafe(foobar.fn_name, 42) 349 | res = await task.result(3) 350 | assert res.success 351 | assert res.result == 42 352 | tg.cancel_scope.cancel() 353 | 354 | 355 | async def test_chained_failed_dependencies(worker: Worker): 356 | @worker.task() 357 | async def foobar() -> None: 358 | raise Exception("Oh no!") 359 | 360 | @worker.task() 361 | async def child() -> None: 362 | pass 363 | 364 | async with create_task_group() as tg: 365 | await tg.start(worker.run_async) 366 | task = await foobar.enqueue().start(delay=timedelta(seconds=3)) 367 | dep1 = await child.enqueue().start(after=task.id) 368 | dep2 = await child.enqueue().start(after=[task.id, dep1.id]) 369 | await sleep(1) 370 | assert await task.abort(3) 371 | res1 = await dep1.result(3) 372 | res2 = await dep2.result(3) 373 | assert not res1.success and isinstance(res1.exception, StreaqError) 374 | assert not res2.success and isinstance(res2.exception, StreaqError) 375 | tg.cancel_scope.cancel() 376 | 377 | 378 | async def test_task_priorities(redis_url: str): 379 | worker = Worker( 380 | redis_url=redis_url, 381 | queue_name=uuid4().hex, 382 | concurrency=4, 383 | priorities=["low", "high"], 384 | ) 385 | 386 | @worker.task() 387 | async def foobar() -> None: 388 | await sleep(1) 389 | 390 | async with worker: 391 | low = [foobar.enqueue().start(priority="low") for _ in range(4)] 392 | high = [foobar.enqueue().start(priority="high") for _ in range(4)] 393 | await worker.enqueue_many(low + high) 394 | 395 | async with create_task_group() as tg: 396 | await tg.start(worker.run_async) 397 | results = await gather(*[t.result(3) for t in high]) 398 | statuses = await gather(*[t.status() for t in low]) 399 | assert all(res.success for res in results) 400 | assert all(status != TaskStatus.DONE for status in statuses) 401 | tg.cancel_scope.cancel() 402 | 403 | 404 | async def test_scheduled_task(worker: Worker): 405 | @worker.task() 406 | async def foobar() -> None: 407 | pass 408 | 409 | dt = datetime.now() + timedelta(seconds=1) 410 | async with create_task_group() as tg: 411 | await tg.start(worker.run_async) 412 | task = await foobar.enqueue().start(schedule=dt) 413 | assert await task.status() == TaskStatus.SCHEDULED 414 | res = await task.result(3) 415 | assert res.success 416 | tg.cancel_scope.cancel() 417 | 418 | 419 | async def test_bad_start_params(worker: Worker): 420 | @worker.task() 421 | async def foobar() -> None: 422 | pass 423 | 424 | async with worker: 425 | with pytest.raises(StreaqError): 426 | await foobar.enqueue().start(delay=1, schedule=datetime.now()) 427 | with pytest.raises(StreaqError): 428 | await foobar.enqueue().start(delay=1, after="foobar") 429 | with pytest.raises(StreaqError): 430 | await foobar.enqueue().start(schedule=datetime.now(), after="foobar") 431 | 432 | 433 | async def test_enqueue_unique_task(worker: Worker): 434 | @worker.task(unique=True, timeout=3) 435 | async def foobar() -> None: 436 | await sleep(1) 437 | 438 | async with create_task_group() as tg: 439 | await tg.start(worker.run_async) 440 | task = await foobar.enqueue() 441 | task2 = await foobar.enqueue() 442 | results = await gather(task.result(), task2.result()) 443 | assert any( 444 | not r.success and isinstance(r.exception, StreaqError) for r in results 445 | ) 446 | assert any(r.success and 
r.result is None for r in results) 447 | tg.cancel_scope.cancel() 448 | 449 | with pytest.raises(StreaqError): 450 | 451 | @worker.task(unique=True) 452 | async def barfoo() -> None: 453 | await sleep(0) 454 | 455 | 456 | @pytest.mark.parametrize("wait", [1, 0]) 457 | async def test_failed_abort(worker: Worker, wait: int): 458 | @worker.task(ttl=0) 459 | async def foobar() -> None: 460 | pass 461 | 462 | async with create_task_group() as tg: 463 | await tg.start(worker.run_async) 464 | task = await foobar.enqueue().start() 465 | await sleep(1) 466 | assert not await task.abort(wait) 467 | tg.cancel_scope.cancel() 468 | 469 | 470 | async def test_cron_run(worker: Worker): 471 | @worker.cron("* * * * * * *") 472 | async def cron1() -> bool: 473 | return True 474 | 475 | @worker.cron("* * * * * * *", timeout=1) 476 | async def cron2() -> None: 477 | await sleep(3) 478 | 479 | assert await cron1.run() 480 | with pytest.raises(TimeoutError): 481 | await cron2.run() 482 | 483 | 484 | async def test_sync_cron(worker: Worker): 485 | @worker.cron("* * * * * * *") 486 | def cronjob() -> None: 487 | time.sleep(3) 488 | 489 | async with create_task_group() as tg: 490 | await tg.start(worker.run_async) 491 | await sleep(2) 492 | assert await worker.redis.get(worker.prefix + REDIS_UNIQUE + cronjob.fn_name) 493 | tg.cancel_scope.cancel() 494 | 495 | 496 | async def test_cron_multiple_runs(worker: Worker): 497 | val = 0 498 | 499 | @worker.cron("* * * * * * *") 500 | async def cronjob() -> None: 501 | nonlocal val 502 | val += 1 503 | 504 | async with create_task_group() as tg: 505 | await tg.start(worker.run_async) 506 | await sleep(5) 507 | assert val > 1 508 | tg.cancel_scope.cancel() 509 | 510 | 511 | async def test_middleware(worker: Worker): 512 | @worker.task() 513 | async def foobar() -> int: 514 | return 2 515 | 516 | @worker.middleware 517 | def double(task: ReturnCoroutine) -> ReturnCoroutine: 518 | async def wrapper(*args, **kwargs) -> Any: 519 | result = await task(*args, **kwargs) 520 | return result * 2 521 | 522 | return wrapper 523 | 524 | async with create_task_group() as tg: 525 | await tg.start(worker.run_async) 526 | task = await foobar.enqueue() 527 | res = await task.result(3) 528 | assert res.success 529 | assert res.result == 4 530 | tg.cancel_scope.cancel() 531 | 532 | 533 | async def test_task_pipeline(worker: Worker): 534 | @worker.task() 535 | async def double(val: int) -> int: 536 | return val * 2 537 | 538 | @worker.task() 539 | async def is_even(val: int) -> bool: 540 | return val % 2 == 0 541 | 542 | async with create_task_group() as tg: 543 | await tg.start(worker.run_async) 544 | task = await double.enqueue(1).then(double).then(is_even) 545 | res = await task.result(3) 546 | assert res.result and res.success 547 | tg.cancel_scope.cancel() 548 | 549 | 550 | async def test_task_pipeline_shorthand(worker: Worker): 551 | @worker.task() 552 | async def double(val: int) -> int: 553 | return val * 2 554 | 555 | async with create_task_group() as tg: 556 | await tg.start(worker.run_async) 557 | task = await (double.enqueue(1) | double | double) 558 | res = await task.result(3) 559 | assert res.success and res.result == 8 560 | tg.cancel_scope.cancel() 561 | 562 | 563 | async def test_task_pipeline_multiple(worker: Worker): 564 | @worker.task() 565 | async def double(val: int) -> int: 566 | return val * 2 567 | 568 | @worker.task() 569 | async def is_even(val: int) -> bool: 570 | return val % 2 == 0 571 | 572 | async with worker: 573 | task1 = 
double.enqueue(1).then(double).then(is_even) 574 | task2 = double.enqueue(1) | double | double 575 | with pytest.raises(StreaqError): 576 | await worker.enqueue_many([task1, task2]) 577 | 578 | 579 | async def test_task_with_custom_name(worker: Worker): 580 | @worker.task(name="bar") 581 | async def foo() -> int: 582 | return 42 583 | 584 | async def foobar(): 585 | return 586 | 587 | assert foo.fn_name == "bar" 588 | with pytest.raises(StreaqError): 589 | worker.task(name="bar")(foobar) 590 | 591 | @worker.task() 592 | async def bar(): 593 | return 10 594 | 595 | async with create_task_group() as tg: 596 | await tg.start(worker.run_async) 597 | task1 = await worker.enqueue_unsafe("bar") 598 | task2 = await worker.enqueue_unsafe(bar.fn_name) 599 | task3 = await worker.enqueue_unsafe(foo.fn.__qualname__) 600 | res = await task1.result(3) 601 | assert res.result == 42 602 | res = await task2.result(3) 603 | assert res.result == 10 604 | res = await task3.result(3) 605 | assert not res.success 606 | tg.cancel_scope.cancel() 607 | 608 | 609 | async def test_cron_with_custom_name(worker: Worker): 610 | @worker.cron("* * * * * * *", name="foo") 611 | async def cronjob() -> None: 612 | await sleep(3) 613 | 614 | async def cronjob1() -> None: 615 | pass 616 | 617 | assert cronjob.fn_name == "foo" 618 | with pytest.raises(StreaqError): 619 | worker.cron("* * * * * * *", name="foo")(cronjob1) 620 | 621 | async with create_task_group() as tg: 622 | await tg.start(worker.run_async) 623 | await sleep(2) 624 | assert await worker.redis.get(worker.prefix + REDIS_UNIQUE + cronjob.fn_name) 625 | tg.cancel_scope.cancel() 626 | 627 | 628 | @pytest.mark.parametrize("ttl", [60, 0]) 629 | @pytest.mark.parametrize("wait", [1, 0]) 630 | async def test_abort(worker: Worker, ttl: int, wait: int): 631 | @worker.task(ttl=ttl) 632 | async def foobar() -> None: 633 | await sleep(5) 634 | 635 | async with create_task_group() as tg: 636 | await tg.start(worker.run_async) 637 | task = await foobar.enqueue() 638 | await sleep(wait) 639 | assert await task.abort(3) 640 | tg.cancel_scope.cancel() 641 | 642 | 643 | async def test_abort_delayed(worker: Worker): 644 | @worker.task() 645 | async def foobar() -> None: 646 | pass 647 | 648 | async with create_task_group() as tg: 649 | await tg.start(worker.run_async) 650 | task = await foobar.enqueue().start(delay=10) 651 | assert await task.abort(3) 652 | tg.cancel_scope.cancel() 653 | 654 | 655 | async def test_task_expired(worker: Worker): 656 | @worker.task(expire=1) 657 | async def foobar() -> None: 658 | pass 659 | 660 | async with worker: 661 | task = await foobar.enqueue() 662 | await sleep(1) 663 | 664 | async with create_task_group() as tg: 665 | await tg.start(worker.run_async) 666 | res = await task.result(3) 667 | assert not res.success and isinstance(res.exception, StreaqError) 668 | tg.cancel_scope.cancel() 669 | 670 | 671 | @pytest.mark.parametrize("anyio_backend", ["asyncio"]) 672 | async def test_asyncio_enqueue(anyio_backend: str, worker: Worker): 673 | @worker.task() 674 | async def foobar(val: int) -> int: ... 
675 | 676 | import asyncio 677 | 678 | async with worker: 679 | await asyncio.gather(*[foobar.enqueue(i) for i in range(5)]) 680 | 681 | 682 | async def test_dynamic_cron(worker: Worker): 683 | vals: list[int] = [] 684 | 685 | @worker.task() 686 | async def foobar(val: int) -> None: 687 | vals.append(val) 688 | 689 | async with create_task_group() as tg: 690 | await tg.start(worker.run_async) 691 | task = await foobar.enqueue(1).start(schedule="* * * * * * *") 692 | await sleep(2) 693 | assert vals 694 | await task.unschedule() 695 | await sleep(2) 696 | last_len = len(vals) 697 | await sleep(2) 698 | assert last_len == len(vals) 699 | tg.cancel_scope.cancel() 700 | --------------------------------------------------------------------------------