├── doc
├── release.rst
├── requirements.txt
├── locale
│ └── README.zh_TW.md
├── installation.rst
├── conf.py
├── api-reference.rst
├── index.rst
├── development.rst
└── getting-started.rst
├── pgmq_postgres.template.env
├── pgmq_sqlalchemy
├── __init__.py
├── _types.py
├── _utils.py
├── schema.py
└── queue.py
├── pgmq_tests.template.env
├── .pre-commit-config.yaml
├── tests
├── constant.py
├── _utils.py
├── test_construct_pgmq.py
├── fixture_deps.py
├── conftest.py
└── test_queue.py
├── .github
├── pull_request_template.md
├── workflows
│ ├── publish.yml
│ └── codecov.yml
└── CONTRIBUTING.md
├── docker-compose.yml
├── LICENSE
├── .readthedocs.yml
├── .dockerignore
├── Makefile
├── Dockerfile
├── pyproject.toml
├── .gitignore
└── README.md
/doc/release.rst:
--------------------------------------------------------------------------------
1 | .. _release:
2 |
3 | Release Notes
4 | =============
5 |
6 |
--------------------------------------------------------------------------------
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx
2 | sphinx-rtd-theme
3 | sphinx-copybutton
4 | SQLAlchemy
--------------------------------------------------------------------------------
/pgmq_postgres.template.env:
--------------------------------------------------------------------------------
1 | POSTGRES_USER=postgres
2 | POSTGRES_PASSWORD=postgres
3 | POSTGRES_DB=postgres
--------------------------------------------------------------------------------
/pgmq_sqlalchemy/__init__.py:
--------------------------------------------------------------------------------
1 | from .queue import PGMQueue
2 | from . import schema
3 |
4 | __all__ = [
5 | PGMQueue,
6 | schema,
7 | ]
8 |
--------------------------------------------------------------------------------
/pgmq_tests.template.env:
--------------------------------------------------------------------------------
1 | PG_DSN="postgresql://postgres:postgres@pgmq_sqlalchemy:5432/postgres"
2 | PG_ASYNC_DSN="postgresql://postgres:postgres@pgmq_sqlalchemy:5432/postgres"
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/astral-sh/ruff-pre-commit
3 | # Ruff version.
4 | rev: v0.1.14
5 | hooks:
6 | # Run the linter.
7 | - id: ruff
8 | args: [ --fix ]
9 | # Run the formatter.
10 | - id: ruff-format
--------------------------------------------------------------------------------
/pgmq_sqlalchemy/_types.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | from sqlalchemy.orm import Session
4 | from sqlalchemy.engine import Engine
5 | from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession
6 |
7 | # Engine accepted by PGMQueue: sync or async; code branches on
8 | # ``engine.dialect.is_async`` (see ``_utils.get_session_type``).
9 | ENGINE_TYPE = Union[Engine, AsyncEngine]
10 | # Session classes matching the two engine flavours above.
11 | SESSION_TYPE = Union[Session, AsyncSession]
9 |
--------------------------------------------------------------------------------
/tests/constant.py:
--------------------------------------------------------------------------------
1 | SYNC_DRIVERS = [
2 | "pg8000",
3 | "psycopg2",
4 | "psycopg",
5 | "psycopg2cffi",
6 | ]
7 |
8 | ASYNC_DRIVERS = [
9 | "asyncpg",
10 | ]
11 |
12 | DRIVERS = [
13 | "pg8000",
14 | "psycopg2",
15 | "psycopg",
16 | "asyncpg",
17 | "psycopg2cffi",
18 | ]
19 |
20 | MSG = {
21 | "foo": "bar",
22 | "hello": "world",
23 | }
24 |
25 | LOCK_FILE_NAME = "pgmq.meta.lock.txt"
26 |
--------------------------------------------------------------------------------
/tests/_utils.py:
--------------------------------------------------------------------------------
1 | from sqlalchemy import text
2 | from sqlalchemy.orm import Session
3 |
4 |
5 | def check_queue_exists(db_session: Session, queue_name: str) -> bool:
6 | row = db_session.execute(
7 | text(
8 | "SELECT queue_name FROM pgmq.list_queues() WHERE queue_name = :queue_name ;"
9 | ),
10 | {"queue_name": queue_name},
11 | ).first()
12 | return row is not None and row[0] == queue_name
13 |
--------------------------------------------------------------------------------
/tests/test_construct_pgmq.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pgmq_sqlalchemy import PGMQueue
3 |
4 | from tests.fixture_deps import pgmq_deps
5 |
6 |
7 | @pgmq_deps
8 | def test_construct_pgmq(pgmq_fixture):
9 | pgmq: PGMQueue = pgmq_fixture
10 | assert pgmq is not None
11 |
12 |
13 | def test_construct_invalid_pgmq():
14 | with pytest.raises(ValueError) as e:
15 | _ = PGMQueue()
16 | error_msg: str = str(e.value)
17 | assert "Must provide either dsn, engine, or session_maker" in error_msg
18 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | # Title (replace this with the title of your pull request)
2 |
3 | ## Description
4 |
5 | Describe the changes you made in this pull request.
6 | ( replace this with the description of your pull request )
7 |
8 | ## Status
9 |
10 | - [ ] In progress
11 | - [ ] Ready for review
12 | - [ ] Done
13 |
14 | ## Checklist
15 |
16 | - [ ] Read the [Contributing Guide](CONTRIBUTING.md)
17 | - [ ] Passes tests
18 | - [ ] Linted ( we use `pre-commit` with `ruff` )
19 | - [ ] Updated documentation
--------------------------------------------------------------------------------
/doc/locale/README.zh_TW.md:
--------------------------------------------------------------------------------
1 | # pgmq-sqlalchemy
2 |
3 | Python client using **sqlalchemy ORM** for the PGMQ Postgres extension.
4 |
5 | 支援 **SQLAlchemy ORM** 的 Python 客戶端
6 | 用於 [PGMQ Postgres 插件](https://github.com/tembo-io/pgmq) 。
7 |
8 | ## Features
9 |
10 | - 支援 **async** 和 **sync** `engines`、`sessionmakers`,或由 `dsn` 構建。
11 | - 支援所有 sqlalchemy 支持的 postgres DBAPIs。
12 | > 例如:`psycopg`, `psycopg2`, `asyncpg`
13 | > 可見 [SQLAlchemy Postgresql Dialects](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html)
--------------------------------------------------------------------------------
/doc/installation.rst:
--------------------------------------------------------------------------------
1 |
2 |
3 | Installation
4 | ============
5 |
6 |
7 | Install the package using pip:
8 |
9 | .. code-block:: bash
10 |
11 | pip install pgmq-sqlalchemy
12 |
13 | Install with additional DBAPIs packages
14 |
15 |
16 | .. code-block:: bash
17 |
18 | pip install "pgmq-sqlalchemy[asyncpg]"
19 | pip install "pgmq-sqlalchemy[psycopg2-binary]"
20 | # pip install "pgmq-sqlalchemy[postgres-python-driver]"
21 |
22 | .. Note:: See `SQLAlchemy Postgresql Dialects <https://docs.sqlalchemy.org/en/20/dialects/postgresql.html>`_ for all available DBAPIs packages.
23 |
24 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | """
2 | config file for documentation(sphinx)
3 | """
4 |
5 | import time
6 | import os
7 | import sys
8 |
9 | # path setup
10 | # Make the package importable for sphinx.ext.autodoc: conf.py lives in
11 | # doc/, so add the repository root and the package directory to sys.path.
12 | sys.path.insert(0, os.path.abspath(".."))
13 | sys.path.insert(0, os.path.abspath("../pgmq_sqlalchemy"))
14 |
15 | extensions = [
16 |     "sphinx_copybutton",
17 |     "sphinx.ext.autodoc",
18 |     "sphinx.ext.doctest",
19 |     "sphinx.ext.intersphinx",
20 |     "sphinx.ext.napoleon",
21 | ]
22 |
23 | html_theme = "sphinx_rtd_theme"
24 | project = "pgmq-sqlalchemy"
25 | # End year of the copyright range is computed at build time so it stays current.
26 | copyright = f'2024-{time.strftime("%Y")}, the pgmq-sqlalchemy developers'
27 |
28 | source_suffix = {
29 |     ".rst": "restructuredtext",
30 | }
31 |
32 | master_doc = "index"
33 |
34 | # conf.py is excluded so autodoc does not try to document this config module.
35 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "conf.py"]
32 |
--------------------------------------------------------------------------------
/doc/api-reference.rst:
--------------------------------------------------------------------------------
1 | .. _api-reference:
2 |
3 | API Reference
4 | =============
5 |
6 | .. tip::
7 | | For a more detailed explanation or implementation of each `PGMQ function`,
8 | | see `PGMQ: SQL functions.md `_.
9 |
10 |
11 | .. autoclass:: pgmq_sqlalchemy.PGMQueue
12 | :members:
13 | :inherited-members:
14 | :member-order: bysource
15 | :special-members: __init__
16 |
17 |
18 | .. autoclass:: pgmq_sqlalchemy.schema.Message
19 | :members:
20 | :undoc-members:
21 | :inherited-members:
22 | :exclude-members: __init__
23 |
24 |
25 |
26 | .. autoclass:: pgmq_sqlalchemy.schema.QueueMetrics
27 | :members:
28 | :undoc-members:
29 | :inherited-members:
30 | :exclude-members: __init__
31 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 | services:
3 | pgmq_postgres:
4 | container_name: pgmq_postgres
5 | image: quay.io/tembo/pg16-pgmq:v4c7423b
6 | env_file:
7 | - pgmq_postgres.env
8 | ports:
9 | - "5432:5432"
10 | command: ["-c", "max_connections=20000"]
11 | volumes:
12 | - ./stateful_volumes/pgmq_postgres:/var/lib/postgresql
13 |
14 | pgmq_tests:
15 | container_name: pgmq_tests
16 | build:
17 | context: .
18 |       dockerfile: Dockerfile
19 | env_file:
20 | - pgmq_tests.env
21 | depends_on:
22 | - pgmq_postgres
23 | volumes:
24 | - ./stateful_volumes/htmlcov:/pgmq_sqlalchemy_test/htmlcov
25 | command: ["python","-m","pytest", "-v", "--cov=pgmq_sqlalchemy.queue", "--cov-report=html", "-n", "auto", "tests"]
26 |
27 | networks:
28 | default:
29 | name: pgmq_sqlalchemy
--------------------------------------------------------------------------------
/pgmq_sqlalchemy/_utils.py:
--------------------------------------------------------------------------------
1 | import json
2 | from typing import List
3 |
4 | from sqlalchemy.orm import sessionmaker, Session
5 | from sqlalchemy.ext.asyncio import AsyncSession
6 |
7 | from ._types import ENGINE_TYPE, SESSION_TYPE
8 |
9 |
10 | def get_session_type(engine: ENGINE_TYPE) -> SESSION_TYPE:
11 | if engine.dialect.is_async:
12 | return AsyncSession
13 | return Session
14 |
15 |
16 | def is_async_session_maker(session_maker: sessionmaker) -> bool:
17 | return AsyncSession in session_maker.class_.__mro__
18 |
19 |
20 | def is_async_dsn(dsn: str) -> bool:
21 | return dsn.startswith("postgresql+asyncpg")
22 |
23 |
24 | def encode_dict_to_psql(msg: dict) -> str:
25 | return f"'{json.dumps(msg)}'::jsonb"
26 |
27 |
28 | def encode_list_to_psql(messages: List[dict]) -> str:
29 | return f"ARRAY[{','.join([encode_dict_to_psql(msg) for msg in messages])}]"
30 |
--------------------------------------------------------------------------------
/pgmq_sqlalchemy/schema.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from datetime import datetime
3 | from typing import Optional
4 |
5 |
6 | @dataclass
7 | class Message:
8 |     """
9 |     A single message record from a PGMQ queue.
10 |
11 |     .. _schema_message_class: #pgmq_sqlalchemy.schema.Message
12 |     .. |schema_message_class| replace:: :py:class:`.~pgmq_sqlalchemy.schema.Message`
13 |     """
14 |
15 |     # Message id (``msg_id`` column).
16 |     msg_id: int
17 |     # Read count (``read_ct`` column).
18 |     read_ct: int
19 |     # Enqueue timestamp (``enqueued_at`` column).
20 |     enqueued_at: datetime
21 |     # Visibility-timeout timestamp (``vt`` column).
22 |     vt: datetime
23 |     # Message payload as a decoded JSON dict.
24 |     message: dict
18 |
19 |
20 | @dataclass
21 | class QueueMetrics:
22 |     """
23 |     Metrics snapshot for a single queue.
24 |
25 |     .. _schema_queue_metrics_class: #pgmq_sqlalchemy.schema.QueueMetrics
26 |     .. |schema_queue_metrics_class| replace:: :py:class:`.~pgmq_sqlalchemy.schema.QueueMetrics`
27 |     """
28 |
29 |     # Name of the queue the metrics describe.
30 |     queue_name: str
31 |     # Current number of messages in the queue.
32 |     queue_length: int
33 |     # Age in seconds of the newest message; Optional — may be None
34 |     # (presumably when the queue is empty — confirm against PGMQ docs).
35 |     newest_msg_age_sec: Optional[int]
36 |     # Age in seconds of the oldest message; Optional — may be None.
37 |     oldest_msg_age_sec: Optional[int]
38 |     # Total number of messages ever recorded for the queue.
39 |     total_messages: int
32 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 LIU ZHE YOU
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file for Sphinx projects
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Set the OS, Python version and other tools you might need
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3.12"
12 | # You can also specify other tool versions:
13 | # nodejs: "20"
14 | # rust: "1.70"
15 | # golang: "1.20"
16 |
17 | # Build documentation in the "docs/" directory with Sphinx
18 | sphinx:
19 | configuration: doc/conf.py
20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
21 | # builder: "dirhtml"
22 | # Fail on all warnings to avoid broken references
23 | fail_on_warning: true
24 |
25 | # Optionally build your docs in additional formats such as PDF and ePub
26 | # formats:
27 | # - pdf
28 | # - epub
29 |
30 | # Optional but recommended, declare the Python requirements required
31 | # to build your documentation
32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
33 | python:
34 | install:
35 | - requirements: doc/requirements.txt
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - '*.*.*'
7 |
8 | permissions:
9 | contents: read
10 |
11 | jobs:
12 | pypi-publish:
13 | name: Upload release to PyPI
14 | runs-on: ubuntu-latest
15 | environment:
16 | name: pypi
17 | url: https://pypi.org/project/pgmq-sqlalchemy/
18 | permissions:
19 | id-token: write
20 | steps:
21 | - name: Checkout code
22 | uses: actions/checkout@v4
23 |
24 | - name: Set up Python 3.10
25 | uses: actions/setup-python@v5
26 | with:
27 | python-version: "3.10"
28 |
29 | - name: Install Poetry
30 | run: |
31 | curl -sSL https://install.python-poetry.org | python - -y
32 |
33 | - name: Update PATH
34 | run: echo "$HOME/.local/bin" >> $GITHUB_PATH
35 |
36 | - name: Update Poetry configuration
37 | run: poetry config virtualenvs.create false
38 |
39 | - name: Install dependencies
40 | run: poetry install --sync --no-interaction --without=dev
41 |
42 | - name: Package project
43 | run: poetry build
44 |
45 | - name: Publish package distributions to PyPI
46 | uses: pypa/gh-action-pypi-publish@release/v1
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # reference: https://github.com/themattrix/python-pypi-template/blob/master/.dockerignore
2 |
3 | # Git
4 | .git
5 | .gitignore
6 | .gitattributes
7 |
8 |
9 | # CI
10 | .codeclimate.yml
11 | .travis.yml
12 | .taskcluster.yml
13 |
14 | # Docker
15 | docker-compose.yml
16 | Dockerfile
17 | .docker
18 | .dockerignore
19 |
20 | # Byte-compiled / optimized / DLL files
21 | **/__pycache__/
22 | **/*.py[cod]
23 |
24 | # C extensions
25 | *.so
26 |
27 | # Distribution / packaging
28 | .Python
29 | env/
30 | build/
31 | develop-eggs/
32 | dist/
33 | downloads/
34 | eggs/
35 | lib/
36 | lib64/
37 | parts/
38 | sdist/
39 | var/
40 | *.egg-info/
41 | .installed.cfg
42 | *.egg
43 |
44 | # PyInstaller
45 | # Usually these files are written by a python script from a template
46 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
47 | *.manifest
48 | *.spec
49 |
50 | # Installer logs
51 | pip-log.txt
52 | pip-delete-this-directory.txt
53 |
54 | # Unit test / coverage reports
55 | htmlcov/
56 | .tox/
57 | .coverage
58 | .cache
59 | nosetests.xml
60 | coverage.xml
61 |
62 | # Translations
63 | *.mo
64 | *.pot
65 |
66 | # Django stuff:
67 | *.log
68 |
69 | # Sphinx documentation
70 | docs/_build/
71 |
72 | # PyBuilder
73 | target/
74 |
75 | # Virtual environment
76 | .env
77 | .venv/
78 | venv/
79 |
80 | # PyCharm
81 | .idea
82 |
83 | # Python mode for VIM
84 | .ropeproject
85 | **/.ropeproject
86 |
87 | # Vim swap files
88 | **/*.swp
89 |
90 | # VS Code
91 | .vscode/
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. _index:
2 |
3 |
4 | .. image:: https://img.shields.io/endpoint?url=https://python-poetry.org/badge/v0.json
5 | :target: https://python-poetry.org/
6 | :alt: Poetry
7 | .. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json
8 | :target: https://github.com/astral-sh/ruff
9 | :alt: Ruff
10 | .. image:: https://img.shields.io/pypi/v/pgmq-sqlalchemy
11 | :target: https://pypi.org/project/pgmq-sqlalchemy/
12 | :alt: PyPI - Version
13 | .. image:: https://img.shields.io/pypi/l/pgmq-sqlalchemy.svg
14 | :target: https://pypi.org/project/pgmq-sqlalchemy/
15 | :alt: PyPI - License
16 | .. image:: https://codecov.io/gh/jason810496/pgmq-sqlalchemy/graph/badge.svg?token=C5ZVZCW7TE
17 | :target: https://codecov.io/gh/jason810496/pgmq-sqlalchemy
18 | :alt: Codecov
19 |
20 | pgmq-sqlalchemy
21 | ===============
22 |
23 | `PGMQ Postgres extension <https://github.com/tembo-io/pgmq>`_ Python client supporting **SQLAlchemy ORM** .
24 |
25 | Features
26 | --------
27 |
28 | * Supports **async** and **sync** ``engines``, ``sessionmakers``, or directly constructed from ``dsn``.
29 | * **Automatically** creates ``pgmq`` extension on the database if not exists.
30 | * Supports all Postgres DBAPIs supported by ``SQLAlchemy``.
31 | * Examples: ``psycopg``, ``psycopg2``, ``asyncpg``
32 | * See `SQLAlchemy Postgresql Dialects <https://docs.sqlalchemy.org/en/20/dialects/postgresql.html>`_
33 |
34 |
35 | Table of Contents
36 | -----------------
37 |
38 | .. toctree::
39 | :maxdepth: 2
40 |
41 | self
42 | installation
43 | getting-started
44 | api-reference
45 | development
46 | release
--------------------------------------------------------------------------------
/.github/workflows/codecov.yml:
--------------------------------------------------------------------------------
1 | # This workflow will run tests using pytest and upload the coverage report to Codecov
2 | # Run test with various Python versions
3 | name: Integration Tests
4 |
5 | on:
6 | push:
7 | branches: [main, develop]
8 | pull_request:
9 | branches: [main, develop]
10 |
11 | jobs:
12 | build:
13 |
14 | runs-on: ubuntu-latest
15 | strategy:
16 | matrix:
17 | python-version: ["3.9","3.10","3.11","3.12"]
18 |
19 | name: Test pgmq-sqlalchemy
20 | steps:
21 | - uses: actions/checkout@v4
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v5
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 | # Install dependencies
27 | - name: Install dependencies
28 | run: |
29 | pip install poetry
30 | - name: Set poetry python version
31 | run: |
32 | poetry env use python${{ matrix.python-version }}
33 | - name: Install dependencies
34 | run: poetry install --without=dev
35 | - name: Start PostgreSQL
36 | run: |
37 | cp pgmq_postgres.template.env pgmq_postgres.env
38 | cp pgmq_tests.template.env pgmq_tests.env
39 | make start-db
40 | - name: Run tests and collect coverage
41 |         run: poetry run pytest tests --cov=pgmq_sqlalchemy.queue --cov-report=xml -n auto
42 | continue-on-error: true
43 | - name: Upload coverage reports to Codecov with GitHub Action
44 | uses: codecov/codecov-action@v4.2.0
45 | env:
46 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .DEFAULT_GOAL := help
2 |
3 | install: ## Install dependencies and `ruff` pre-commit hooks
4 | pre-commit install
5 | poetry install --with dev
6 |
7 | build: ## Build the package
8 | poetry build
9 |
10 | test-local: ## Run tests locally
11 | poetry run pytest tests --cov=pgmq_sqlalchemy.queue
12 |
13 |
14 | test-docker-rebuild: ## Rebuild the docker image
15 | docker rmi -f pgmq-sqlalchemy-pgmq_tests
16 | docker build -t pgmq-sqlalchemy-pgmq_tests -f Dockerfile .
17 |
18 | test-docker: test-docker-rebuild ## Run tests in docker
19 | ifndef CMD
20 | if [ -d "stateful_volumes/htmlcov" ]; then rm -r stateful_volumes/htmlcov; fi
21 | if [ -d "htmlcov" ]; then rm -r htmlcov; fi
22 | docker compose run --rm pgmq_tests
23 | cp -r stateful_volumes/htmlcov/ htmlcov/
24 | rm -r stateful_volumes/htmlcov/
25 | else
26 | docker run --rm --entrypoint '/bin/bash' pgmq-sqlalchemy-pgmq_tests -c '$(CMD)'
27 | endif
28 |
29 | clear-db: ## Clear the database ( stop it and remove its data volume )
30 | 	docker compose down pgmq_postgres
31 | 	rm -r stateful_volumes/pgmq_postgres/
32 |
33 | stop-db: ## Stop the database ( keeps data; referenced by CONTRIBUTING.md and doc/development.rst )
34 | 	docker compose stop pgmq_postgres
35 |
36 | start-db: ## Start the database
37 | 	docker compose up -d pgmq_postgres
38 | 	while ! docker compose exec pgmq_postgres pg_isready; do sleep 1; done
36 |
37 | exec-db: ## Enter the database container
38 | docker compose exec pgmq_postgres psql -U postgres -d postgres
39 |
40 | doc-build: ## Build the documentation
41 | cd doc && poetry run sphinx-build -nW . _build
42 |
43 | doc-serve: doc-clean ## Serve the documentation
44 | cd doc && poetry run sphinx-autobuild -nW . _build
45 |
46 | doc-clean: ## Clean the documentation
47 | cd doc && rm -r _build
48 |
49 | .PHONY: install test-local test-docker test-docker-rebuild clear-db start-db exec-db doc-build doc-serve
50 |
51 | # generate help from comments
52 | .PHONY: help
53 | help: ## Display this help
54 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.12-bookworm as python-base
2 |
3 | ENV PYTHONUNBUFFERED=1 \
4 | # prevents python creating .pyc files
5 | PYTHONDONTWRITEBYTECODE=1 \
6 | \
7 | # pip
8 | PIP_DISABLE_PIP_VERSION_CHECK=on \
9 | PIP_DEFAULT_TIMEOUT=100 \
10 | \
11 | # poetry
12 | # https://python-poetry.org/docs/configuration/#using-environment-variables
13 | POETRY_VERSION=1.3.2 \
14 | # make poetry install to this location
15 | POETRY_HOME="/opt/poetry" \
16 | # make poetry create the virtual environment in the project's root
17 | # it gets named `.venv`
18 | POETRY_VIRTUALENVS_IN_PROJECT=true \
19 | # do not ask any interactive question
20 | POETRY_NO_INTERACTION=1 \
21 | \
22 | # paths
23 | # this is where our requirements + virtual environment will live
24 | PYSETUP_PATH="/opt/pysetup" \
25 | VENV_PATH="/opt/pysetup/.venv"
26 |
27 |
28 | # prepend poetry and venv to path
29 | ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
30 |
31 | FROM python-base as builder-base
32 | RUN apt-get update \
33 | && apt-get install --no-install-recommends -y \
34 | # deps for installing poetry
35 | curl \
36 | # deps for building python deps
37 | build-essential
38 |
39 | # install poetry - respects $POETRY_VERSION & $POETRY_HOME
40 | # The --mount will mount the buildx cache directory to where
41 | # Poetry and Pip store their cache so that they can re-use it
42 | RUN --mount=type=cache,target=/root/.cache \
43 | curl -sSL https://install.python-poetry.org | python3 -
44 |
45 | # copy project requirement files here to ensure they will be cached.
46 | WORKDIR $PYSETUP_PATH
47 | COPY poetry.lock pyproject.toml ./
48 |
49 | # install runtime deps - uses $POETRY_VIRTUALENVS_IN_PROJECT internally
50 | RUN --mount=type=cache,target=/root/.cache \
51 | poetry install --with=dev
52 |
53 | FROM python:3.12-slim-bookworm as runtime
54 |
55 | ENV POETRY_HOME="/opt/poetry" \
56 | VENV_PATH="/opt/pysetup/.venv"
57 | ENV PATH="$POETRY_HOME/bin:$VENV_PATH/bin:$PATH"
58 |
59 | COPY --from=builder-base $PYSETUP_PATH $PYSETUP_PATH
60 | COPY ./pgmq_sqlalchemy /pgmq_sqlalchemy_test/pgmq_sqlalchemy
61 | COPY ./tests /pgmq_sqlalchemy_test/tests
62 |
63 | WORKDIR /pgmq_sqlalchemy_test
64 |
65 | CMD ["python", "-m", "pytest", "-sv" , "tests", "--cov=pgmq_sqlalchemy.queue", "-n" , "4" ]
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | Welcome to contribute to `pgmq-sqlalchemy` !
4 | This document will guide you through the process of contributing to the project.
5 |
6 | ## How to Contribute
7 |
8 | 1. Fork the repository
9 | - Click the `Fork` button in the upper right corner of the repository page.
10 | 2. Clone the repository
11 | - Clone the repository to your local machine.
12 | ```bash
13 | git clone https://github.com/your-username/pgmq-sqlalchemy.git
14 | ```
15 | 3. Create a new branch
16 | - Create a new branch for your changes.
17 | ```bash
18 | git checkout -b feature/your-feature-name
19 | ```
20 | 4. Make your changes
21 | - Make your changes to the codebase.
22 | - Add tests for your changes.
23 | - Add documentation if changes are user-facing.
24 | 5. Commit your changes
25 | - Commit your changes with meaningful commit messages.
26 | - [ref: conventional git commit messages](https://www.conventionalcommits.org/en/v1.0.0/)
27 | ```bash
28 | git commit -m "feat: your feature description"
29 | ```
30 | 6. Push your changes
31 | - Push your changes to your forked repository.
32 | ```bash
33 | git push origin feature/your-feature-name
34 | ```
35 | 7. Create a Pull Request
36 | - Create a Pull Request from your forked repository to the `develop` branch of the original repository.
37 |
38 | ## Development
39 |
40 | ### Setup
41 |
42 | Install dependencies and `ruff` pre-commit hooks.
43 | ```bash
44 | make install
45 | ```
46 |
47 | > Prerequisites: **Docker** and **Docker Compose** installed.
48 |
49 | Start development PostgreSQL
50 | ```bash
51 | make start-db
52 | ```
53 |
54 | Stop development PostgreSQL
55 | ```bash
56 | make stop-db
57 | ```
58 |
59 | ### Makefile utility
60 |
61 | ```bash
62 | make help
63 | ```
64 | > will show all available commands and their descriptions.
65 |
66 | ### Linting
67 |
68 | We use [pre-commit](https://pre-commit.com/) hook with [ruff](https://github.com/astral-sh/ruff-pre-commit) to automatically lint the codebase before committing.
69 |
70 |
71 | ### Testing
72 |
73 | Run tests in local
74 | ```bash
75 | make test-local
76 | ```
77 |
78 | Run tests in docker
79 | ```bash
80 | make test-docker
81 | ```
82 |
83 | ### Documentation
84 |
85 | Serve documentation
86 | ```bash
87 | make doc-serve
88 | ```
89 |
90 | Clean documentation build
91 | ```bash
92 | make doc-clean
93 | ```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "pgmq-sqlalchemy"
3 | version = "0.1.2"
4 | description = "More flexible PGMQ Postgres extension Python client that using sqlalchemy ORM, supporting both async and sync engines, sessionmakers or built from dsn."
5 | authors = ["jason810496 <810496@email.wlsh.tyc.edu.tw>"]
6 | license = "MIT"
7 | readme = "README.md"
8 | keywords = ["pgmq","PGMQ","sqlalchemy","SQLAlchemy","tembo_pgmq_python"]
9 | classifiers = [
10 | "Intended Audience :: Information Technology",
11 | "Intended Audience :: Developers",
12 | "License :: OSI Approved :: MIT License",
13 | "Programming Language :: Python :: 3",
14 | "Programming Language :: Python :: 3.9",
15 | "Programming Language :: Python :: 3.10",
16 | "Programming Language :: Python :: 3.11",
17 | "Programming Language :: Python :: 3.12",
18 | "Operating System :: Microsoft :: Windows",
19 | "Operating System :: POSIX",
20 | "Operating System :: Unix",
21 | "Operating System :: MacOS",
22 | ]
23 |
24 | [tool.poetry.urls]
25 | "Homepage" = "https://github.com/jason810496/pgmq-sqlalchemy"
26 | "Repository" = "https://github.com/jason810496/pgmq-sqlalchemy"
27 | "Documentation" = "https://pgmq-sqlalchemy.readthedocs.io/en/latest/"
28 |
29 | [tool.poetry.extras]
30 | asyncpg = ["asyncpg", "greenlet"]
31 | pg8000 = ["pg8000"]
32 | psycopg = ["psycopg"]
33 | psycopg2-binary = ["psycopg2-binary"]
34 | psycopg2cffi = ["psycopg2cffi"]
35 |
36 |
37 | [tool.poetry.dependencies]
38 | python = "^3.9"
39 | SQLAlchemy = "^2.0.31"
40 | # optional dependencies
41 | asyncpg = {version = "^0.29.0", optional = true}
42 | greenlet = {version = "^3.0.3", optional = true}
43 | pg8000 = {version = "^1.31.2", optional = true}
44 | psycopg = {version = "^3.2.1", optional = true}
45 | psycopg2-binary = {version = "^2.9.9", optional = true}
46 | psycopg2cffi = {version = "^2.9.0", optional = true}
47 |
48 | [tool.poetry.group.dev.dependencies]
49 | # postgresql drivers
50 | asyncpg = "^0.29.0"
51 | greenlet = "^3.0.3"
52 | pg8000 = "^1.31.2"
53 | psycopg = "^3.2.1"
54 | psycopg2-binary = "^2.9.9"
55 | psycopg2cffi = "^2.9.0"
56 | # testing
57 | pytest = "7.4.4"
58 | pytest-lazy-fixture = "^0.6.3"
59 | pytest-cov = "^5.0.0"
60 | pytest-xdist = "^3.6.1"
61 | filelock = "^3.15.4"
62 | # docs
63 | sphinx = "^7.3.7"
64 | sphinx-autobuild = "^2024.4.16"
65 | sphinx-rtd-theme = "^2.0.0"
66 | sphinx-copybutton = "^0.5.2"
67 |
68 |
69 | [build-system]
70 | requires = ["poetry-core"]
71 | build-backend = "poetry.core.masonry.api"
72 |
--------------------------------------------------------------------------------
/doc/development.rst:
--------------------------------------------------------------------------------
1 |
2 |
3 | Development
4 | ===========
5 |
6 | | Welcome to contributing to ``pgmq-sqlalchemy`` !
7 | | This document will guide you through the process of contributing to the project.
8 |
9 | How to Contribute
10 | -----------------
11 |
12 | 1. Fork the repository
13 | - Click the `Fork` button in the upper right corner of the repository page.
14 | 2. Clone the repository
15 | - Clone the repository to your local machine.
16 |
17 | .. code-block:: bash
18 |
19 | git clone https://github.com/your-username/pgmq-sqlalchemy.git
20 |
21 | 3. Create a new branch
22 | - Create a new branch for your changes.
23 |
24 | .. code-block:: bash
25 |
26 | git checkout -b feature/your-feature-name
27 |
28 | 4. Make your changes
29 | - Make your changes to the codebase.
30 | - Add tests for your changes.
31 | - Add documentation if changes are user-facing.
32 | 5. Commit your changes
33 | * Commit your changes with meaningful commit messages.
34 | * `ref: conventional git commit messages <https://www.conventionalcommits.org/en/v1.0.0/>`_
35 |
36 | .. code-block:: bash
37 |
38 | git commit -m "feat: your feature description"
39 |
40 | 6. Push your changes
41 | - Push your changes to your forked repository.
42 |
43 | .. code-block:: bash
44 |
45 | git push origin feature/your-feature-name
46 |
47 | 7. Create a Pull Request
48 | - Create a Pull Request from your forked repository to the ``develop`` branch of the original repository.
49 |
50 | Development Setup
51 | -----------------
52 |
53 | Setup
54 | ~~~~~
55 |
56 | Install dependencies and `ruff` pre-commit hooks.
57 |
58 | .. code-block:: bash
59 |
60 | make install
61 |
62 | Prerequisites: **Docker** and **Docker Compose** installed.
63 |
64 | Start development PostgreSQL
65 |
66 | .. code-block:: bash
67 |
68 | make start-db
69 |
70 | Stop development PostgreSQL
71 |
72 | .. code-block:: bash
73 |
74 | make stop-db
75 |
76 | Makefile utility
77 | ~~~~~~~~~~~~~~~~
78 |
79 | .. code-block:: bash
80 |
81 | make help
82 |
83 | # will show all available commands and their descriptions.
84 |
85 | Linting
86 | ~~~~~~~
87 |
88 | We use `pre-commit `_ hook with `ruff `_ to automatically lint the codebase before committing.
89 |
90 | Testing
91 | -------
92 |
93 | Run tests locally
94 |
95 | .. code-block:: bash
96 |
97 | make test-local
98 |
99 | Run tests in docker
100 |
101 | .. code-block:: bash
102 |
103 | make test-docker
104 |
105 | Documentation
106 | -------------
107 |
108 | Serve documentation
109 |
110 | .. code-block:: bash
111 |
112 | make doc-serve
113 |
114 | Clean documentation build
115 |
116 | .. code-block:: bash
117 |
118 | make doc-clean
119 |
--------------------------------------------------------------------------------
/tests/fixture_deps.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | from typing import Tuple
3 |
4 | import pytest
5 |
6 | from pgmq_sqlalchemy import PGMQueue
7 | from tests._utils import check_queue_exists
8 |
# Every supported way of constructing a PGMQueue (dsn / engine / session_maker,
# sync and async variants) — each lazy fixture resolves to a PGMQueue instance
# defined in tests/conftest.py.
LAZY_FIXTURES = [
    pytest.lazy_fixture("pgmq_by_dsn"),
    pytest.lazy_fixture("pgmq_by_async_dsn"),
    pytest.lazy_fixture("pgmq_by_engine"),
    pytest.lazy_fixture("pgmq_by_async_engine"),
    pytest.lazy_fixture("pgmq_by_session_maker"),
    pytest.lazy_fixture("pgmq_by_async_session_maker"),
    pytest.lazy_fixture("pgmq_by_dsn_and_engine"),
    pytest.lazy_fixture("pgmq_by_dsn_and_session_maker"),
]

# Parametrize decorator: runs the decorated test once per construction style.
pgmq_deps = pytest.mark.parametrize(
    "pgmq_fixture",
    LAZY_FIXTURES,
)
"""
Decorator that allows a test function to receive a PGMQueue instance as a parameter.

Usage:

```
from tests.fixture_deps import pgmq_deps

@pgmq_deps
def test_create_queue(pgmq_fixture,db_session):
    pgmq:PGMQueue = pgmq_fixture
    # test code here
```

Note:
`pytest` version should < 8.0.0,
or `pytest-lazy-fixture` will not work
ref: https://github.com/TvoroG/pytest-lazy-fixture/issues/65
"""

# Type alias: (queue client, name of the temporary queue created for the test).
PGMQ_WITH_QUEUE = Tuple[PGMQueue, str]
45 |
46 |
@pytest.fixture(scope="function", params=LAZY_FIXTURES)
def pgmq_setup_teardown(request: pytest.FixtureRequest, db_session) -> PGMQ_WITH_QUEUE:
    """
    Fixture that provides a PGMQueue instance with a unique temporary queue with setup and teardown.

    Parametrized over ``LAZY_FIXTURES`` so the test runs once per PGMQueue
    construction style (dsn / engine / session_maker, sync and async).

    Args:
        request (pytest.FixtureRequest): The pytest fixture request object.
        db_session (sqlalchemy.orm.Session): The SQLAlchemy session object.

    Yields:
        tuple[PGMQueue,str]: A tuple containing the PGMQueue instance and the name of the temporary queue.

    Usage:
        def test_something(pgmq_setup_teardown):
            pgmq, queue_name = pgmq_setup_teardown
            # test code here

    """
    pgmq = request.param
    # uuid4 hex keeps the queue name unique across parallel (pytest-xdist) runs.
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    assert check_queue_exists(db_session, queue_name) is False
    pgmq.create_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is True
    yield pgmq, queue_name
    # Teardown: drop the queue and verify it is actually gone.
    pgmq.drop_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is False
75 |
@pytest.fixture(scope="function", params=LAZY_FIXTURES)
def pgmq_partitioned_setup_teardown(
    request: pytest.FixtureRequest, db_session
) -> PGMQ_WITH_QUEUE:
    """
    Fixture that provides a PGMQueue instance with a unique temporary partitioned queue with setup and teardown.

    Parametrized over ``LAZY_FIXTURES`` so the test runs once per PGMQueue
    construction style (dsn / engine / session_maker, sync and async).

    Args:
        request (pytest.FixtureRequest): The pytest fixture request object.
        db_session (sqlalchemy.orm.Session): The SQLAlchemy session object.

    Yields:
        tuple[PGMQueue,str]: A tuple containing the PGMQueue instance and the name of the temporary queue.

    Usage:
        def test_something(pgmq_partitioned_setup_teardown):
            pgmq, queue_name = pgmq_partitioned_setup_teardown
            # test code here

    """
    pgmq: PGMQueue = request.param
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    assert check_queue_exists(db_session, queue_name) is False
    pgmq.create_partitioned_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is True
    yield pgmq, queue_name
    # Teardown: partitioned queues must be dropped with partitioned=True.
    pgmq.drop_queue(queue_name, partitioned=True)
    assert check_queue_exists(db_session, queue_name) is False
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | from pytest import FixtureRequest
5 | from sqlalchemy import create_engine
6 | from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
7 | from sqlalchemy.orm import sessionmaker, Session
8 |
9 | from pgmq_sqlalchemy import PGMQueue
10 | from tests.constant import ASYNC_DRIVERS, SYNC_DRIVERS
11 |
12 |
@pytest.fixture(scope="module")
def get_sa_host():
    """Database host; overridable via the ``SQLALCHEMY_HOST`` env var."""
    return os.getenv("SQLALCHEMY_HOST", "localhost")
16 |
17 |
@pytest.fixture(scope="module")
def get_sa_port():
    """Database port; overridable via the ``SQLALCHEMY_PORT`` env var."""
    return os.getenv("SQLALCHEMY_PORT", "5432")
21 |
22 |
@pytest.fixture(scope="module")
def get_sa_user():
    """Database user; overridable via the ``SQLALCHEMY_USER`` env var."""
    return os.getenv("SQLALCHEMY_USER", "postgres")
26 |
27 |
@pytest.fixture(scope="module")
def get_sa_password():
    """Database password; overridable via the ``SQLALCHEMY_PASSWORD`` env var."""
    return os.getenv("SQLALCHEMY_PASSWORD", "postgres")
31 |
32 |
@pytest.fixture(scope="module")
def get_sa_db():
    """Database name; overridable via the ``SQLALCHEMY_DB`` env var."""
    return os.getenv("SQLALCHEMY_DB", "postgres")
36 |
37 |
@pytest.fixture(scope="function", params=SYNC_DRIVERS)
def get_dsn(
    request: FixtureRequest,
    get_sa_host,
    get_sa_port,
    get_sa_user,
    get_sa_password,
    get_sa_db,
):
    """Synchronous SQLAlchemy DSN, parametrized over every driver in ``SYNC_DRIVERS``."""
    driver = request.param
    return f"postgresql+{driver}://{get_sa_user}:{get_sa_password}@{get_sa_host}:{get_sa_port}/{get_sa_db}"
49 |
50 |
@pytest.fixture(scope="function", params=ASYNC_DRIVERS)
def get_async_dsn(
    request: FixtureRequest,
    get_sa_host,
    get_sa_port,
    get_sa_user,
    get_sa_password,
    get_sa_db,
):
    """Asynchronous SQLAlchemy DSN, parametrized over every driver in ``ASYNC_DRIVERS``."""
    driver = request.param
    return f"postgresql+{driver}://{get_sa_user}:{get_sa_password}@{get_sa_host}:{get_sa_port}/{get_sa_db}"
62 |
63 |
@pytest.fixture(scope="function")
def get_engine(get_dsn):
    """Synchronous SQLAlchemy engine built from the parametrized sync DSN."""
    return create_engine(get_dsn)
67 |
68 |
@pytest.fixture(scope="function")
def get_async_engine(get_async_dsn):
    """Asynchronous SQLAlchemy engine built from the parametrized async DSN."""
    return create_async_engine(get_async_dsn)
72 |
73 |
@pytest.fixture(scope="function")
def get_session_maker(get_engine):
    """Sync ``sessionmaker`` bound to the sync engine."""
    return sessionmaker(bind=get_engine, class_=Session)
77 |
78 |
@pytest.fixture(scope="function")
def get_async_session_maker(get_async_engine):
    """Session factory producing ``AsyncSession`` objects bound to the async engine.

    NOTE(review): uses plain ``sessionmaker`` with ``class_=AsyncSession``
    rather than ``async_sessionmaker`` — presumably for SQLAlchemy <2.0
    compatibility; confirm before changing.
    """
    return sessionmaker(bind=get_async_engine, class_=AsyncSession)
82 |
83 |
@pytest.fixture(scope="function")
def pgmq_by_dsn(get_dsn):
    """PGMQueue constructed from a synchronous DSN only."""
    return PGMQueue(dsn=get_dsn)
88 |
89 |
@pytest.fixture(scope="function")
def pgmq_by_async_dsn(get_async_dsn):
    """PGMQueue constructed from an asynchronous DSN only."""
    return PGMQueue(dsn=get_async_dsn)
94 |
95 |
@pytest.fixture(scope="function")
def pgmq_by_engine(get_engine):
    """PGMQueue constructed from a synchronous engine."""
    return PGMQueue(engine=get_engine)
100 |
101 |
@pytest.fixture(scope="function")
def pgmq_by_async_engine(get_async_engine):
    """PGMQueue constructed from an asynchronous engine."""
    return PGMQueue(engine=get_async_engine)
106 |
107 |
@pytest.fixture(scope="function")
def pgmq_by_session_maker(get_session_maker):
    """PGMQueue constructed from a synchronous session factory."""
    return PGMQueue(session_maker=get_session_maker)
112 |
113 |
@pytest.fixture(scope="function")
def pgmq_by_async_session_maker(get_async_session_maker):
    """PGMQueue constructed from an asynchronous session factory."""
    return PGMQueue(session_maker=get_async_session_maker)
118 |
119 |
@pytest.fixture(scope="function")
def pgmq_by_dsn_and_engine(get_dsn, get_engine):
    """PGMQueue given both a DSN and an engine (DSN takes precedence checks)."""
    return PGMQueue(dsn=get_dsn, engine=get_engine)
124 |
125 |
@pytest.fixture(scope="function")
def pgmq_by_dsn_and_session_maker(get_dsn, get_session_maker):
    """PGMQueue given both a DSN and a session factory."""
    return PGMQueue(dsn=get_dsn, session_maker=get_session_maker)
130 |
131 |
@pytest.fixture(scope="function")
def db_session(get_session_maker) -> Session:
    """Plain SQLAlchemy session used by tests to inspect queue state directly.

    Fix: the original ``return``ed the session and never closed it, leaking a
    database connection per test. Yielding lets pytest run the close as teardown;
    consumers still receive the same ``Session`` object.
    """
    session = get_session_maker()
    yield session
    session.close()
135 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
110 | .pdm.toml
111 | .pdm-python
112 | .pdm-build/
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | # env files
165 | *.env
166 | # docker-compose volumes
167 | stateful_volumes/*
168 | # filelock temp files
169 | pgmq.meta.lock.txt
170 | # for doc build
171 | doc/_build/
--------------------------------------------------------------------------------
/doc/getting-started.rst:
--------------------------------------------------------------------------------
1 | .. _getting-started:
2 |
3 | Getting Started
4 | ===============
5 |
6 | .. Note::
7 | Make sure you have the following installed:
8 | * `Docker `_
9 | * `Docker Compose `_
10 |
11 | Postgres Setup
12 | --------------
13 |
14 | For quick setup:
15 |
16 | .. code-block:: bash
16 |
17 | docker run -d --name postgres -e POSTGRES_PASSWORD=postgres -p 5432:5432 quay.io/tembo/pg16-pgmq:latest
18 |
19 |
20 | Or using **Docker Compose** to start Postgres with ``PGMQ`` extension:
21 |
22 | ``docker-compose.yml``:
23 |
24 | .. code-block:: yaml
23 |
24 | version: '3.8'
25 | services:
26 | pgmq_postgres:
27 | container_name: pgmq_postgres
28 | image: quay.io/tembo/pg16-pgmq:latest
29 | environment:
30 | - POSTGRES_PASSWORD=postgres
31 | ports:
32 | - "5432:5432"
33 | volumes:
34 | - ./pgmq_postgres_volume:/var/lib/postgresql
35 |
36 | Then run the following command:
37 |
38 | .. code-block:: bash
39 |
40 | docker-compose up pgmq_postgres -d
41 |
42 |
43 | For more information, see `PGMQ GitHub `_.
44 |
45 | pgmq-sqlalchemy Setup
46 | ---------------------
47 |
48 | .. tip::
49 |
50 | See `API Reference `_ for **more examples and detailed usage**.
51 |
52 | For ``dispatcher.py``:
53 |
54 | .. code-block:: python
55 |
56 | from typing import List
57 | from pgmq_sqlalchemy import PGMQueue
58 |
59 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
60 |
61 | pgmq = PGMQueue(dsn=postgres_dsn)
62 | pgmq.create_queue('my_queue')
63 |
64 | msg = {'key': 'value', 'key2': 'value2'}
65 | msg_id:int = pgmq.send('my_queue', msg)
66 |
67 | # could also send a list of messages
68 | msg_ids:List[int] = pgmq.send_batch('my_queue', [msg, msg])
69 |
70 | .. seealso::
71 |
72 | .. _init_method: ref:`pgmq_sqlalchemy.PGMQueue.__init__`
73 | .. |init_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.__init__`
74 |
75 | .. _send_method: ref:`pgmq_sqlalchemy.PGMQueue.send`
76 | .. |send_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.send`
77 |
78 | See |init_method|_ for more options on how to initialize the ``PGMQueue`` object, and advanced usage with |send_method|_ on `API Reference `_.
79 |
80 |
81 | For ``consumer.py``:
82 |
83 | .. code-block:: python
84 |
85 | from pgmq_sqlalchemy import PGMQueue
86 | from pgmq_sqlalchemy.schema import Message
87 |
88 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
89 |
90 | pgmq = PGMQueue(dsn=postgres_dsn)
91 |
92 | # read a single message
93 | msg:Message = pgmq.read('my_queue')
94 |
95 | # read a batch of messages
96 | msgs:List[Message] = pgmq.read_batch('my_queue', 10)
97 |
98 | .. seealso::
99 |
100 | .. _read_with_poll_method: ref:`pgmq_sqlalchemy.PGMQueue.read_with_poll`
101 | .. |read_with_poll_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.read_with_poll`
102 |
103 | .. _read_method: ref:`pgmq_sqlalchemy.PGMQueue.read`
104 | .. |read_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.read`
105 |
106 | See |read_with_poll_method|_ for reading messages with long-polling, and advanced usage with |read_method|_ for **consumer retries mechanism** and more control over message consumption on `API Reference `_.
107 |
108 | For ``monitor.py``:
109 |
110 | .. code-block:: python
111 |
112 | from pgmq_sqlalchemy import PGMQueue
113 | from pgmq_sqlalchemy.schema import QueueMetrics
114 |
115 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
116 |
117 | pgmq = PGMQueue(dsn=postgres_dsn)
118 |
119 | # get queue metrics
120 | metrics:QueueMetrics = pgmq.metrics('my_queue')
121 | print(metrics.queue_length)
122 | print(metrics.total_messages)
123 |
124 |
125 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://python-poetry.org/)
2 | [](https://github.com/astral-sh/ruff)
3 | 
4 | [](https://github.com/jason810496/pgmq-sqlalchemy/blob/main/LICENSE)
5 | [](https://pypi.python.org/pypi/pgmq-sqlalchemy)
6 | [](https://codecov.io/gh/jason810496/pgmq-sqlalchemy)
7 | [](https://pgmq-sqlalchemy.readthedocs.io/en/latest/?badge=latest)
8 |
9 |
10 | # pgmq-sqlalchemy
11 |
12 | More flexible [PGMQ Postgres extension](https://github.com/tembo-io/pgmq) Python client that uses **sqlalchemy ORM**, supporting both **async** and **sync** `engines`, `sessionmakers` or built from `dsn`.
13 |
14 | ## Table of Contents
15 |
16 | * [pgmq-sqlalchemy](#pgmq-sqlalchemy)
17 | * [Features](#features)
18 | * [Installation](#installation)
19 | * [Getting Started](#getting-started)
20 | * [Postgres Setup](#postgres-setup)
21 | * [Usage](#usage)
22 | * [Issue/ Contributing / Development](#issue-contributing--development)
23 | * [TODO](#todo)
24 |
25 |
26 | ## Features
27 |
28 | - Supports **async** and **sync** `engines` and `sessionmakers`, or built from `dsn`.
29 | - **Automatically** creates `pgmq` (or `pg_partman`) extension on the database if not exists.
30 | - Supports **all postgres DBAPIs supported by sqlalchemy**.
31 | > e.g. `psycopg`, `psycopg2`, `asyncpg` ..
32 | > See [SQLAlchemy Postgresql Dialects](https://docs.sqlalchemy.org/en/20/dialects/postgresql.html)
33 |
34 | ## Installation
35 |
36 | Install with pip:
37 |
38 | ```bash
39 | pip install pgmq-sqlalchemy
40 | ```
41 |
42 | Install with additional DBAPIs packages:
43 |
44 | ```bash
45 | pip install "pgmq-sqlalchemy[asyncpg]"
46 | pip install "pgmq-sqlalchemy[psycopg2-binary]"
47 | # pip install "pgmq-sqlalchemy[postgres-python-driver]"
48 | ```
49 |
50 | ## Getting Started
51 |
52 | ### Postgres Setup
53 |
54 | Prerequisites: **Postgres** with **PGMQ** extension installed.
55 | For quick setup:
56 | ```bash
57 | docker run -d --name postgres -e POSTGRES_PASSWORD=postgres -p 5432:5432 quay.io/tembo/pg16-pgmq:latest
58 | ```
59 | > For more information, see [PGMQ](https://github.com/tembo-io/pgmq)
60 |
61 | ### Usage
62 |
63 | > [!NOTE]
64 | > Check [pgmq-sqlalchemy Document](https://pgmq-sqlalchemy.readthedocs.io/en/latest/) for more examples and detailed usage.
65 |
66 |
67 | For `dispatcher.py`:
68 | ```python
69 | from typing import List
70 | from pgmq_sqlalchemy import PGMQueue
71 |
72 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
73 |
74 | pgmq = PGMQueue(dsn=postgres_dsn)
75 | pgmq.create_queue('my_queue')
76 |
77 | msg = {'key': 'value', 'key2': 'value2'}
78 | msg_id:int = pgmq.send('my_queue', msg)
79 |
80 | # could also send a list of messages
81 | msg_ids:List[int] = pgmq.send_batch('my_queue', [msg, msg])
82 | ```
83 |
84 | For `consumer.py`:
85 | ```python
86 | from pgmq_sqlalchemy import PGMQueue
87 | from pgmq_sqlalchemy.schema import Message
88 |
89 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
90 |
91 | pgmq = PGMQueue(dsn=postgres_dsn)
92 |
93 | # read a single message
94 | msg:Message = pgmq.read('my_queue')
95 |
96 | # read a batch of messages
97 | msgs:List[Message] = pgmq.read_batch('my_queue', 10)
98 | ```
99 |
100 | For `monitor.py`:
101 | ```python
102 | from pgmq_sqlalchemy import PGMQueue
103 | from pgmq_sqlalchemy.schema import QueueMetrics
104 |
105 | postgres_dsn = 'postgresql://postgres:postgres@localhost:5432/postgres'
106 |
107 | pgmq = PGMQueue(dsn=postgres_dsn)
108 |
109 | # get queue metrics
110 | metrics:QueueMetrics = pgmq.metrics('my_queue')
111 | print(metrics.queue_length)
112 | print(metrics.total_messages)
113 | ```
114 |
115 | ## Issue/ Contributing / Development
116 |
117 | Welcome to open an issue or pull request !
118 | See [`Development` on Online Document](https://pgmq-sqlalchemy.readthedocs.io/en/latest/) or [CONTRIBUTING.md](.github/CONTRIBUTING.md) for more information.
119 |
120 | ## TODO
121 |
122 | - [ ] Add **time-based** partition option and validation to `create_partitioned_queue` method.
123 | - [ ] Read(single/batch) Archive Table ( `read_archive` method )
124 | - [ ] Detach Archive Table ( `detach_archive` method )
125 | - [ ] Add `set_vt` utils method.
--------------------------------------------------------------------------------
/tests/test_queue.py:
--------------------------------------------------------------------------------
1 | import uuid
2 | import pytest
3 | import time
4 |
5 | from sqlalchemy.exc import ProgrammingError
6 | from filelock import FileLock
7 | from pgmq_sqlalchemy import PGMQueue
8 |
9 | from tests.fixture_deps import (
10 | pgmq_deps,
11 | PGMQ_WITH_QUEUE,
12 | pgmq_setup_teardown,
13 | pgmq_partitioned_setup_teardown,
14 | )
15 |
16 | from tests._utils import check_queue_exists
17 | from tests.constant import MSG, LOCK_FILE_NAME
18 |
# Referenced here so linters don't flag the fixture imports above as unused;
# pytest resolves these fixtures through this module's namespace.
use_fixtures = [
    pgmq_setup_teardown,
    pgmq_partitioned_setup_teardown,
]
24 |
@pgmq_deps
def test_create_queue(pgmq_fixture, db_session):
    """create_queue should make the queue visible, for every construction style."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    pgmq.create_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is True
31 |
32 |
@pgmq_deps
def test_create_partitioned_queue(pgmq_fixture, db_session):
    """create_partitioned_queue should make the queue visible, for every construction style."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    pgmq.create_partitioned_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is True
39 |
40 |
def test_create_same_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE, db_session):
    """Creating an already-existing queue must be idempotent (no exception)."""
    pgmq, queue_name = pgmq_setup_teardown
    pgmq.create_queue(queue_name)
    assert check_queue_exists(db_session, queue_name) is True
    pgmq.create_queue(queue_name)
    # `create_queue` with the same queue name should not raise an exception
    # and the queue should still exist
    assert check_queue_exists(db_session, queue_name) is True
49 |
50 |
@pgmq_deps
def test_validate_queue_name(pgmq_fixture):
    """validate_queue_name accepts names up to 48 chars and rejects longer ones."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    pgmq.validate_queue_name(queue_name)
    # `queue_name` should be a less than 48 characters
    with pytest.raises(Exception) as e:
        pgmq.validate_queue_name("a" * 49)
    # `.orig` is the underlying DBAPI error raised by the pgmq extension.
    error_msg: str = str(e.value.orig)
    assert "queue name is too long, maximum length is 48 characters" in error_msg
61 |
62 |
def test_drop_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Drop behavior is exercised entirely by the fixture's teardown asserts."""
    _ = pgmq_setup_teardown
66 |
67 |
@pgmq_deps
def test_drop_non_exist_queue(pgmq_fixture, db_session):
    """Dropping a queue that does not exist must raise ProgrammingError."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    assert check_queue_exists(db_session, queue_name) is False
    with pytest.raises(ProgrammingError):
        pgmq.drop_queue(queue_name)
75 |
76 |
def test_drop_partitioned_queue(pgmq_partitioned_setup_teardown: PGMQ_WITH_QUEUE):
    """Partitioned drop behavior is exercised by the fixture's teardown asserts."""
    _ = pgmq_partitioned_setup_teardown
80 |
81 |
@pgmq_deps
def test_drop_non_exist_partitioned_queue(pgmq_fixture, db_session):
    """Dropping a non-existent partitioned queue must raise ProgrammingError."""
    pgmq: PGMQueue = pgmq_fixture
    queue_name = f"test_queue_{uuid.uuid4().hex}"
    assert check_queue_exists(db_session, queue_name) is False
    with pytest.raises(ProgrammingError):
        pgmq.drop_queue(queue_name, partitioned=True)
89 |
90 |
def test_list_queues(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """list_queues must include the queue created by the fixture."""
    pgmq, queue_name = pgmq_setup_teardown
    queues = pgmq.list_queues()
    assert queue_name in queues
95 |
96 |
def test_list_partitioned_queues(pgmq_partitioned_setup_teardown: PGMQ_WITH_QUEUE):
    """list_queues must also include partitioned queues."""
    pgmq, queue_name = pgmq_partitioned_setup_teardown
    queues = pgmq.list_queues()
    assert queue_name in queues
101 |
102 |
def test_send_and_read_msg(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """A sent message should be read back with the same payload and msg_id."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id: int = pgmq.send(queue_name, msg)
    msg_read = pgmq.read(queue_name)
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
110 |
111 |
def test_send_and_read_msg_with_delay(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """A message sent with delay=2 is invisible until ~2s have elapsed."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id: int = pgmq.send(queue_name, msg, delay=2)
    msg_read = pgmq.read(queue_name)
    assert msg_read is None  # immediately: still delayed
    time.sleep(1)
    msg_read = pgmq.read(queue_name)
    assert msg_read is None  # at ~1s: still delayed
    time.sleep(1.1)
    msg_read = pgmq.read(queue_name)
    # at ~2.1s: the delay has expired and the message is visible
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
125 |
126 |
def test_send_and_read_msg_with_vt(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Reading with vt=2 hides the message from subsequent reads for ~2s."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id: int = pgmq.send(queue_name, msg)
    msg_read = pgmq.read(queue_name, vt=2)
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
    time.sleep(1.5)
    msg_read = pgmq.read(queue_name)
    assert msg_read is None  # at ~1.5s: still within the visibility timeout
    time.sleep(0.6)
    msg_read = pgmq.read(queue_name)
    # at ~2.1s: vt expired, message is visible again
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
141 |
142 |
def test_send_and_read_msg_with_vt_and_delay(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Delay and visibility timeout compose: delay gates first read, vt gates re-read."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id: int = pgmq.send(queue_name, msg, delay=2)
    msg_read = pgmq.read(queue_name, vt=2)
    assert msg_read is None  # still delayed
    time.sleep(1)
    msg_read = pgmq.read(queue_name, vt=2)
    assert msg_read is None  # at ~1s: still delayed
    time.sleep(1.1)
    msg_read = pgmq.read(queue_name, vt=2)
    # at ~2.1s: delay expired, first successful read sets vt=2
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
    time.sleep(1.5)
    msg_read = pgmq.read(queue_name)
    assert msg_read is None  # within the vt window from the successful read
    time.sleep(0.6)
    msg_read = pgmq.read(queue_name)
    # vt expired: message visible again
    assert msg_read.message == msg
    assert msg_read.msg_id == msg_id
163 |
164 |
def test_read_empty_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Reading an empty queue returns None rather than raising."""
    pgmq, queue_name = pgmq_setup_teardown
    msg_read = pgmq.read(queue_name)
    assert msg_read is None
169 |
170 |
def test_read_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """read_batch returns up to N messages, in send order."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id_1: int = pgmq.send(queue_name, msg)
    msg_id_2: int = pgmq.send(queue_name, msg)
    # ask for 3 but only 2 exist — should return exactly the 2 available
    msg_read = pgmq.read_batch(queue_name, 3)
    assert len(msg_read) == 2
    assert msg_read[0].message == msg
    assert msg_read[0].msg_id == msg_id_1
    assert msg_read[1].message == msg
    assert msg_read[1].msg_id == msg_id_2
182 |
183 |
def test_read_batch_empty_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """read_batch on an empty queue returns None (not an empty list)."""
    pgmq, queue_name = pgmq_setup_teardown
    msg_read = pgmq.read_batch(queue_name, 3)
    assert msg_read is None
188 |
189 |
def test_send_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """send_batch returns sequential msg_ids starting at 1 on a fresh queue."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_ids = pgmq.send_batch(queue_name=queue_name, messages=[msg, msg, msg])
    assert len(msg_ids) == 3
    # the fixture creates a brand-new queue, so ids start from 1
    assert msg_ids == [1, 2, 3]
196 |
197 |
def test_send_batch_with_read_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Messages written via send_batch round-trip through read_batch in order."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_ids = pgmq.send_batch(queue_name=queue_name, messages=[msg, msg, msg])
    assert len(msg_ids) == 3
    assert msg_ids == [1, 2, 3]
    msg_read_batch = pgmq.read_batch(queue_name, 3)
    assert len(msg_read_batch) == 3
    assert [msg_read.message for msg_read in msg_read_batch] == [msg, msg, msg]
    assert [msg_read.msg_id for msg_read in msg_read_batch] == [1, 2, 3]
208 |
209 |
def test_read_with_poll(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """read_with_poll blocks until delayed messages become visible, then returns qty."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    # messages hidden for 2s; polling every ~1s must wait them out
    msg_ids = pgmq.send_batch(queue_name, [msg, msg, msg, msg, msg], delay=2)
    start_time = time.time()
    msg_reads = pgmq.read_with_poll(
        queue_name,
        vt=1000,
        qty=3,
        max_poll_seconds=5,
        poll_interval_ms=1001,
    )
    end_time = time.time()
    duration = end_time - start_time
    assert len(msg_reads) == 3
    assert [msg_read.msg_id for msg_read in msg_reads] == msg_ids[:3]
    # must have waited past the 2s delay but returned before the 5s poll cap
    assert duration < 5 and duration > 2
227 |
228 |
def test_read_with_poll_with_empty_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """On an empty queue read_with_poll polls for the full window, then returns None."""
    pgmq, queue_name = pgmq_setup_teardown
    start_time = time.time()
    msg_reads = pgmq.read_with_poll(
        queue_name,
        vt=1000,
        qty=3,
        max_poll_seconds=2,
        poll_interval_ms=100,
    )
    end_time = time.time()
    duration = end_time - start_time
    assert msg_reads is None
    # should have blocked for (approximately) the whole 2-second window
    assert duration > 1.9
243 |
244 |
def test_set_vt(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """set_vt should hide the message until the new visibility timeout expires.

    Bug fix: the original asserted ``msg is not None`` — ``msg`` is the constant
    ``MSG`` and can never be None, so the return value of ``set_vt`` was never
    actually checked. Assert ``msg_read`` (the value returned by ``set_vt``)
    instead.
    """
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id = pgmq.send(queue_name, msg)
    msg_read = pgmq.set_vt(queue_name, msg_id, 2)
    assert msg_read is not None
    assert pgmq.read(queue_name) is None  # hidden by the 2s vt
    time.sleep(1.5)
    assert pgmq.read(queue_name) is None  # still within the vt window
    time.sleep(0.6)
    msg_read = pgmq.read(queue_name)
    # vt expired at ~2.1s: message visible again with the same payload
    assert msg_read.message == msg
257 |
258 |
def test_set_vt_to_smaller_value(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Lowering a message's visibility timeout makes it visible sooner.

    Bug fix: the original discarded the first read into ``_`` and then asserted
    ``msg is not None`` — ``msg`` is the constant ``MSG`` and can never be None.
    Capture the read result and assert it instead.
    """
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_id = pgmq.send(queue_name, msg)
    first_read = pgmq.read(queue_name, vt=5)  # set vt to 5 seconds
    assert first_read is not None
    assert pgmq.read(queue_name) is None  # hidden by the 5s vt
    time.sleep(0.5)
    # shrink the remaining vt from ~4.5s down to 1s
    assert pgmq.set_vt(queue_name, msg_id, 1) is not None
    time.sleep(0.3)
    assert pgmq.read(queue_name) is None  # new 1s vt not yet expired
    time.sleep(0.8)
    assert pgmq.read(queue_name) is not None  # new vt expired: visible again
272 |
273 |
def test_pop(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """pop removes and returns the first message; the rest stay readable."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_ids = pgmq.send_batch(queue_name, [msg, msg, msg])
    msg = pgmq.pop(queue_name)  # NOTE: rebinds `msg` to the popped message
    assert msg.msg_id == msg_ids[0]
    assert msg.message == MSG
    msg_reads = pgmq.read_batch(queue_name, 3)
    assert len(msg_reads) == 2
    assert [msg_read.msg_id for msg_read in msg_reads] == msg_ids[1:]
284 |
285 |
def test_pop_empty_queue(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """pop on an empty queue returns None rather than raising."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = pgmq.pop(queue_name)
    assert msg is None
290 |
291 |
def test_delete_msg(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """delete returns True and removes exactly the targeted message."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_ids = pgmq.send_batch(queue_name, [msg, msg, msg])
    assert pgmq.delete(queue_name, msg_ids[1]) is True
    msg_reads = pgmq.read_batch(queue_name, 3)
    assert len(msg_reads) == 2
    assert [msg_read.msg_id for msg_read in msg_reads] == [msg_ids[0], msg_ids[2]]
300 |
301 |
def test_delete_msg_not_exist(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """delete of a non-existent msg_id returns False and leaves the queue intact."""
    pgmq, queue_name = pgmq_setup_teardown
    msg = MSG
    msg_ids = pgmq.send_batch(queue_name, [msg, msg, msg])
    assert pgmq.delete(queue_name, 999) is False
    msg_reads = pgmq.read_batch(queue_name, 3)
    assert len(msg_reads) == 3
    assert [msg_read.msg_id for msg_read in msg_reads] == msg_ids
310 |
311 |
def test_delete_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """`delete_batch` removes the listed ids and echoes them back."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    targets = [sent_ids[0], sent_ids[2]]
    assert pgmq.delete_batch(queue_name, targets) == targets
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 1
    assert [m.msg_id for m in remaining] == [sent_ids[1]]
323 |
324 |
def test_delete_batch_not_exist(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Batch-deleting unknown ids returns an empty list and deletes nothing."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    assert pgmq.delete_batch(queue_name, [999, 998]) == []
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 3
    assert [m.msg_id for m in remaining] == sent_ids
333 |
334 |
def test_archive(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """`archive` moves exactly the targeted message out of the live queue."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    assert pgmq.archive(queue_name, sent_ids[0]) is True
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 2
    assert [m.msg_id for m in remaining] == [sent_ids[1], sent_ids[2]]
343 |
344 |
def test_archive_not_exist(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Archiving an unknown msg_id returns False and leaves the queue untouched."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    assert pgmq.archive(queue_name, 999) is False
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 3
    assert [m.msg_id for m in remaining] == sent_ids
353 |
354 |
def test_archive_batch(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """`archive_batch` archives the listed ids and echoes them back."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    targets = [sent_ids[0], sent_ids[2]]
    assert pgmq.archive_batch(queue_name, targets) == targets
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 1
    assert [m.msg_id for m in remaining] == [sent_ids[1]]
366 |
367 |
def test_archive_batch_not_exist(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """Batch-archiving unknown ids returns an empty list and archives nothing."""
    pgmq, queue_name = pgmq_setup_teardown
    sent_ids = pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    assert pgmq.archive_batch(queue_name, [999, 998]) == []
    remaining = pgmq.read_batch(queue_name, 3)
    assert len(remaining) == 3
    assert [m.msg_id for m in remaining] == sent_ids
376 |
377 |
def test_purge(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """`purge` drops every message and reports how many were removed."""
    pgmq, queue_name = pgmq_setup_teardown
    assert pgmq.purge(queue_name) == 0  # an empty queue purges nothing
    pgmq.send_batch(queue_name, [MSG, MSG, MSG])
    assert pgmq.purge(queue_name) == 3
384 |
385 |
def test_metrics(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    """A freshly created queue reports empty metrics."""
    pgmq, queue_name = pgmq_setup_teardown
    stats = pgmq.metrics(queue_name)
    assert stats is not None
    assert stats.queue_name == queue_name
    assert stats.queue_length == 0
    assert stats.newest_msg_age_sec is None
    assert stats.oldest_msg_age_sec is None
    assert stats.total_messages == 0
395 |
396 |
def test_metrics_all_queues(pgmq_setup_teardown: PGMQ_WITH_QUEUE):
    # The default PostgreSQL isolation level is `READ COMMITTED`, and
    # pytest-xdist runs tests in **multi-process** mode, so phantom reads can occur:
    # - `pgmq.metrics_all()` first fetches the queue list, then the metrics for each queue.
    # - If another process tears a queue down in between, pgmq raises that
    #   `{queue_name}` does not exist.
    # Serialize this test with a file lock to avoid the race.
    with FileLock(LOCK_FILE_NAME):
        pgmq, first_queue = pgmq_setup_teardown
        second_queue = f"test_queue_{uuid.uuid4().hex}"
        pgmq.create_queue(second_queue)
        pgmq.send_batch(first_queue, [MSG, MSG, MSG])
        pgmq.send_batch(second_queue, [MSG, MSG])
        by_name = {m.queue_name: m for m in pgmq.metrics_all()}
        assert by_name[first_queue].queue_length == 3
        assert by_name[second_queue].queue_length == 2
        assert by_name[first_queue].total_messages == 3
        assert by_name[second_queue].total_messages == 2
415 |
--------------------------------------------------------------------------------
/pgmq_sqlalchemy/queue.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from typing import List, Optional
3 |
4 | from sqlalchemy import create_engine, text
5 | from sqlalchemy.orm import sessionmaker
6 | from sqlalchemy.ext.asyncio import create_async_engine
7 |
8 | from .schema import Message, QueueMetrics
9 | from ._types import ENGINE_TYPE
10 | from ._utils import (
11 | get_session_type,
12 | is_async_session_maker,
13 | is_async_dsn,
14 | encode_dict_to_psql,
15 | encode_list_to_psql,
16 | )
17 |
18 |
class PGMQueue:
    """``pgmq`` client backed by a SQLAlchemy engine/session (sync or async)."""

    # SQLAlchemy engine; stays None when constructed from a session_maker only.
    engine: ENGINE_TYPE = None
    # Factory producing Session / AsyncSession objects used by every query.
    session_maker: sessionmaker = None
    # Default send delay in seconds.
    delay: int = 0
    # Default visibility timeout (seconds) used when a read does not specify one.
    vt: int = 30

    # True when the underlying engine/session is async.
    is_async: bool = False
    # Memo flag: set once the pg_partman extension has been ensured.
    is_pg_partman_ext_checked: bool = False
    # Private event loop used to drive async sessions from the sync API.
    loop: asyncio.AbstractEventLoop = None
28 |
29 | def __init__(
30 | self,
31 | dsn: Optional[str] = None,
32 | engine: Optional[ENGINE_TYPE] = None,
33 | session_maker: Optional[sessionmaker] = None,
34 | ) -> None:
35 | """
36 |
37 | | There are **3** ways to initialize ``PGMQueue`` class:
38 | | 1. Initialize with a ``dsn``:
39 |
40 | .. code-block:: python
41 |
42 | from pgmq_sqlalchemy import PGMQueue
43 |
44 | pgmq_client = PGMQueue(dsn='postgresql+psycopg://postgres:postgres@localhost:5432/postgres')
45 | # or async dsn
46 | async_pgmq_client = PGMQueue(dsn='postgresql+asyncpg://postgres:postgres@localhost:5432/postgres')
47 |
48 | | 2. Initialize with an ``engine`` or ``async_engine``:
49 |
50 | .. code-block:: python
51 |
52 | from pgmq_sqlalchemy import PGMQueue
53 | from sqlalchemy import create_engine
54 | from sqlalchemy.ext.asyncio import create_async_engine
55 |
56 | engine = create_engine('postgresql+psycopg://postgres:postgres@localhost:5432/postgres')
57 | pgmq_client = PGMQueue(engine=engine)
58 | # or async engine
59 | async_engine = create_async_engine('postgresql+asyncpg://postgres:postgres@localhost:5432/postgres')
60 | async_pgmq_client = PGMQueue(engine=async_engine)
61 |
62 | | 3. Initialize with a ``session_maker``:
63 |
64 | .. code-block:: python
65 |
66 | from pgmq_sqlalchemy import PGMQueue
67 | from sqlalchemy.orm import sessionmaker
68 | from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
69 |
70 | engine = create_engine('postgresql+psycopg://postgres:postgres@localhost:5432/postgres')
71 | session_maker = sessionmaker(bind=engine)
72 | pgmq_client = PGMQueue(session_maker=session_maker)
73 | # or async session_maker
74 | async_engine = create_async_engine('postgresql+asyncpg://postgres:postgres@localhost:5432/post
75 | async_session_maker = sessionmaker(bind=async_engine, class_=AsyncSession)
76 | async_pgmq_client = PGMQueue(session_maker=async_session_maker)
77 |
78 | .. note::
79 | | ``PGMQueue`` will **auto create** the ``pgmq`` extension ( and ``pg_partman`` extension if the method is related with **partitioned_queue** ) if it does not exist in the Postgres.
80 | | But you must make sure that the ``pgmq`` extension ( or ``pg_partman`` extension ) already **installed** in the Postgres.
81 | """
82 | if not dsn and not engine and not session_maker:
83 | raise ValueError("Must provide either dsn, engine, or session_maker")
84 | # initialize the engine and session_maker
85 | if session_maker:
86 | self.session_maker = session_maker
87 | self.is_async = is_async_session_maker(session_maker)
88 | elif engine:
89 | self.engine = engine
90 | self.is_async = self.engine.dialect.is_async
91 | self.session_maker = sessionmaker(
92 | bind=self.engine, class_=get_session_type(self.engine)
93 | )
94 | else:
95 | self.engine = (
96 | create_async_engine(dsn) if is_async_dsn(dsn) else create_engine(dsn)
97 | )
98 | self.is_async = self.engine.dialect.is_async
99 | self.session_maker = sessionmaker(
100 | bind=self.engine, class_=get_session_type(self.engine)
101 | )
102 |
103 | if self.is_async:
104 | self.loop = asyncio.new_event_loop()
105 |
106 | # create pgmq extension if not exists
107 | self._check_pgmq_ext()
108 |
109 | async def _check_pgmq_ext_async(self) -> None:
110 | """Check if the pgmq extension exists."""
111 | async with self.session_maker() as session:
112 | await session.execute(text("create extension if not exists pgmq cascade;"))
113 | await session.commit()
114 |
115 | def _check_pgmq_ext_sync(self) -> None:
116 | """Check if the pgmq extension exists."""
117 | with self.session_maker() as session:
118 | session.execute(text("create extension if not exists pgmq cascade;"))
119 | session.commit()
120 |
121 | def _check_pgmq_ext(self) -> None:
122 | """Check if the pgmq extension exists."""
123 | if self.is_async:
124 | return self.loop.run_until_complete(self._check_pgmq_ext_async())
125 | return self._check_pgmq_ext_sync()
126 |
127 | async def _check_pg_partman_ext_async(self) -> None:
128 | """Check if the pg_partman extension exists."""
129 | async with self.session_maker() as session:
130 | await session.execute(
131 | text("create extension if not exists pg_partman cascade;")
132 | )
133 | await session.commit()
134 |
135 | def _check_pg_partman_ext_sync(self) -> None:
136 | """Check if the pg_partman extension exists."""
137 | with self.session_maker() as session:
138 | session.execute(text("create extension if not exists pg_partman cascade;"))
139 | session.commit()
140 |
141 | def _check_pg_partman_ext(self) -> None:
142 | """Check if the pg_partman extension exists."""
143 | if self.is_pg_partman_ext_checked:
144 | return
145 | self.is_pg_partman_ext_checked
146 |
147 | if self.is_async:
148 | return self.loop.run_until_complete(self._check_pg_partman_ext_async())
149 | return self._check_pg_partman_ext_sync()
150 |
151 | def _create_queue_sync(self, queue_name: str, unlogged: bool = False) -> None:
152 | """ """
153 | with self.session_maker() as session:
154 | if unlogged:
155 | session.execute(
156 | text("select pgmq.create_unlogged(:queue);"), {"queue": queue_name}
157 | )
158 | else:
159 | session.execute(
160 | text("select pgmq.create(:queue);"), {"queue": queue_name}
161 | )
162 | session.commit()
163 |
164 | async def _create_queue_async(
165 | self, queue_name: str, unlogged: bool = False
166 | ) -> None:
167 | """Create a new queue."""
168 | async with self.session_maker() as session:
169 | if unlogged:
170 | await session.execute(
171 | text("select pgmq.create_unlogged(:queue);"), {"queue": queue_name}
172 | )
173 | else:
174 | await session.execute(
175 | text("select pgmq.create(:queue);"), {"queue": queue_name}
176 | )
177 | await session.commit()
178 |
179 | def create_queue(self, queue_name: str, unlogged: bool = False) -> None:
180 | """
181 | .. _unlogged_table: https://www.postgresql.org/docs/current/sql-createtable.html#SQL-CREATETABLE-UNLOGGED
182 | .. |unlogged_table| replace:: **UNLOGGED TABLE**
183 |
184 | **Create a new queue.**
185 |
186 | * if ``unlogged`` is ``True``, the queue will be created as an |unlogged_table|_ .
187 | * ``queue_name`` must be **less than 48 characters**.
188 |
189 | .. code-block:: python
190 |
191 | pgmq_client.create_queue('my_queue')
192 | # or unlogged table queue
193 | pgmq_client.create_queue('my_queue', unlogged=True)
194 |
195 | """
196 | if self.is_async:
197 | return self.loop.run_until_complete(
198 | self._create_queue_async(queue_name, unlogged)
199 | )
200 | return self._create_queue_sync(queue_name, unlogged)
201 |
202 | def _create_partitioned_queue_sync(
203 | self,
204 | queue_name: str,
205 | partition_interval: str,
206 | retention_interval: str,
207 | ) -> None:
208 | """Create a new partitioned queue."""
209 | with self.session_maker() as session:
210 | session.execute(
211 | text(
212 | "select pgmq.create_partitioned(:queue_name, :partition_interval, :retention_interval);"
213 | ),
214 | {
215 | "queue_name": queue_name,
216 | "partition_interval": partition_interval,
217 | "retention_interval": retention_interval,
218 | },
219 | )
220 | session.commit()
221 |
222 | async def _create_partitioned_queue_async(
223 | self,
224 | queue_name: str,
225 | partition_interval: str,
226 | retention_interval: str,
227 | ) -> None:
228 | """Create a new partitioned queue."""
229 | async with self.session_maker() as session:
230 | await session.execute(
231 | text(
232 | "select pgmq.create_partitioned(:queue_name, :partition_interval, :retention_interval);"
233 | ),
234 | {
235 | "queue_name": queue_name,
236 | "partition_interval": partition_interval,
237 | "retention_interval": retention_interval,
238 | },
239 | )
240 | await session.commit()
241 |
242 | def create_partitioned_queue(
243 | self,
244 | queue_name: str,
245 | partition_interval: int = 10000,
246 | retention_interval: int = 100000,
247 | ) -> None:
248 | """Create a new **partitioned** queue.
249 |
250 | .. _pgmq_partitioned_queue: https://github.com/tembo-io/pgmq?tab=readme-ov-file#partitioned-queues
251 | .. |pgmq_partitioned_queue| replace:: **PGMQ: Partitioned Queues**
252 |
253 | .. code-block:: python
254 |
255 | pgmq_client.create_partitioned_queue('my_partitioned_queue', partition_interval=10000, retention_interval=100000)
256 |
257 | Args:
258 | queue_name (str): The name of the queue, should be less than 48 characters.
259 | partition_interval (int): Will create a new partition every ``partition_interval`` messages.
260 | retention_interval (int): The interval for retaining partitions. Any messages that have a `msg_id` less than ``max(msg_id)`` - ``retention_interval`` will be dropped.
261 |
262 | .. note::
263 | | Currently, only support for partitioning by **msg_id**.
264 | | Will add **time-based partitioning** in the future ``pgmq-sqlalchemy`` release.
265 |
266 | .. important::
267 | | You must make sure that the ``pg_partman`` extension already **installed** in the Postgres.
268 | | ``pgmq-sqlalchemy`` will **auto create** the ``pg_partman`` extension if it does not exist in the Postgres.
269 | | For more details about ``pgmq`` with ``pg_partman``, checkout the |pgmq_partitioned_queue|_.
270 |
271 |
272 | """
273 | # check if the pg_partman extension exists before creating a partitioned queue at runtime
274 | self._check_pg_partman_ext()
275 |
276 | if self.is_async:
277 | return self.loop.run_until_complete(
278 | self._create_partitioned_queue_async(
279 | queue_name, str(partition_interval), str(retention_interval)
280 | )
281 | )
282 | return self._create_partitioned_queue_sync(
283 | queue_name, str(partition_interval), str(retention_interval)
284 | )
285 |
286 | def _validate_queue_name_sync(self, queue_name: str) -> None:
287 | """Validate the length of a queue name."""
288 | with self.session_maker() as session:
289 | session.execute(
290 | text("select pgmq.validate_queue_name(:queue);"), {"queue": queue_name}
291 | )
292 | session.commit()
293 |
294 | async def _validate_queue_name_async(self, queue_name: str) -> None:
295 | """Validate the length of a queue name."""
296 | async with self.session_maker() as session:
297 | await session.execute(
298 | text("select pgmq.validate_queue_name(:queue);"), {"queue": queue_name}
299 | )
300 | await session.commit()
301 |
302 | def validate_queue_name(self, queue_name: str) -> None:
303 | """
304 | * Will raise an error if the ``queue_name`` is more than 48 characters.
305 | """
306 | if self.is_async:
307 | return self.loop.run_until_complete(
308 | self._validate_queue_name_async(queue_name)
309 | )
310 | return self._validate_queue_name_sync(queue_name)
311 |
312 | def _drop_queue_sync(self, queue: str, partitioned: bool = False) -> bool:
313 | """Drop a queue."""
314 | with self.session_maker() as session:
315 | row = session.execute(
316 | text("select pgmq.drop_queue(:queue, :partitioned);"),
317 | {"queue": queue, "partitioned": partitioned},
318 | ).fetchone()
319 | session.commit()
320 | return row[0]
321 |
322 | async def _drop_queue_async(self, queue: str, partitioned: bool = False) -> bool:
323 | """Drop a queue."""
324 | async with self.session_maker() as session:
325 | row = (
326 | await session.execute(
327 | text("select pgmq.drop_queue(:queue, :partitioned);"),
328 | {"queue": queue, "partitioned": partitioned},
329 | )
330 | ).fetchone()
331 | await session.commit()
332 | return row[0]
333 |
334 | def drop_queue(self, queue: str, partitioned: bool = False) -> bool:
335 | """Drop a queue.
336 |
337 | .. _drop_queue_method: ref:`pgmq_sqlalchemy.PGMQueue.drop_queue`
338 | .. |drop_queue_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.drop_queue`
339 |
340 | .. code-block:: python
341 |
342 | pgmq_client.drop_queue('my_queue')
343 | # for partitioned queue
344 | pgmq_client.drop_queue('my_partitioned_queue', partitioned=True)
345 |
346 | .. warning::
347 | | All messages and queue itself will be deleted. (``pgmq.q_`` table)
348 | | **Archived tables** (``pgmq.a_`` table **will be dropped as well. )**
349 | |
350 | | See |archive_method|_ for more details.
351 | """
352 | # check if the pg_partman extension exists before dropping a partitioned queue at runtime
353 | if partitioned:
354 | self._check_pg_partman_ext()
355 |
356 | if self.is_async:
357 | return self.loop.run_until_complete(
358 | self._drop_queue_async(queue, partitioned)
359 | )
360 | return self._drop_queue_sync(queue, partitioned)
361 |
362 | def _list_queues_sync(self) -> List[str]:
363 | """List all queues."""
364 | with self.session_maker() as session:
365 | rows = session.execute(
366 | text("select queue_name from pgmq.list_queues();")
367 | ).fetchall()
368 | session.commit()
369 | return [row[0] for row in rows]
370 |
371 | async def _list_queues_async(self) -> List[str]:
372 | """List all queues."""
373 | async with self.session_maker() as session:
374 | rows = (
375 | await session.execute(
376 | text("select queue_name from pgmq.list_queues();")
377 | )
378 | ).fetchall()
379 | await session.commit()
380 | return [row[0] for row in rows]
381 |
382 | def list_queues(self) -> List[str]:
383 | """List all queues.
384 |
385 | .. code-block:: python
386 |
387 | queue_list = pgmq_client.list_queues()
388 | print(queue_list)
389 | """
390 | if self.is_async:
391 | return self.loop.run_until_complete(self._list_queues_async())
392 | return self._list_queues_sync()
393 |
394 | def _send_sync(self, queue_name: str, message: str, delay: int = 0) -> int:
395 | with self.session_maker() as session:
396 | row = (
397 | session.execute(
398 | text(f"select * from pgmq.send('{queue_name}',{message},{delay});")
399 | )
400 | ).fetchone()
401 | session.commit()
402 | return row[0]
403 |
404 | async def _send_async(self, queue_name: str, message: str, delay: int = 0) -> int:
405 | async with self.session_maker() as session:
406 | row = (
407 | await session.execute(
408 | text(f"select * from pgmq.send('{queue_name}',{message},{delay});")
409 | )
410 | ).fetchone()
411 | await session.commit()
412 | return row[0]
413 |
414 | def send(self, queue_name: str, message: dict, delay: int = 0) -> int:
415 | """Send a message to a queue.
416 |
417 | .. code-block:: python
418 |
419 | msg_id = pgmq_client.send('my_queue', {'key': 'value', 'key2': 'value2'})
420 | print(msg_id)
421 |
422 | Example with delay:
423 |
424 | .. code-block:: python
425 |
426 | msg_id = pgmq_client.send('my_queue', {'key': 'value', 'key2': 'value2'}, delay=10)
427 | msg = pgmq_client.read('my_queue')
428 | assert msg is None
429 | time.sleep(10)
430 | msg = pgmq_client.read('my_queue')
431 | assert msg is not None
432 | """
433 | if self.is_async:
434 | return self.loop.run_until_complete(
435 | self._send_async(queue_name, encode_dict_to_psql(message), delay)
436 | )
437 | return self._send_sync(queue_name, encode_dict_to_psql(message), delay)
438 |
439 | def _send_batch_sync(
440 | self, queue_name: str, messages: str, delay: int = 0
441 | ) -> List[int]:
442 | with self.session_maker() as session:
443 | rows = (
444 | session.execute(
445 | text(
446 | f"select * from pgmq.send_batch('{queue_name}',{messages},{delay});"
447 | )
448 | )
449 | ).fetchall()
450 | session.commit()
451 | return [row[0] for row in rows]
452 |
453 | async def _send_batch_async(
454 | self, queue_name: str, messages: str, delay: int = 0
455 | ) -> List[int]:
456 | async with self.session_maker() as session:
457 | rows = (
458 | await session.execute(
459 | text(
460 | f"select * from pgmq.send_batch('{queue_name}',{messages},{delay});"
461 | )
462 | )
463 | ).fetchall()
464 | await session.commit()
465 | return [row[0] for row in rows]
466 |
467 | def send_batch(
468 | self, queue_name: str, messages: List[dict], delay: int = 0
469 | ) -> List[int]:
470 | """
471 | Send a batch of messages to a queue.
472 |
473 | .. code-block:: python
474 |
475 | msgs = [{'key': 'value', 'key2': 'value2'}, {'key': 'value', 'key2': 'value2'}]
476 | msg_ids = pgmq_client.send_batch('my_queue', msgs)
477 | print(msg_ids)
478 | # send with delay
479 | msg_ids = pgmq_client.send_batch('my_queue', msgs, delay=10)
480 |
481 | """
482 | if self.is_async:
483 | return self.loop.run_until_complete(
484 | self._send_batch_async(queue_name, encode_list_to_psql(messages), delay)
485 | )
486 | return self._send_batch_sync(queue_name, encode_list_to_psql(messages), delay)
487 |
488 | def _read_sync(self, queue_name: str, vt: int) -> Optional[Message]:
489 | with self.session_maker() as session:
490 | row = session.execute(
491 | text("select * from pgmq.read(:queue_name,:vt,1);"),
492 | {"queue_name": queue_name, "vt": vt},
493 | ).fetchone()
494 | session.commit()
495 | if row is None:
496 | return None
497 | return Message(
498 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
499 | )
500 |
501 | async def _read_async(self, queue_name: str, vt: int) -> Optional[Message]:
502 | async with self.session_maker() as session:
503 | row = (
504 | await session.execute(
505 | text("select * from pgmq.read(:queue_name,:vt,1);"),
506 | {"queue_name": queue_name, "vt": vt},
507 | )
508 | ).fetchone()
509 | await session.commit()
510 | if row is None:
511 | return None
512 | return Message(
513 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
514 | )
515 |
516 | def read(self, queue_name: str, vt: Optional[int] = None) -> Optional[Message]:
517 | """
518 | .. _for_update_skip_locked: https://www.postgresql.org/docs/current/sql-select.html#SQL-FOR-UPDATE-SHARE
519 | .. |for_update_skip_locked| replace:: **FOR UPDATE SKIP LOCKED**
520 |
521 | .. _read_method: ref:`pgmq_sqlalchemy.PGMQueue.read`
522 | .. |read_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.read`
523 |
524 | Read a message from the queue.
525 |
526 | Returns:
527 | |schema_message_class|_ or ``None`` if the queue is empty.
528 |
529 | .. note::
530 | | ``PGMQ`` use |for_update_skip_locked|_ lock to make sure **a message is only read by one consumer**.
531 | | See the `pgmq.read `_ function for more details.
532 | |
533 | | For **consumer retries mechanism** (e.g. mark a message as failed after a certain number of retries) can be implemented by using the ``read_ct`` field in the |schema_message_class|_ object.
534 |
535 |
536 | .. important::
537 | | ``vt`` is the **visibility timeout** in seconds.
538 | | When a message is read from the queue, it will be invisible to other consumers for the duration of the ``vt``.
539 |
540 | Usage:
541 |
542 | .. code-block:: python
543 |
544 | from pgmq_sqlalchemy.schema import Message
545 |
546 | msg:Message = pgmq_client.read('my_queue')
547 | print(msg.msg_id)
548 | print(msg.message)
549 | print(msg.read_ct) # read count, how many times the message has been read
550 |
551 | Example with ``vt``:
552 |
553 | .. code-block:: python
554 |
555 | # assert `read_vt_demo` is empty
556 | pgmq_client.send('read_vt_demo', {'key': 'value', 'key2': 'value2'})
557 | msg = pgmq_client.read('read_vt_demo', vt=10)
558 | assert msg is not None
559 |
560 | # try to read immediately
561 | msg = pgmq_client.read('read_vt_demo')
562 | assert msg is None # will return None because the message is still invisible
563 |
564 | # try to read after 5 seconds
565 | time.sleep(5)
566 | msg = pgmq_client.read('read_vt_demo')
567 | assert msg is None # still invisible after 5 seconds
568 |
569 | # try to read after 11 seconds
570 | time.sleep(6)
571 | msg = pgmq_client.read('read_vt_demo')
572 | assert msg is not None # the message is visible after 10 seconds
573 |
574 |
575 | """
576 | if self.is_async:
577 | return self.loop.run_until_complete(self._read_async(queue_name, vt))
578 | return self._read_sync(queue_name, vt)
579 |
580 | def _read_batch_sync(
581 | self,
582 | queue_name: str,
583 | vt: int,
584 | batch_size: int = 1,
585 | ) -> Optional[List[Message]]:
586 | if vt is None:
587 | vt = self.vt
588 | with self.session_maker() as session:
589 | rows = session.execute(
590 | text("select * from pgmq.read(:queue_name,:vt,:batch_size);"),
591 | {
592 | "queue_name": queue_name,
593 | "vt": vt,
594 | "batch_size": batch_size,
595 | },
596 | ).fetchall()
597 | session.commit()
598 | if not rows:
599 | return None
600 | return [
601 | Message(
602 | msg_id=row[0],
603 | read_ct=row[1],
604 | enqueued_at=row[2],
605 | vt=row[3],
606 | message=row[4],
607 | )
608 | for row in rows
609 | ]
610 |
611 | async def _read_batch_async(
612 | self,
613 | queue_name: str,
614 | vt: int,
615 | batch_size: int = 1,
616 | ) -> Optional[List[Message]]:
617 | async with self.session_maker() as session:
618 | rows = (
619 | await session.execute(
620 | text("select * from pgmq.read(:queue_name,:vt,:batch_size);"),
621 | {
622 | "queue_name": queue_name,
623 | "vt": vt,
624 | "batch_size": batch_size,
625 | },
626 | )
627 | ).fetchall()
628 | await session.commit()
629 | if not rows:
630 | return None
631 | return [
632 | Message(
633 | msg_id=row[0],
634 | read_ct=row[1],
635 | enqueued_at=row[2],
636 | vt=row[3],
637 | message=row[4],
638 | )
639 | for row in rows
640 | ]
641 |
642 | def read_batch(
643 | self,
644 | queue_name: str,
645 | batch_size: int = 1,
646 | vt: Optional[int] = None,
647 | ) -> Optional[List[Message]]:
648 | """
649 | | Read a batch of messages from the queue.
650 | | Usage:
651 |
652 | Returns:
653 | List of |schema_message_class|_ or ``None`` if the queue is empty.
654 |
655 | .. code-block:: python
656 |
657 | from pgmq_sqlalchemy.schema import Message
658 |
659 | msgs:List[Message] = pgmq_client.read_batch('my_queue', batch_size=10)
660 | # with vt
661 | msgs:List[Message] = pgmq_client.read_batch('my_queue', batch_size=10, vt=10)
662 |
663 | """
664 | if vt is None:
665 | vt = self.vt
666 | if self.is_async:
667 | return self.loop.run_until_complete(
668 | self._read_batch_async(queue_name, batch_size, vt)
669 | )
670 | return self._read_batch_sync(queue_name, batch_size, vt)
671 |
672 | def _read_with_poll_sync(
673 | self,
674 | queue_name: str,
675 | vt: int,
676 | qty: int = 1,
677 | max_poll_seconds: int = 5,
678 | poll_interval_ms: int = 100,
679 | ) -> Optional[List[Message]]:
680 | """Read messages from a queue with polling."""
681 | with self.session_maker() as session:
682 | rows = session.execute(
683 | text(
684 | "select * from pgmq.read_with_poll(:queue_name,:vt,:qty,:max_poll_seconds,:poll_interval_ms);"
685 | ),
686 | {
687 | "queue_name": queue_name,
688 | "vt": vt,
689 | "qty": qty,
690 | "max_poll_seconds": max_poll_seconds,
691 | "poll_interval_ms": poll_interval_ms,
692 | },
693 | ).fetchall()
694 | session.commit()
695 | if not rows:
696 | return None
697 | return [
698 | Message(
699 | msg_id=row[0],
700 | read_ct=row[1],
701 | enqueued_at=row[2],
702 | vt=row[3],
703 | message=row[4],
704 | )
705 | for row in rows
706 | ]
707 |
708 | async def _read_with_poll_async(
709 | self,
710 | queue_name: str,
711 | vt: int,
712 | qty: int = 1,
713 | max_poll_seconds: int = 5,
714 | poll_interval_ms: int = 100,
715 | ) -> Optional[List[Message]]:
716 | """Read messages from a queue with polling."""
717 | async with self.session_maker() as session:
718 | rows = (
719 | await session.execute(
720 | text(
721 | "select * from pgmq.read_with_poll(:queue_name,:vt,:qty,:max_poll_seconds,:poll_interval_ms);"
722 | ),
723 | {
724 | "queue_name": queue_name,
725 | "vt": vt,
726 | "qty": qty,
727 | "max_poll_seconds": max_poll_seconds,
728 | "poll_interval_ms": poll_interval_ms,
729 | },
730 | )
731 | ).fetchall()
732 | await session.commit()
733 | if not rows:
734 | return None
735 | return [
736 | Message(
737 | msg_id=row[0],
738 | read_ct=row[1],
739 | enqueued_at=row[2],
740 | vt=row[3],
741 | message=row[4],
742 | )
743 | for row in rows
744 | ]
745 |
746 | def read_with_poll(
747 | self,
748 | queue_name: str,
749 | vt: Optional[int] = None,
750 | qty: int = 1,
751 | max_poll_seconds: int = 5,
752 | poll_interval_ms: int = 100,
753 | ) -> Optional[List[Message]]:
754 | """
755 |
756 | .. _read_with_poll_method: ref:`pgmq_sqlalchemy.PGMQueue.read_with_poll`
757 | .. |read_with_poll_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.read_with_poll`
758 |
759 |
760 | | Read messages from a queue with long-polling.
761 | |
762 | | When the queue is empty, the function block at most ``max_poll_seconds`` seconds.
763 | | During the polling, the function will check the queue every ``poll_interval_ms`` milliseconds, until the queue has ``qty`` messages.
764 |
765 | Args:
766 | queue_name (str): The name of the queue.
767 | vt (Optional[int]): The visibility timeout in seconds.
768 | qty (int): The number of messages to read.
769 | max_poll_seconds (int): The maximum number of seconds to poll.
770 | poll_interval_ms (int): The interval in milliseconds to poll.
771 |
772 | Returns:
773 | List of |schema_message_class|_ or ``None`` if the queue is empty.
774 |
775 | Usage:
776 |
777 | .. code-block:: python
778 |
779 | msg_id = pgmq_client.send('my_queue', {'key': 'value'}, delay=6)
780 |
781 | # the following code will block for 5 seconds
782 | msgs = pgmq_client.read_with_poll('my_queue', qty=1, max_poll_seconds=5, poll_interval_ms=100)
783 | assert msgs is None
784 |
785 | # try read_with_poll again
786 | # the following code will only block for 1 second
787 | msgs = pgmq_client.read_with_poll('my_queue', qty=1, max_poll_seconds=5, poll_interval_ms=100)
788 | assert msgs is not None
789 |
790 | Another example:
791 |
792 | .. code-block:: python
793 |
794 | msg = {'key': 'value'}
795 | msg_ids = pgmq_client.send_batch('my_queue', [msg, msg, msg, msg], delay=3)
796 |
797 | # the following code will block for 3 seconds
798 | msgs = pgmq_client.read_with_poll('my_queue', qty=3, max_poll_seconds=5, poll_interval_ms=100)
799 | assert len(msgs) == 3 # will read at most 3 messages (qty=3)
800 |
801 | """
802 | if vt is None:
803 | vt = self.vt
804 |
805 | if self.is_async:
806 | return self.loop.run_until_complete(
807 | self._read_with_poll_async(
808 | queue_name, vt, qty, max_poll_seconds, poll_interval_ms
809 | )
810 | )
811 | return self._read_with_poll_sync(
812 | queue_name, vt, qty, max_poll_seconds, poll_interval_ms
813 | )
814 |
815 | def _set_vt_sync(
816 | self, queue_name: str, msg_id: int, vt_offset: int
817 | ) -> Optional[Message]:
818 | """Set the visibility timeout for a message."""
819 | with self.session_maker() as session:
820 | row = session.execute(
821 | text("select * from pgmq.set_vt(:queue_name,:msg_id,:vt_offset);"),
822 | {"queue_name": queue_name, "msg_id": msg_id, "vt_offset": vt_offset},
823 | ).fetchone()
824 | session.commit()
825 | if row is None:
826 | return None
827 | return Message(
828 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
829 | )
830 |
831 | async def _set_vt_async(
832 | self, queue_name: str, msg_id: int, vt_offset: int
833 | ) -> Optional[Message]:
834 | """Set the visibility timeout for a message."""
835 | async with self.session_maker() as session:
836 | row = (
837 | await session.execute(
838 | text("select * from pgmq.set_vt(:queue_name,:msg_id,:vt_offset);"),
839 | {
840 | "queue_name": queue_name,
841 | "msg_id": msg_id,
842 | "vt_offset": vt_offset,
843 | },
844 | )
845 | ).fetchone()
846 | await session.commit()
847 | print("row", row)
848 | if row is None:
849 | return None
850 | return Message(
851 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
852 | )
853 |
    def set_vt(self, queue_name: str, msg_id: int, vt_offset: int) -> Optional[Message]:
        """
        .. _set_vt_method: ref:`pgmq_sqlalchemy.PGMQueue.set_vt`
        .. |set_vt_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.set_vt`

        Set the visibility timeout for a message.

        Args:
            queue_name (str): The name of the queue.
            msg_id (int): The message id.
            vt_offset (int): The visibility timeout in seconds.

        Returns:
            |schema_message_class|_ or ``None`` if the message does not exist.

        Usage:

        .. code-block:: python

            msg_id = pgmq_client.send('my_queue', {'key': 'value'})
            msg = pgmq_client.read('my_queue')
            assert msg is not None
            msg = pgmq_client.set_vt('my_queue', msg.msg_id, 10)
            assert msg is not None

        .. tip::
            | |read_method|_ and |set_vt_method|_ can be used together to implement an **exponential backoff** mechanism.
            | `ref: Exponential Backoff And Jitter <https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/>`_.
            | **For example:**

        .. code-block:: python

            from pgmq_sqlalchemy import PGMQueue
            from pgmq_sqlalchemy.schema import Message

            def _exp_backoff_retry(msg: Message) -> int:
                # exponential backoff retry
                if msg.read_ct < 5:
                    return 2 ** msg.read_ct
                return 2 ** 5

            def consumer_with_backoff_retry(pgmq_client: PGMQueue, queue_name: str):
                msg = pgmq_client.read(
                    queue_name=queue_name,
                    vt=1000,  # set vt to 1000 seconds temporarily
                )
                if msg is None:
                    return

                # set exponential backoff retry
                pgmq_client.set_vt(
                    queue_name=queue_name,
                    msg_id=msg.msg_id,
                    vt_offset=_exp_backoff_retry(msg)
                )

        """
        if self.is_async:
            return self.loop.run_until_complete(
                self._set_vt_async(queue_name, msg_id, vt_offset)
            )
        return self._set_vt_sync(queue_name, msg_id, vt_offset)
916 |
917 | def _pop_sync(self, queue_name: str) -> Optional[Message]:
918 | with self.session_maker() as session:
919 | row = session.execute(
920 | text("select * from pgmq.pop(:queue_name);"),
921 | {"queue_name": queue_name},
922 | ).fetchone()
923 | session.commit()
924 | if row is None:
925 | return None
926 | return Message(
927 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
928 | )
929 |
930 | async def _pop_async(self, queue_name: str) -> Optional[Message]:
931 | async with self.session_maker() as session:
932 | row = (
933 | await session.execute(
934 | text("select * from pgmq.pop(:queue_name);"),
935 | {"queue_name": queue_name},
936 | )
937 | ).fetchone()
938 | await session.commit()
939 | if row is None:
940 | return None
941 | return Message(
942 | msg_id=row[0], read_ct=row[1], enqueued_at=row[2], vt=row[3], message=row[4]
943 | )
944 |
    def pop(self, queue_name: str) -> Optional[Message]:
        """
        Reads a single message from a queue and deletes it upon read.

        Args:
            queue_name (str): The name of the queue.

        Returns:
            |schema_message_class|_ or ``None`` if the queue is empty.

        .. code-block:: python

            msg = pgmq_client.pop('my_queue')
            print(msg.msg_id)
            print(msg.message)

        """
        if self.is_async:
            return self.loop.run_until_complete(self._pop_async(queue_name))
        return self._pop_sync(queue_name)
959 |
960 | def _delete_sync(
961 | self,
962 | queue_name: str,
963 | msg_id: int,
964 | ) -> bool:
965 | with self.session_maker() as session:
966 | # should add explicit type casts to choose the correct candidate function
967 | row = session.execute(
968 | text(f"select * from pgmq.delete('{queue_name}',{msg_id}::BIGINT);")
969 | ).fetchone()
970 | session.commit()
971 | return row[0]
972 |
973 | async def _delete_async(
974 | self,
975 | queue_name: str,
976 | msg_id: int,
977 | ) -> bool:
978 | async with self.session_maker() as session:
979 | # should add explicit type casts to choose the correct candidate function
980 | row = (
981 | await session.execute(
982 | text(f"select * from pgmq.delete('{queue_name}',{msg_id}::BIGINT);")
983 | )
984 | ).fetchone()
985 | await session.commit()
986 | return row[0]
987 |
    def delete(self, queue_name: str, msg_id: int) -> bool:
        """
        Delete a message from the queue.

        .. _delete_method: ref:`pgmq_sqlalchemy.PGMQueue.delete`
        .. |delete_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.delete`

        Args:
            queue_name (str): The name of the queue.
            msg_id (int): The message id.

        * Raises an error if the ``queue_name`` does not exist.
        * Returns ``True`` if the message is deleted successfully.
        * If the message does not exist, returns ``False``.

        .. code-block:: python

            msg_id = pgmq_client.send('my_queue', {'key': 'value'})
            assert pgmq_client.delete('my_queue', msg_id)
            assert not pgmq_client.delete('my_queue', msg_id)

        """
        if self.is_async:
            return self.loop.run_until_complete(self._delete_async(queue_name, msg_id))
        return self._delete_sync(queue_name, msg_id)
1009 |
1010 | def _delete_batch_sync(
1011 | self,
1012 | queue_name: str,
1013 | msg_ids: List[int],
1014 | ) -> List[int]:
1015 | # should add explicit type casts to choose the correct candidate function
1016 | with self.session_maker() as session:
1017 | rows = session.execute(
1018 | text(f"select * from pgmq.delete('{queue_name}',ARRAY{msg_ids});")
1019 | ).fetchall()
1020 | session.commit()
1021 | return [row[0] for row in rows]
1022 |
1023 | async def _delete_batch_async(
1024 | self,
1025 | queue_name: str,
1026 | msg_ids: List[int],
1027 | ) -> List[int]:
1028 | # should add explicit type casts to choose the correct candidate function
1029 | async with self.session_maker() as session:
1030 | rows = (
1031 | await session.execute(
1032 | text(f"select * from pgmq.delete('{queue_name}',ARRAY{msg_ids});")
1033 | )
1034 | ).fetchall()
1035 | await session.commit()
1036 | return [row[0] for row in rows]
1037 |
    def delete_batch(self, queue_name: str, msg_ids: List[int]) -> List[int]:
        """
        Delete a batch of messages from the queue.

        .. _delete_batch_method: ref:`pgmq_sqlalchemy.PGMQueue.delete_batch`
        .. |delete_batch_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.delete_batch`

        Args:
            queue_name (str): The name of the queue.
            msg_ids (List[int]): The message ids to delete.

        .. note::
            | Instead of returning a ``bool`` like |delete_method|_,
            | |delete_batch_method|_ will return a list of ``msg_id`` that are successfully deleted.

        .. code-block:: python

            msg_ids = pgmq_client.send_batch('my_queue', [{'key': 'value'}, {'key': 'value'}])
            assert pgmq_client.delete_batch('my_queue', msg_ids) == msg_ids

        """
        if self.is_async:
            return self.loop.run_until_complete(
                self._delete_batch_async(queue_name, msg_ids)
            )
        return self._delete_batch_sync(queue_name, msg_ids)
1060 |
1061 | def _archive_sync(self, queue_name: str, msg_id: int) -> bool:
1062 | """Archive a message from a queue synchronously."""
1063 | with self.session_maker() as session:
1064 | row = session.execute(
1065 | text(f"select pgmq.archive('{queue_name}',{msg_id}::BIGINT);")
1066 | ).fetchone()
1067 | session.commit()
1068 | return row[0]
1069 |
1070 | async def _archive_async(self, queue_name: str, msg_id: int) -> bool:
1071 | """Archive a message from a queue asynchronously."""
1072 | async with self.session_maker() as session:
1073 | row = (
1074 | await session.execute(
1075 | text(f"select pgmq.archive('{queue_name}',{msg_id}::BIGINT);")
1076 | )
1077 | ).fetchone()
1078 | await session.commit()
1079 | return row[0]
1080 |
    def archive(self, queue_name: str, msg_id: int) -> bool:
        """
        Archive a message from a queue.

        .. _archive_method: ref:`pgmq_sqlalchemy.PGMQueue.archive`
        .. |archive_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.archive`

        Args:
            queue_name (str): The name of the queue.
            msg_id (int): The message id.

        * Message will be deleted from the queue and moved to the archive table.
        * Will be deleted from ``pgmq.q_<queue_name>`` and be inserted into the ``pgmq.a_<queue_name>`` table.
        * raises an error if the ``queue_name`` does not exist.
        * returns ``True`` if the message is archived successfully.

        .. code-block:: python

            msg_id = pgmq_client.send('my_queue', {'key': 'value'})
            assert pgmq_client.archive('my_queue', msg_id)
            # since the message is archived, queue will be empty
            assert pgmq_client.read('my_queue') is None

        """
        if self.is_async:
            return self.loop.run_until_complete(self._archive_async(queue_name, msg_id))
        return self._archive_sync(queue_name, msg_id)
1105 |
1106 | def _archive_batch_sync(self, queue_name: str, msg_ids: List[int]) -> List[int]:
1107 | """Archive multiple messages from a queue synchronously."""
1108 | with self.session_maker() as session:
1109 | rows = session.execute(
1110 | text(f"select * from pgmq.archive('{queue_name}',ARRAY{msg_ids});")
1111 | ).fetchall()
1112 | session.commit()
1113 | return [row[0] for row in rows]
1114 |
1115 | async def _archive_batch_async(
1116 | self, queue_name: str, msg_ids: List[int]
1117 | ) -> List[int]:
1118 | """Archive multiple messages from a queue asynchronously."""
1119 | async with self.session_maker() as session:
1120 | rows = (
1121 | await session.execute(
1122 | text(f"select * from pgmq.archive('{queue_name}',ARRAY{msg_ids});")
1123 | )
1124 | ).fetchall()
1125 | await session.commit()
1126 | return [row[0] for row in rows]
1127 |
    def archive_batch(self, queue_name: str, msg_ids: List[int]) -> List[int]:
        """
        Archive multiple messages from a queue.

        Args:
            queue_name (str): The name of the queue.
            msg_ids (List[int]): The message ids to archive.

        * Messages will be deleted from the queue and moved to the archive table.
        * Returns a list of ``msg_id`` that are successfully archived.

        .. code-block:: python

            msg_ids = pgmq_client.send_batch('my_queue', [{'key': 'value'}, {'key': 'value'}])
            assert pgmq_client.archive_batch('my_queue', msg_ids) == msg_ids
            assert pgmq_client.read('my_queue') is None

        """
        if self.is_async:
            return self.loop.run_until_complete(
                self._archive_batch_async(queue_name, msg_ids)
            )
        return self._archive_batch_sync(queue_name, msg_ids)
1147 |
1148 | def _purge_sync(self, queue_name: str) -> int:
1149 | """Purge a queue synchronously,return deleted_count."""
1150 | with self.session_maker() as session:
1151 | row = session.execute(
1152 | text("select pgmq.purge_queue(:queue_name);"),
1153 | {"queue_name": queue_name},
1154 | ).fetchone()
1155 | session.commit()
1156 | return row[0]
1157 |
1158 | async def _purge_async(self, queue_name: str) -> int:
1159 | """Purge a queue asynchronously,return deleted_count."""
1160 | async with self.session_maker() as session:
1161 | row = (
1162 | await session.execute(
1163 | text("select pgmq.purge_queue(:queue_name);"),
1164 | {"queue_name": queue_name},
1165 | )
1166 | ).fetchone()
1167 | await session.commit()
1168 | return row[0]
1169 |
    def purge(self, queue_name: str) -> int:
        """
        * Delete all messages from a queue, return the number of messages deleted.
        * Archive tables will **not** be affected.

        Args:
            queue_name (str): The name of the queue.

        Returns:
            int: The number of messages deleted.

        .. code-block:: python

            msg_ids = pgmq_client.send_batch('my_queue', [{'key': 'value'}, {'key': 'value'}])
            assert pgmq_client.purge('my_queue') == 2
            assert pgmq_client.read('my_queue') is None

        """
        if self.is_async:
            return self.loop.run_until_complete(self._purge_async(queue_name))
        return self._purge_sync(queue_name)
1185 |
1186 | def _metrics_sync(self, queue_name: str) -> Optional[QueueMetrics]:
1187 | """Get queue metrics synchronously."""
1188 | with self.session_maker() as session:
1189 | row = session.execute(
1190 | text("select * from pgmq.metrics(:queue_name);"),
1191 | {"queue_name": queue_name},
1192 | ).fetchone()
1193 | session.commit()
1194 | if row is None:
1195 | return None
1196 | return QueueMetrics(
1197 | queue_name=row[0],
1198 | queue_length=row[1],
1199 | newest_msg_age_sec=row[2],
1200 | oldest_msg_age_sec=row[3],
1201 | total_messages=row[4],
1202 | )
1203 |
1204 | async def _metrics_async(self, queue_name: str) -> Optional[QueueMetrics]:
1205 | """Get queue metrics asynchronously."""
1206 | async with self.session_maker() as session:
1207 | row = (
1208 | await session.execute(
1209 | text("select * from pgmq.metrics(:queue_name);"),
1210 | {"queue_name": queue_name},
1211 | )
1212 | ).fetchone()
1213 | if row is None:
1214 | return None
1215 | return QueueMetrics(
1216 | queue_name=row[0],
1217 | queue_length=row[1],
1218 | newest_msg_age_sec=row[2],
1219 | oldest_msg_age_sec=row[3],
1220 | total_messages=row[4],
1221 | )
1222 |
    def metrics(self, queue_name: str) -> Optional[QueueMetrics]:
        """
        Get metrics for a queue.

        Args:
            queue_name (str): The name of the queue.

        Returns:
            |schema_queue_metrics_class|_ or ``None`` if the queue does not exist.

        Usage:

        .. code-block:: python

            from pgmq_sqlalchemy.schema import QueueMetrics

            metrics: QueueMetrics = pgmq_client.metrics('my_queue')
            print(metrics.queue_name)
            print(metrics.queue_length)
            print(metrics.total_messages)

        """
        if self.is_async:
            return self.loop.run_until_complete(self._metrics_async(queue_name))
        return self._metrics_sync(queue_name)
1245 |
1246 | def _metrics_all_sync(self) -> Optional[List[QueueMetrics]]:
1247 | """Get metrics for all queues synchronously."""
1248 | with self.session_maker() as session:
1249 | rows = session.execute(text("select * from pgmq.metrics_all();")).fetchall()
1250 | if not rows:
1251 | return None
1252 | return [
1253 | QueueMetrics(
1254 | queue_name=row[0],
1255 | queue_length=row[1],
1256 | newest_msg_age_sec=row[2],
1257 | oldest_msg_age_sec=row[3],
1258 | total_messages=row[4],
1259 | )
1260 | for row in rows
1261 | ]
1262 |
1263 | async def _metrics_all_async(self) -> Optional[List[QueueMetrics]]:
1264 | """Get metrics for all queues asynchronously."""
1265 | async with self.session_maker() as session:
1266 | rows = (
1267 | await session.execute(text("select * from pgmq.metrics_all();"))
1268 | ).fetchall()
1269 | if not rows:
1270 | return None
1271 | return [
1272 | QueueMetrics(
1273 | queue_name=row[0],
1274 | queue_length=row[1],
1275 | newest_msg_age_sec=row[2],
1276 | oldest_msg_age_sec=row[3],
1277 | total_messages=row[4],
1278 | )
1279 | for row in rows
1280 | ]
1281 |
    def metrics_all(self) -> Optional[List[QueueMetrics]]:
        """

        .. _read_committed_isolation_level: https://www.postgresql.org/docs/current/transaction-iso.html#XACT-READ-COMMITTED
        .. |read_committed_isolation_level| replace:: **READ COMMITTED**

        .. _metrics_all_method: ref:`pgmq_sqlalchemy.PGMQueue.metrics_all`
        .. |metrics_all_method| replace:: :py:meth:`~pgmq_sqlalchemy.PGMQueue.metrics_all`

        Get metrics for all queues.

        Returns:
            List of |schema_queue_metrics_class|_ or ``None`` if there are no queues.

        Usage:

        .. code-block:: python

            from pgmq_sqlalchemy.schema import QueueMetrics

            metrics: List[QueueMetrics] = pgmq_client.metrics_all()
            for m in metrics:
                print(m.queue_name)
                print(m.queue_length)
                print(m.total_messages)

        .. warning::
            | You should use a **distributed lock** to avoid **race conditions** when calling |metrics_all_method|_ in **concurrent** |drop_queue_method|_ **scenarios**.
            |
            | Since the default PostgreSQL isolation level is |read_committed_isolation_level|_, the queue metrics to be fetched **may not exist** if there are **concurrent** |drop_queue_method|_ **operations**.
            | Check the `pgmq.metrics_all <https://github.com/tembo-io/pgmq>`_ function for more details.


        """
        if self.is_async:
            return self.loop.run_until_complete(self._metrics_all_async())
        return self._metrics_all_sync()
1319 |
--------------------------------------------------------------------------------