├── docs
├── _static
│ └── .gitkeep
├── _templates
│ └── .gitkeep
├── apidocs
│ └── .gitignore
├── AUTHORS.rst
├── CHANGELOG.rst
├── Makefile
├── make.bat
├── README.rst
├── index.rst
└── conf.py
├── src
└── sqlalchemy_dlock
│ ├── py.typed
│ ├── statement
│ ├── __init__.py
│ ├── mysql.py
│ └── postgresql.py
│ ├── .gitignore
│ ├── lock
│ ├── __init__.py
│ ├── mysql.py
│ ├── base.py
│ └── postgresql.py
│ ├── exceptions.py
│ ├── __init__.py
│ ├── typing.py
│ ├── registry.py
│ └── factory.py
├── tests
├── .dockerignore
├── __init__.py
├── Dockerfile
├── engines.py
├── asyncio
│ ├── __init__.py
│ ├── engines.py
│ ├── test_session.py
│ ├── test_concurrency.py
│ ├── test_pg.py
│ ├── test_key_convert.py
│ └── test_basic.py
├── docker-compose.yml
├── test_session.py
├── test_scoped_session.py
├── test_pg.py
├── test_key_convert.py
├── test_multithread.py
├── test_multiprocess.py
└── test_basic.py
├── .mypy.ini
├── .idea
├── .gitignore
├── vcs.xml
├── inspectionProfiles
│ ├── profiles_settings.xml
│ └── Project_Default.xml
├── modules.xml
├── misc.xml
└── sqlalchemy-dlock.iml
├── .markdownlint.json
├── .gitattributes
├── codecov.yml
├── .coveragerc
├── AUTHORS.md
├── MANIFEST.in
├── .dockerignore
├── scripts
├── wait-for-postgres.sh
├── wait-for-mysql.sh
└── run-test.sh
├── .editorconfig
├── .ruff.toml
├── db.docker-compose.yml
├── .readthedocs.yaml
├── .pre-commit-config.yaml
├── .vscode
└── tasks.json
├── LICENSE
├── pyproject.toml
├── .github
└── workflows
│ └── python-package.yml
├── CHANGELOG.md
├── README.md
└── .gitignore
/docs/_static/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/_templates/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/py.typed:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/.dockerignore:
--------------------------------------------------------------------------------
1 | *
2 | **/*
3 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/.mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | files = src/**/*.py
3 |
--------------------------------------------------------------------------------
/docs/apidocs/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
3 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/.gitignore:
--------------------------------------------------------------------------------
1 | _version.py
2 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # 默认忽略的文件
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/docs/AUTHORS.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../AUTHORS.md
2 | :parser: myst_parser.sphinx_
3 |
--------------------------------------------------------------------------------
/docs/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../CHANGELOG.md
2 | :parser: myst_parser.sphinx_
3 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseAsyncSadLock, BaseSadLock
2 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "code-block-style": false,
3 | "line-length": false
4 | }
5 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
2 | *.{cmd,[cC][mM][dD]} text eol=crlf
3 | *.{bat,[bB][aA][tT]} text eol=crlf
4 | *.{ps1,[pP][sS]1} text eol=crlf
5 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | target: auto # auto compares coverage to the previous base commit
6 |
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | test/*
4 | tests/*
5 | .venv/*
6 | env/*
7 | venv/*
8 | ENV/*
9 | env.bak/*
10 | venv.bak/*
11 | src/**/_version.py
12 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | # AUTHORS
2 |
3 | * Liu Xue Yan (liu_xue_yan@foxmail.com)
4 |
5 | [liu_xue_yan@foxmail.com](mailto:liu_xue_yan@foxmail.com)
6 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/mysql.py:
--------------------------------------------------------------------------------
 1 | """Textual SQL statements for MySQL named (user-level) locks."""
 2 |
 3 | from typing import Final
 4 |
 5 | from sqlalchemy import text
 6 |
 7 | # GET_LOCK(str, timeout): acquire the named lock, waiting up to `timeout` seconds.
 8 | LOCK: Final = text("SELECT GET_LOCK(:str, :timeout)")
 9 | # RELEASE_LOCK(str): release a named lock previously obtained with GET_LOCK.
10 | UNLOCK: Final = text("SELECT RELEASE_LOCK(:str)")
11 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
 1 | """Test-suite package initialisation: configure logging once for all tests."""
 2 |
 3 | import logging
 4 | import sys
 5 |
 6 |
 7 | # Send INFO-level records to stderr so test runners display them with timestamps.
 8 | logging.basicConfig(
 9 |     level=logging.INFO,
10 |     stream=sys.stderr,
11 |     format="%(asctime)s [%(levelname).1s] %(name)s %(message)s",
12 | )
13 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | exclude .*
2 | exclude *.docker-compose.yml
3 | exclude *.requirements.txt
4 | exclude codecov.yml
5 | exclude coverage.*
6 |
7 | prune .*
8 | prune docs
9 | prune scripts
10 | prune tests
11 | prune htmlcov
12 |
--------------------------------------------------------------------------------
/tests/Dockerfile:
--------------------------------------------------------------------------------
1 | # build a base python image for multiple-version tests
2 |
3 | FROM quay.io/pypa/manylinux_2_28_x86_64
4 | RUN --mount=type=cache,target=/var/cache/dnf \
5 | dnf install -y mysql mysql-devel postgresql libpq-devel
6 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/exceptions.py:
--------------------------------------------------------------------------------
 1 | """Exception hierarchy of the sqlalchemy-dlock package."""
 2 |
 3 | __all__ = ["SqlAlchemyDLockBaseException", "SqlAlchemyDLockDatabaseError"]
 4 |
 5 |
 6 | class SqlAlchemyDLockBaseException(Exception):
 7 |     """Root of the package's exception hierarchy.
 8 |
 9 |     Catch this to handle any error raised by sqlalchemy-dlock.
10 |     """
11 |
12 |     pass
13 |
14 |
15 | class SqlAlchemyDLockDatabaseError(SqlAlchemyDLockBaseException):
16 |     """Database-related subclass of the package base exception.
17 |
18 |     NOTE(review): presumably raised by the lock implementations on
19 |     database-side failures — confirm against ``lock/*.py``.
20 |     """
21 |
22 |     pass
23 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git/
2 |
3 | .venv/
4 |
5 | **/__pycache__
6 | *.egg-info
7 | *.egg/
8 | *.pyc
9 |
10 | build/
11 | dist/
12 | docs/_build
13 |
14 | .mypy_cache/
15 | .pytest_cache/
16 | .ruff_cache/
17 |
18 | *.swp
19 |
20 | html/*
21 |
22 | **/Dockerfile
23 | **/Dockerfile.*
24 | **/*.Dockerfile
25 | **/docker-compose.*
26 | **/*.docker-compose.*
27 |
--------------------------------------------------------------------------------
/tests/engines.py:
--------------------------------------------------------------------------------
 1 | """Synchronous SQLAlchemy engines shared by the test suite."""
 2 |
 3 | from os import getenv
 4 |
 5 | from dotenv import load_dotenv
 6 | from sqlalchemy import create_engine
 7 |
 8 | __all__ = ["ENGINES", "URLS"]
 9 |
10 | # Let a `.env` file provide TEST_URLS before it is read below.
11 | load_dotenv()
12 |
13 | # Space-separated list of database URLs; defaults to local MySQL and PostgreSQL services.
14 | URLS = (getenv("TEST_URLS") or "mysql://test:test@127.0.0.1/test postgresql://postgres:test@127.0.0.1/").split()
15 |
16 | # One engine per configured URL, created eagerly at import time.
17 | ENGINES = [create_engine(url) for url in URLS]
18 |
--------------------------------------------------------------------------------
/scripts/wait-for-postgres.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | # Block until the PostgreSQL server at $1 accepts connections.
 3 | # Usage: wait-for-postgres.sh <host> <password>
 4 |
 5 | set -e
 6 |
 7 | POSTGRES_HOST="$1"
 8 | POSTGRES_PASSWORD="$2"
 9 | # NOTE(review): shifts only one of the two consumed arguments, and "$@" is
10 | # not used afterwards — presumably vestigial; confirm before relying on it.
11 | shift
12 |
13 | psql --version
14 |
15 | # Retry a trivial psql command every 5 seconds until the server is reachable.
16 | until PGPASSWORD=$POSTGRES_PASSWORD psql -h "$POSTGRES_HOST" -U "postgres" -c '\q'
17 | do
18 |     >&2 echo "Postgres is unavailable - sleeping"
19 |     sleep 5
20 | done
21 |
22 | >&2 echo "Postgres is up!"
23 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Distributed lock based on Database and SQLAlchemy
3 | """
4 |
5 | from . import _version as version
6 | from ._version import __version__, __version_tuple__
7 | from .exceptions import SqlAlchemyDLockBaseException, SqlAlchemyDLockDatabaseError
8 | from .factory import create_async_sadlock, create_sadlock
9 | from .lock import BaseAsyncSadLock, BaseSadLock
10 |
--------------------------------------------------------------------------------
/tests/asyncio/__init__.py:
--------------------------------------------------------------------------------
 1 | """Asyncio test-suite package initialisation: pick a psycopg-compatible event loop."""
 2 |
 3 | import asyncio
 4 | import platform
 5 |
 6 | # Psycopg cannot use the 'ProactorEventLoop' (the Windows default) to run in async mode.
 7 | # Use a compatible event loop, for instance by setting 'asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())'
 8 | if platform.system() == "Windows":
 9 |     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())  # type: ignore[attr-defined]
10 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/typing.py:
--------------------------------------------------------------------------------
 1 | """Type aliases for the connection-like objects accepted by the lock factories."""
 2 |
 3 | from typing import Union
 4 |
 5 | from sqlalchemy.engine import Connection
 6 | from sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, async_scoped_session
 7 | from sqlalchemy.orm import Session, scoped_session
 8 |
 9 | # Any synchronous connection/session object a lock can execute statements on.
10 | ConnectionOrSessionT = Union[Connection, Session, scoped_session]
11 | # Asyncio counterpart of ConnectionOrSessionT.
12 | AsyncConnectionOrSessionT = Union[AsyncConnection, AsyncSession, async_scoped_session]
13 |
--------------------------------------------------------------------------------
/scripts/wait-for-mysql.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | # Block until the MySQL server at $1 accepts connections for the given
 3 | # database and credentials.
 4 | # Usage: wait-for-mysql.sh <host> <database> <user> <password>
 5 |
 6 | set -e
 7 |
 8 | MYSQL_HOST="$1"
 9 | MYSQL_DATABASE="$2"
10 | MYSQL_USER="$3"
11 | MYSQL_PASSWORD="$4"
12 | # NOTE(review): shifts only one of the four consumed arguments, and "$@" is
13 | # not used afterwards — presumably vestigial; confirm before relying on it.
14 | shift
15 |
16 | mysql --version
17 |
18 | # Retry a trivial statement every 5 seconds until the server is reachable.
19 | until mysql -u "$MYSQL_USER" --password="$MYSQL_PASSWORD" -h "$MYSQL_HOST" -e "use $MYSQL_DATABASE"
20 | do
21 |     >&2 echo "MySQL is unavailable - sleeping"
22 |     sleep 5
23 | done
24 |
25 | >&2 echo "MySQL is up!"
26 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig is awesome: https://EditorConfig.org
2 |
3 | # top-most EditorConfig file
4 | root = true
5 |
6 | # Unix-style newlines with a newline ending every file
7 | [*]
8 | end_of_line = lf
9 | charset = utf-8
10 | insert_final_newline = true
11 |
12 | # 4 space indentation
13 | [*.py]
14 | indent_style = space
15 | indent_size = 4
16 |
17 | # Tab indentation (no size specified)
18 | [Makefile]
19 | indent_style = tab
20 |
--------------------------------------------------------------------------------
/.ruff.toml:
--------------------------------------------------------------------------------
1 | src = ["src"]
2 | extend-exclude = ["docs", "scripts", "tests"]
3 | line-length = 128
4 |
5 | [lint]
6 | extend-select = ["I"]
7 |
8 | [lint.mccabe]
9 | # Unlike Flake8, default to a complexity level of 10.
10 | max-complexity = 10
11 |
12 | # Ignore `F401` (imported but unused), `F403` (`import *` used), `E402` (import violations) in all `__init__.py` files
13 | [lint.per-file-ignores]
14 | "__init__.py" = ["F401"]
15 |
16 | [format]
17 | docstring-code-format = true
18 |
--------------------------------------------------------------------------------
/db.docker-compose.yml:
--------------------------------------------------------------------------------
1 | # run MySQL and PostgreSQL database services on local Docker, for develop/test
2 |
3 | name: sqlalchemy-dlock-db
4 |
5 | services:
6 | mysql:
7 | image: mysql
8 | ports:
9 | - "127.0.0.1:3306:3306"
10 | environment:
11 | MYSQL_RANDOM_ROOT_PASSWORD: "1"
12 | MYSQL_DATABASE: test
13 | MYSQL_USER: test
14 | MYSQL_PASSWORD: test
15 |
16 | postgres:
17 | image: postgres:alpine
18 | ports:
19 | - "127.0.0.1:5432:5432"
20 | environment:
21 | POSTGRES_PASSWORD: test
22 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/.idea/sqlalchemy-dlock.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/tests/asyncio/engines.py:
--------------------------------------------------------------------------------
1 | from os import getenv
2 | from typing import List
3 |
4 | from dotenv import load_dotenv
5 | from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
6 |
7 | __all__ = ["create_engines", "dispose_engines", "get_engines"]
8 |
9 |
10 | _ENGINES: List[AsyncEngine] = []
11 |
12 |
13 | def create_engines():
14 | global _ENGINES
15 |
16 | load_dotenv()
17 |
18 | urls = (
19 | getenv("TEST_ASYNC_URLS") or "mysql+aiomysql://test:test@127.0.0.1/test postgresql+asyncpg://postgres:test@127.0.0.1/"
20 | ).split()
21 |
22 | for url in urls:
23 | engine = create_async_engine(url)
24 | _ENGINES.append(engine)
25 |
26 | return _ENGINES
27 |
28 |
29 | async def dispose_engines():
30 | for engine in _ENGINES:
31 | await engine.dispose()
32 |
33 |
34 | def get_engines():
35 | return _ENGINES
36 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | sphinx:
9 | configuration: docs/conf.py
10 |
11 | build:
12 | os: ubuntu-lts-latest
13 | tools:
14 | python: latest
15 | jobs:
16 | pre_create_environment: # install uv
17 | - asdf plugin add uv
18 | - asdf install uv latest
19 | - asdf global uv latest
20 | create_environment: # create uv venv
21 | - uv venv "${READTHEDOCS_VIRTUALENV_PATH}"
22 | pre_install: # Avoid having a dirty Git index
23 | - git update-index --assume-unchanged docs/conf.py
24 | install: # install by uv
25 | - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --no-dev --group docs
26 | pre_build:
27 | - sphinx-apidoc -H "" -feo docs/apidocs src
28 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/postgresql.py:
--------------------------------------------------------------------------------
 1 | """Textual SQL statements for PostgreSQL advisory locks."""
 2 |
 3 | from typing import Final
 4 |
 5 | from sqlalchemy import text
 6 |
 7 | # Blocking acquire — session-scoped (exclusive / shared) and transaction-scoped variants.
 8 | LOCK: Final = text("SELECT pg_advisory_lock(:key)")
 9 | LOCK_SHARED: Final = text("SELECT pg_advisory_lock_shared(:key)")
10 | LOCK_XACT: Final = text("SELECT pg_advisory_xact_lock(:key)")
11 | LOCK_XACT_SHARED: Final = text("SELECT pg_advisory_xact_lock_shared(:key)")
12 |
13 | # Non-blocking acquire — the pg_try_* forms return immediately instead of waiting.
14 | TRY_LOCK: Final = text("SELECT pg_try_advisory_lock(:key)")
15 | TRY_LOCK_SHARED: Final = text("SELECT pg_try_advisory_lock_shared(:key)")
16 | TRY_LOCK_XACT: Final = text("SELECT pg_try_advisory_xact_lock(:key)")
17 | TRY_LOCK_XACT_SHARED: Final = text("SELECT pg_try_advisory_xact_lock_shared(:key)")
18 |
19 | # Explicit unlock exists only for session-scoped locks; transaction-scoped
20 | # (xact) locks are released by the server at transaction end.
21 | UNLOCK: Final = text("SELECT pg_advisory_unlock(:key)")
22 | UNLOCK_SHARED: Final = text("SELECT pg_advisory_unlock_shared(:key)")
23 |
24 |
25 | # Sleep intervals in seconds — presumably the poll period when emulating a
26 | # timed blocking acquire via the TRY_* statements; confirm in lock/postgresql.py.
27 | SLEEP_INTERVAL_DEFAULT: Final = 1
28 | SLEEP_INTERVAL_MIN: Final = 0.1
29 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/tests/asyncio/test_session.py:
--------------------------------------------------------------------------------
 1 | """Asyncio session-based lock tests."""
 2 |
 3 | from unittest import IsolatedAsyncioTestCase
 4 | from uuid import uuid1
 5 |
 6 | from sqlalchemy.ext.asyncio import AsyncSession
 7 |
 8 | from sqlalchemy_dlock import create_async_sadlock
 9 |
10 | from .engines import create_engines, dispose_engines, get_engines
11 |
12 |
13 | class SessionTestCase(IsolatedAsyncioTestCase):
14 |     """Acquire/release an async lock through an AsyncSession for each configured engine."""
15 |
16 |     # NOTE(review): appears unused within this test case — candidate for removal.
17 |     sessions = []  # type: ignore[var-annotated]
18 |
19 |     def setUp(self):
20 |         # Engines are (re)created per test and disposed in asyncTearDown.
21 |         create_engines()
22 |
23 |     async def asyncTearDown(self):
24 |         await dispose_engines()
25 |
26 |     async def test_once(self):
27 |         key = uuid1().hex
28 |         for engine in get_engines():
29 |             session = AsyncSession(engine)
30 |             async with session.begin():
31 |                 async with create_async_sadlock(session, key) as lock:
32 |                     # Held inside the context manager ...
33 |                     self.assertTrue(lock.locked)
34 |             # ... and released once the context manager exits.
35 |             self.assertFalse(lock.locked)
36 |
--------------------------------------------------------------------------------
/tests/docker-compose.yml:
--------------------------------------------------------------------------------
1 | name: sqlalchemy-dlock-tests
2 |
3 | x-common-environment: &common-environment
4 | MYSQL_RANDOM_ROOT_PASSWORD: "1"
5 | MYSQL_DATABASE: test
6 | MYSQL_USER: test
7 | MYSQL_PASSWORD: test
8 | POSTGRES_PASSWORD: test
9 |
10 | services:
11 | mysql:
12 | image: mysql
13 | environment:
14 | <<: *common-environment
15 | ports:
16 | - "3306:3306"
17 | healthcheck:
18 | test: ["CMD-SHELL", "mysqladmin ping -h 127.0.0.1 -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"]
19 |
20 | postgres:
21 | image: postgres
22 | environment:
23 | <<: *common-environment
24 | ports:
25 | - "5432:5432"
26 | healthcheck:
27 | test: ["CMD-SHELL", "pg_isready -U postgres -h 127.0.0.1"]
28 |
29 | python:
30 | build: .
31 | volumes:
32 | - type: bind
33 | source: ..
34 | target: /workspace
35 | working_dir: /workspace
36 | depends_on: [mysql, postgres]
37 | environment:
38 | <<: *common-environment
39 | env_file: .env
40 | command: [/bin/bash, /workspace/scripts/run-test.sh]
41 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v6.0.0
4 | hooks:
5 | - id: check-case-conflict
6 | - id: check-added-large-files
7 | - id: check-symlinks
8 | - id: detect-private-key
9 | - id: fix-byte-order-marker
10 | - id: mixed-line-ending
11 | - id: check-merge-conflict
12 | - id: end-of-file-fixer
13 | - id: trailing-whitespace
14 | args: [--markdown-linebreak-ext=md]
15 | - id: check-yaml
16 | - id: check-toml
17 | - id: check-ast
18 | - id: check-builtin-literals
19 | - id: check-docstring-first
20 |
21 | - repo: https://github.com/astral-sh/ruff-pre-commit
22 | rev: v0.14.0
23 | hooks:
24 | # Run the linter.
25 | - id: ruff
26 | types_or: [python, pyi, jupyter]
27 | args: [--fix]
28 | # Run the formatter.
29 | - id: ruff-format
30 | types_or: [python, pyi, jupyter]
31 |
32 | - repo: https://github.com/pre-commit/mirrors-mypy
33 | rev: v1.18.2
34 | hooks:
35 | - id: mypy
36 | args: [--config-file, .mypy.ini, --ignore-missing-imports]
37 |
38 | - repo: https://github.com/python-jsonschema/check-jsonschema
39 | rev: "0.34.0"
40 | hooks:
41 | - id: check-github-workflows
42 | - id: check-readthedocs
43 |
--------------------------------------------------------------------------------
/tests/test_session.py:
--------------------------------------------------------------------------------
 1 | """Session-based lock tests for each configured engine."""
 2 |
 3 | from unittest import TestCase
 4 | from uuid import uuid1
 5 |
 6 | from sqlalchemy.orm import sessionmaker
 7 |
 8 | from sqlalchemy_dlock import create_sadlock
 9 |
10 | from .engines import ENGINES
11 |
12 |
13 | class SessionTestCase(TestCase):
14 |     """Exercise create_sadlock() with plain ORM sessions."""
15 |
16 |     # Session factories, one per engine; populated once in setUpClass.
17 |     Sessions = []  # type: ignore[var-annotated]
18 |
19 |     @classmethod
20 |     def setUpClass(cls):
21 |         for engine in ENGINES:
22 |             Session = sessionmaker(bind=engine)
23 |             cls.Sessions.append(Session)
24 |
25 |     def tearDown(self):
26 |         # Drop pooled connections so each test starts from a clean pool.
27 |         for engine in ENGINES:
28 |             engine.dispose()
29 |
30 |     def test_once(self):
31 |         # Lock is held inside the context manager and released on exit.
32 |         key = uuid1().hex
33 |         for Session in self.Sessions:
34 |             with Session() as session:
35 |                 with create_sadlock(session, key) as lock:
36 |                     self.assertTrue(lock.locked)
37 |                 self.assertFalse(lock.locked)
38 |
39 |     def test_cross_transaction(self):
40 |         # Lock operations keep working across commit/rollback/close of the session.
41 |         key = uuid1().hex
42 |         for Session in self.Sessions:
43 |             with Session() as session:
44 |                 session.commit()
45 |                 lock = create_sadlock(session, key)
46 |                 session.rollback()
47 |                 self.assertTrue(lock.acquire())
48 |                 session.close()
49 |                 lock.release()
50 |                 self.assertFalse(lock.locked)
51 |
--------------------------------------------------------------------------------
/.vscode/tasks.json:
--------------------------------------------------------------------------------
1 | {
2 | // See https://go.microsoft.com/fwlink/?LinkId=733558
3 | // for the documentation about the tasks.json format
4 | "version": "2.0.0",
5 | "tasks": [
6 | {
7 | "label": "python build",
8 | "type": "shell",
9 | "command": "${command:python.interpreterPath} -m build",
10 | "group": {
11 | "kind": "build",
12 | "isDefault": true
13 | },
14 | "runOptions": {
15 | "instanceLimit": 1
16 | },
17 | "problemMatcher": []
18 | },
19 | {
20 | "label": "docs: build",
21 | "type": "shell",
22 | "command": "${command:python.interpreterPath} -m sphinx -j auto -d _build/doctrees . _build/html",
23 | "runOptions": {
24 | "instanceLimit": 1
25 | },
26 | "options": {
27 | "cwd": "${workspaceFolder}/docs"
28 | },
29 | "problemMatcher": []
30 | },
31 | {
32 | "label": "docs: serve",
33 | "type": "shell",
34 |       "command": "${command:python.interpreterPath} -m http.server --directory _build/html",
35 | "runOptions": {
36 | "instanceLimit": 1
37 | },
38 | "options": {
39 | "cwd": "${workspaceFolder}/docs"
40 | },
41 | "problemMatcher": []
42 | }
43 | ]
44 | }
45 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/registry.py:
--------------------------------------------------------------------------------
 1 | """Registry mapping SQLAlchemy dialect names to their lock implementations."""
 2 |
 3 | from __future__ import annotations
 4 |
 5 | from importlib import import_module
 6 | from string import Template
 7 | from typing import TYPE_CHECKING, Type, Union
 8 |
 9 | if TYPE_CHECKING:
10 |     from .lock.base import BaseAsyncSadLock, BaseSadLock
11 |
12 |
13 | # Synchronous lock classes, keyed by dialect name. "${package}" is substituted
14 | # with this package's name at lookup time so the module import stays relative.
15 | REGISTRY = {
16 |     "mysql": {
17 |         "module": ".lock.mysql",
18 |         "package": "${package}",  # module name relative to the package
19 |         "class": "MysqlSadLock",
20 |     },
21 |     "postgresql": {
22 |         "module": ".lock.postgresql",
23 |         "package": "${package}",  # module name relative to the package
24 |         "class": "PostgresqlSadLock",
25 |     },
26 | }
27 |
28 | # Asyncio lock classes; same layout as REGISTRY.
29 | ASYNCIO_REGISTRY = {
30 |     "mysql": {
31 |         "module": ".lock.mysql",
32 |         "package": "${package}",  # module name relative to the package
33 |         "class": "MysqlAsyncSadLock",
34 |     },
35 |     "postgresql": {
36 |         "module": ".lock.postgresql",
37 |         "package": "${package}",  # module name relative to the package
38 |         "class": "PostgresqlAsyncSadLock",
39 |     },
40 | }
41 |
42 |
43 | def find_lock_class(engine_name, is_asyncio=False) -> Type[Union[BaseSadLock, BaseAsyncSadLock]]:
44 |     """Import and return the lock class registered for *engine_name*.
45 |
46 |     Args:
47 |         engine_name: registry key, i.e. the dialect name ("mysql", "postgresql").
48 |         is_asyncio: when True, look up the asyncio variant in ASYNCIO_REGISTRY.
49 |
50 |     Raises:
51 |         KeyError: if *engine_name* has no registry entry.
52 |     """
53 |     reg = ASYNCIO_REGISTRY if is_asyncio else REGISTRY
54 |     conf = reg[engine_name]
55 |     package = conf.get("package")
56 |     if package:
57 |         # Expand "${package}" to this package's name, anchoring the
58 |         # relative module path for import_module below.
59 |         package = Template(package).safe_substitute(package=__package__)
60 |     module = import_module(conf["module"], package)
61 |     class_ = getattr(module, conf["class"])
62 |     return class_
63 |
--------------------------------------------------------------------------------
/docs/README.rst:
--------------------------------------------------------------------------------
1 | README
2 | ======
3 |
4 | .. include:: ../README.md
5 | :parser: myst_parser.sphinx_
6 |
7 | How to Build the Documentation
8 | ------------------------------
9 |
10 | #. The documentation is built using `Sphinx <https://www.sphinx-doc.org/>`_.
11 | We need to install the package and its requirements for building documentation:
12 |
13 | .. code:: sh
14 |
15 | pip install -e . --group docs
16 |
17 | or if you are using `uv`_:
18 |
19 | .. code:: sh
20 |
21 | uv sync --group docs
22 |
23 | #. Generate API documentation.
24 | If the source tree has changed, you may clear the `docs/apidocs` directory and regenerate the API documentation:
25 |
26 | .. code:: sh
27 |
28 | sphinx-apidoc -H "" -feo docs/apidocs src
29 |
30 | #. Build HTML documentation:
31 |
32 | * Using the Make tool (for Unix/Linux/macOS):
33 |
34 | .. code:: sh
35 |
36 | make -C docs html
37 |
38 | * On Windows:
39 |
40 | .. code:: bat
41 |
42 | docs\make html
43 |
44 | The built static website is located at ``docs/_build/html``. You can serve it with a simple HTTP server:
45 |
46 | .. code:: sh
47 |
48 | python -m http.server --directory docs/_build/html
49 |
50 | Then open http://localhost:8000/ in a web browser.
51 |
52 | .. tip::
53 | Try another port if ``8000`` is already in use.
54 | For example, to serve on port ``8080``:
55 |
56 | .. code:: sh
57 |
58 | python -m http.server --directory docs/_build/html 8080
59 |
60 | .. seealso:: Python ``stdlib``'s :mod:`http.server`
61 |
62 | .. _uv: https://docs.astral.sh/uv/
63 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, liu xue yan
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/tests/test_scoped_session.py:
--------------------------------------------------------------------------------
 1 | """scoped_session-based lock tests for each configured engine."""
 2 |
 3 | from unittest import TestCase
 4 | from uuid import uuid4
 5 |
 6 | from sqlalchemy.orm import scoped_session, sessionmaker
 7 |
 8 | from sqlalchemy_dlock import create_sadlock
 9 |
10 | from .engines import ENGINES
11 |
12 |
13 | class ScopedSessionTestCase(TestCase):
14 |     """Exercise create_sadlock() with sessions produced by scoped_session registries."""
15 |
16 |     def setUp(self):
17 |         # One scoped-session registry and one session per configured engine.
18 |         self.Sessions = []
19 |         self.sessions = []
20 |         for engine in ENGINES:
21 |             factory = sessionmaker(bind=engine)
22 |             Session = scoped_session(factory)
23 |             self.Sessions.append(Session)
24 |             self.sessions.append(Session())
25 |
26 |     def tearDown(self):
27 |         # Discard the scoped sessions, then drop each engine's pooled connections.
28 |         for Session in self.Sessions:
29 |             Session.remove()
30 |         for engine in ENGINES:
31 |             engine.dispose()
32 |
33 |     def test_once(self):
34 |         # Lock is held inside the context manager and released on exit.
35 |         key = uuid4().hex
36 |         for session in self.sessions:
37 |             with create_sadlock(session, key) as lock:
38 |                 self.assertTrue(lock.locked)
39 |             self.assertFalse(lock.locked)
40 |
41 |     def test_twice(self):
42 |         # The same key can be re-acquired after release.
43 |         key = uuid4().hex
44 |         for session in self.sessions:
45 |             for _ in range(2):
46 |                 with create_sadlock(session, key) as lock:
47 |                     self.assertTrue(lock.locked)
48 |                 self.assertFalse(lock.locked)
49 |
50 |     def test_separated_connection(self):
51 |         # Lock operations keep working across commit/rollback/close of the session.
52 |         key = uuid4().hex
53 |         for session in self.sessions:
54 |             session.commit()
55 |             lock = create_sadlock(session, key)
56 |             session.rollback()
57 |             self.assertTrue(lock.acquire())
58 |             session.close()
59 |             lock.release()
60 |             self.assertFalse(lock.locked)
61 |
--------------------------------------------------------------------------------
/scripts/run-test.sh:
--------------------------------------------------------------------------------
set -eu

# Connection URLs consumed by the test suite (see tests/engines.py).
export TEST_URLS="mysql://$MYSQL_USER:$MYSQL_PASSWORD@mysql/$MYSQL_DATABASE postgresql://postgres:$POSTGRES_PASSWORD@postgres/"
export TEST_ASYNC_URLS="mysql+aiomysql://$MYSQL_USER:$MYSQL_PASSWORD@mysql/$MYSQL_DATABASE postgresql+asyncpg://postgres:$POSTGRES_PASSWORD@postgres/"

# Block until both database services accept connections.
/bin/bash scripts/wait-for-postgres.sh postgres "$POSTGRES_PASSWORD"
/bin/bash scripts/wait-for-mysql.sh mysql "$MYSQL_DATABASE" "$MYSQL_USER" "$MYSQL_PASSWORD"

export SETUPTOOLS_SCM_PRETEND_VERSION=0
export PIP_DISABLE_PIP_VERSION_CHECK=1
export PIP_ROOT_USER_ACTION=ignore
export PIP_NO_WARN_SCRIPT_LOCATION=1

PYTHON_LIST=(python3.9 python3.10 python3.11 python3.12 python3.13 python3.14)
REQUIRES_LIST=("SQLAlchemy[asyncio]>=1.4.3,<2" "SQLAlchemy[asyncio]>=2,<3")

trap 'rm -rf /tmp/sqlalchemy-dlock-test-*' EXIT

# Array expansions are quoted so requirement specifiers such as
# "SQLAlchemy[asyncio]>=..." are neither word-split nor glob-expanded.
for PYTHON in "${PYTHON_LIST[@]}"
do
    for REQUIRES in "${REQUIRES_LIST[@]}"
    do
        echo
        echo "---------------------------------------------------------------"
        echo "Begin of ${PYTHON} ${REQUIRES}"
        echo "---------------------------------------------------------------"
        echo
        TMPDIR=$(mktemp -d -t "sqlalchemy-dlock-test-${PYTHON}-${REQUIRES//[^a-zA-Z0-9]/-}")
        (
            set -e
            cd /workspace
            # Fix: a virtual environment was never created, so
            # "$TMPDIR/bin/python" did not exist before this line.
            "$PYTHON" -m venv "$TMPDIR"
            # Fix: the dependency group declared in pyproject.toml is named
            # "test" (not "tests"); matches the GitHub workflow invocation.
            "$TMPDIR/bin/python" -m pip install -e . --group test cryptography "$REQUIRES"
            "$TMPDIR/bin/python" -m coverage run -m unittest -cfv
            "$TMPDIR/bin/python" -m coverage report
        ) || {
            echo "Test failed for ${PYTHON} ${REQUIRES}"
            exit 1
        }
        rm -rf "$TMPDIR"
        echo
        echo "---------------------------------------------------------------"
        echo "End of ${PYTHON} ${REQUIRES}"
        echo "---------------------------------------------------------------"
        echo
    done
done
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. sqlalchemy-dlock documentation master file, created by
2 | sphinx-quickstart on Mon Jun 12 16:24:20 2023.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | ================
7 | sqlalchemy-dlock
8 | ================
9 |
10 | .. hlist::
11 | :columns: 2
12 |
13 | *
14 | ========== ========= =========
15 | Release Version Built at
16 | ========== ========= =========
17 | |release| |version| |today|
18 | ========== ========= =========
19 |
20 | *
21 | .. image:: https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml/badge.svg
22 | :alt: Python package
23 | :target: https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml
24 |
25 | .. image:: https://img.shields.io/pypi/v/sqlalchemy-dlock
26 | :alt: PyPI
27 | :target: https://pypi.org/project/sqlalchemy-dlock/
28 |
29 | .. image:: https://readthedocs.org/projects/sqlalchemy-dlock/badge/?version=latest
30 | :alt: Documentation Status
31 | :target: https://sqlalchemy-dlock.readthedocs.io/en/latest/
32 |
33 | .. image:: https://codecov.io/gh/tanbro/sqlalchemy-dlock/branch/main/graph/badge.svg
34 | :alt: CodeCov
35 | :target: https://codecov.io/gh/tanbro/sqlalchemy-dlock
36 |
37 | .. rubric::
   `sqlalchemy-dlock` is a distributed-lock library based on Database and `SQLAlchemy <https://www.sqlalchemy.org/>`_.
39 |
40 | --------
41 | Contents
42 | --------
43 |
44 | .. toctree::
45 | :caption: Documentation
46 | :titlesonly:
47 | :maxdepth: 1
48 |
49 | README
50 | AUTHORS
51 | CHANGELOG
52 |
53 | .. toctree::
54 | :caption: API Reference
55 | :titlesonly:
56 | :maxdepth: 1
57 |
58 | apidocs/modules
59 |
60 | ------------------
61 | Indices and tables
62 | ------------------
63 |
64 | * :ref:`genindex`
65 | * :ref:`modindex`
66 |
67 | .. furo has no search page
68 | .. * :ref:`search`
69 |
--------------------------------------------------------------------------------
/tests/test_pg.py:
--------------------------------------------------------------------------------
1 | from threading import Barrier, Thread
2 | from time import sleep
3 | from unittest import TestCase
4 | from uuid import uuid4
5 |
6 | from sqlalchemy_dlock import create_sadlock
7 |
8 | from .engines import ENGINES
9 |
10 |
class PgTestCase(TestCase):
    """PostgreSQL-only behaviors: ``interval`` argument validation and
    transaction-scoped ("xact") advisory locks."""

    def tearDown(self):
        # Dispose every engine so pooled connections do not leak across tests.
        for engine in ENGINES:
            engine.dispose()

    def test_pg_invalid_interval(self):
        """A negative retry ``interval`` must be rejected with :class:`ValueError`."""
        for engine in ENGINES:
            if engine.name != "postgresql":
                continue
            key = uuid4().hex
            with engine.connect() as conn:
                lck = create_sadlock(conn, key)
                with self.assertRaises(ValueError):
                    lck.acquire(timeout=0, interval=-1)

    def test_simple_xact(self):
        """A transaction-level lock can be acquired inside an explicit transaction."""
        key = uuid4().hex
        for engine in ENGINES:
            if engine.name != "postgresql":
                continue
            with engine.connect() as conn:
                lck = create_sadlock(conn, key, xact=True)
                with conn.begin():
                    self.assertTrue(lck.acquire())

    def test_xact_thread(self):
        """An xact lock held by one connection blocks another thread until the
        holder's transaction ends.

        Choreography: the main thread acquires the lock inside a transaction,
        both threads meet at the barrier, the worker fails a non-blocking
        acquire, sleeps 10s (longer than the main thread's 3s sleep plus its
        transaction exit, which is expected to release an xact lock), then
        the worker's acquire succeeds.
        """
        key = uuid4().hex
        for engine in ENGINES:
            if engine.name != "postgresql":
                continue

            # Exception captured from the worker thread; re-raised at the end
            # because assertion failures inside a thread do not fail the test
            # on their own.
            trd_exc = None
            bar = Barrier(2)

            def fn_():
                nonlocal trd_exc
                try:
                    with engine.connect() as c_:
                        l_ = create_sadlock(c_, key, xact=True)
                        bar.wait(30)
                        with c_.begin():
                            # Main thread still holds the lock here.
                            self.assertFalse(l_.acquire(block=False))
                            sleep(10)
                            # By now the main thread's transaction has ended.
                            self.assertTrue(l_.acquire(block=False))
                except Exception as exc:
                    trd_exc = exc
                    raise exc

            trd = Thread(target=fn_)
            trd.start()

            with engine.connect() as conn:
                lck = create_sadlock(conn, key, xact=True)
                with conn.begin():
                    self.assertTrue(lck.acquire(block=False))
                    bar.wait(30)
                    sleep(3)

            trd.join()

            if trd_exc is not None:
                raise trd_exc  # type: ignore
73 |
--------------------------------------------------------------------------------
/tests/asyncio/test_concurrency.py:
--------------------------------------------------------------------------------
1 | # https://github.com/sqlalchemy/sqlalchemy/issues/5581
2 | #
3 | # Multiple Co-routines of SQL executions on a same Engine's Connection/Session will case a deadlock.
4 | # So we shall do that on different Engine objects!
5 |
6 |
7 | import asyncio
8 | from time import time
9 | from unittest import IsolatedAsyncioTestCase
10 | from uuid import uuid4
11 |
12 | from sqlalchemy.ext.asyncio import create_async_engine
13 |
14 | from sqlalchemy_dlock import create_async_sadlock
15 |
16 | from .engines import create_engines, dispose_engines, get_engines
17 |
18 |
class ConcurrencyTestCase(IsolatedAsyncioTestCase):
    """Concurrent acquisition from two coroutines.

    Each coroutine gets its own engine because multiple coroutines sharing one
    engine's connection can deadlock (see the module header note /
    sqlalchemy issue #5581).
    """

    def setUp(self):
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_timeout(self):
        """A second holder's ``acquire(timeout=...)`` fails after roughly ``timeout`` seconds
        while the first holder keeps the lock for ``delay`` seconds."""
        key = uuid4().hex
        for engine in get_engines():
            delay = 3  # how long coro1 holds the lock
            timeout = 1  # coro2's acquire timeout; must be < delay so acquire fails
            event = asyncio.Event()
            # Separate engines per coroutine — see class docstring.
            engine1 = create_async_engine(engine.url)
            engine2 = create_async_engine(engine.url)
            try:

                async def coro1():
                    async with engine1.connect() as conn:
                        async with create_async_sadlock(conn, key) as lck:
                            self.assertTrue(lck.locked)
                            # Signal coro2 that the lock is now held.
                            event.set()
                            await asyncio.sleep(delay)
                        self.assertFalse(lck.locked)

                async def coro2():
                    async with engine2.connect() as conn:
                        lck = create_async_sadlock(conn, key)
                        # Wait until coro1 actually holds the lock.
                        await event.wait()
                        t0 = time()
                        is_ok = await lck.acquire(timeout=timeout)
                        self.assertFalse(is_ok)
                        self.assertFalse(lck.locked)
                        # The failed acquire must have taken at least `timeout` seconds.
                        self.assertGreaterEqual(time() - t0, timeout)

                aws = (
                    asyncio.create_task(coro1()),
                    asyncio.create_task(coro2()),
                )
                await asyncio.wait(aws, timeout=delay * 2)
            finally:
                # Dispose both per-test engines even if the assertions failed.
                aws = (
                    asyncio.create_task(engine1.dispose()),
                    asyncio.create_task(engine2.dispose()),
                )
                await asyncio.wait(aws, timeout=delay * 2)
65 |
--------------------------------------------------------------------------------
/tests/asyncio/test_pg.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | from unittest import IsolatedAsyncioTestCase, skipIf
4 | from uuid import uuid4
5 |
6 | from sqlalchemy_dlock import create_async_sadlock
7 |
8 | from .engines import create_engines, dispose_engines, get_engines
9 |
10 |
class PgTestCase(IsolatedAsyncioTestCase):
    """PostgreSQL-only async tests: ``interval`` argument validation and
    transaction-scoped ("xact") advisory locks."""

    def setUp(self):
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_pg_invalid_interval(self):
        """A negative retry ``interval`` must be rejected with :class:`ValueError`."""
        for engine in get_engines():
            if engine.name != "postgresql":
                continue
            key = uuid4().hex
            async with engine.connect() as conn:
                lck = create_async_sadlock(conn, key)
                with self.assertRaises(ValueError):
                    await lck.acquire(timeout=0, interval=-1)

    async def test_simple_xact(self):
        """A transaction-level lock can be acquired inside an explicit transaction."""
        key = uuid4().hex
        for engine in get_engines():
            if engine.name != "postgresql":
                continue
            async with engine.connect() as conn:
                lck = create_async_sadlock(conn, key, xact=True)
                async with conn.begin():
                    self.assertTrue(await lck.acquire())

    @skipIf(sys.version_info < (3, 11), "‘asyncio.Barrier’: New in version 3.11")
    async def test_xact_coro(self):
        """An xact lock held by one task blocks another until the holder's
        transaction ends.

        Choreography: the main task acquires the lock inside a transaction,
        both tasks meet at the barrier, the worker fails a non-blocking
        acquire, then — after the main task's 3s sleep and transaction exit —
        the worker's acquire succeeds.
        """
        key = uuid4().hex
        for engine in get_engines():
            if engine.name != "postgresql":
                continue

            bar = asyncio.Barrier(2)

            async def coro():
                async with engine.connect() as c_:
                    l_ = create_async_sadlock(c_, key, xact=True)
                    await asyncio.wait_for(bar.wait(), 10)
                    async with c_.begin():
                        # Main task still holds the lock here.
                        self.assertFalse(await l_.acquire(block=False))
                        await asyncio.sleep(3)
                        # By now the main task's transaction has ended.
                        self.assertTrue(await l_.acquire(block=False))

            task = asyncio.create_task(coro())

            async with engine.connect() as conn:
                lck = create_async_sadlock(conn, key, xact=True)
                async with conn.begin():
                    self.assertTrue(await lck.acquire(block=False))
                    await asyncio.wait_for(bar.wait(), 5)
                    await asyncio.sleep(3)

            await asyncio.wait_for(task, 10)
66 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "setuptools.build_meta"
3 | requires = ["setuptools>=80", "setuptools-scm>=8"]
4 |
5 | [project]
6 | name = "sqlalchemy-dlock"
7 | readme = { file = 'README.md', content-type = 'text/markdown' }
8 | authors = [{ name = "liu xue yan", email = "liu_xue_yan@foxmail.com" }]
9 | description = "A distributed lock implementation based on SQLAlchemy"
10 | keywords = [
11 | "SQLAlchemy",
12 | "lock",
13 | "distributed",
14 | "distributed lock",
15 | "SQL",
16 | "database",
17 | "DBMS",
18 | "RDBMS",
19 | ]
20 | license = "BSD-3-Clause"
21 | license-files = ["LICENSE"]
22 |
23 | classifiers = [
24 | "Development Status :: 5 - Production/Stable",
25 | "Topic :: Database :: Front-Ends",
26 | "Intended Audience :: Developers",
27 | "Programming Language :: Python",
28 | ]
29 | dynamic = ["version"]
30 |
31 | # requires python version
32 | requires-python = ">=3.9"
33 | # requires
34 | dependencies = [
35 | "SQLAlchemy[asyncio]>=1.4.3,<3.0",
36 | "typing-extensions; python_version<'3.12'",
37 | ]
38 | # extra requires
39 | [project.optional-dependencies]
40 | # MySQL
41 | mysqlclient = ["mysqlclient"]
42 | pymysql = ["pymysql"]
43 | # MySQL asyncio
44 | aiomysql = ["aiomysql"]
45 | # Postgres
46 | # psycopg2: sync
47 | psycopg2 = ["psycopg2>=2.8"] # psycopg2 compiling needed when install
48 | psycopg2-binary = [
49 | "psycopg2-binary>=2.8",
50 | ] # psycopg2 with pre-compiled C library
51 | # psycopg3: both sync and asyncio
psycopg3 = ["psycopg"] # psycopg3 dynamic link to libpq
53 | psycopg3-binary = ["psycopg[binary]"] # psycopg3 with pre-compiled C library
54 | psycopg3-c = ["psycopg[c]",] # psycopg3 compiling needed when install
55 | # Postgres asyncio
56 | asyncpg = ["asyncpg"]
57 |
58 | # Project links
59 | [project.urls]
60 | homepage = "https://github.com/tanbro/sqlalchemy-dlock"
61 | source = "https://github.com/tanbro/sqlalchemy-dlock.git"
62 | documentation = "https://sqlalchemy-dlock.readthedocs.io/"
63 |
64 | [tool.setuptools.packages.find]
65 | where = ["src"]
66 |
67 | [tool.setuptools_scm]
68 | write_to = "src/sqlalchemy_dlock/_version.py"
69 |
70 | [dependency-groups]
71 | dev = [
72 | { include-group = "typed" },
73 | { include-group = "test" },
74 | { include-group = "docs" },
75 | ]
76 | docs = [
77 | "sphinx>=7",
78 | "furo",
79 | "linkify-it-py",
80 | "myst-parser",
81 | "nbsphinx",
82 | "sphinx-copybutton",
83 | "sphinx-inline-tabs",
84 | "sphinx-tippy",
85 | "sphinx-version-warning",
86 | "sphinx-autodoc-typehints",
87 | ]
88 | typed = ["mypy"]
89 | test = ["coverage", "python-dotenv"]
90 | ipy = ["ipykernel"]
91 |
92 |
[tool.setuptools.package-data]
# Glob patterns here are resolved relative to the package directory itself,
# so the pattern must be "py.typed", not "sqlalchemy_dlock/py.typed".
sqlalchemy_dlock = ["py.typed"]
95 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
# Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

import importlib.metadata

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "sqlalchemy-dlock"
copyright = "2023-2024, liu xue yan"
author = "liu xue yan"

# Per Sphinx convention, ``release`` is the FULL version string (may include
# alpha/beta/rc tags) and ``version`` is the abbreviated major.minor form.
# These two were previously assigned the other way round.
release = importlib.metadata.version(project)
version = ".".join(release.split(".")[:2])

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "myst_parser",
    "sphinx.ext.autodoc",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx.ext.githubpages",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinx_tippy",
    "sphinx_inline_tabs",
    "sphinx_copybutton",
    "versionwarning.extension",
    "sphinx_autodoc_typehints",
]
source_suffix = {
    ".rst": "restructuredtext",
    ".md": "markdown",
}

templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_static_path = ["_static"]
html_theme = "furo"
html_theme_options = {
    "source_repository": "https://github.com/tanbro/sqlalchemy-dlock",
    "source_branch": "main",
    "source_directory": "docs/",
    "top_of_page_button": "edit",
}

# -- Options for autodoc ----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration
# autodoc_mock_imports = []

# Automatically extract typehints when specified and place them in
# descriptions of the relevant function/method.
autodoc_typehints = "both"

# Don't show class signature with the class' name.
# autodoc_class_signature = "separated"

autoclass_content = "both"
# autodoc_member_order = "bysource"

# -- Options for myst_parser extension ---------------------------------------

myst_enable_extensions = [
    "amsmath",
    "attrs_inline",
    "colon_fence",
    "deflist",
    "dollarmath",
    "fieldlist",
    "html_image",
    "replacements",
    "smartquotes",
    "strikethrough",
    "substitution",
    "tasklist",
]

# -- Options for intersphinx extension ---------------------------------------

# Map external projects to their object inventories for cross-references.
intersphinx_mapping = {
    "python": ("https://docs.python.org/", None),
    # Key renamed from the mistaken "sphinx": this inventory is SQLAlchemy's.
    "sqlalchemy": ("https://docs.sqlalchemy.org/", None),
}

# -- Options for Napoleon settings ---------------------------------------
napoleon_use_admonition_for_examples = True
napoleon_use_admonition_for_notes = True
napoleon_use_admonition_for_references = True
99 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/factory.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Type, TypeVar, Union
3 |
4 | if sys.version_info < (3, 10): # pragma: no cover
5 | from typing_extensions import TypeGuard
6 | else: # pragma: no cover
7 | from typing import TypeGuard
8 |
9 | from sqlalchemy.engine import Connection
10 | from sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, async_scoped_session
11 | from sqlalchemy.orm import Session, scoped_session
12 |
13 | from .lock.base import AsyncConnectionTV, BaseAsyncSadLock, BaseSadLock, ConnectionTV
14 | from .registry import find_lock_class
15 |
16 | __all__ = ("create_sadlock", "create_async_sadlock")
17 |
18 |
19 | KTV = TypeVar("KTV")
20 |
21 |
def create_sadlock(
    connection_or_session: ConnectionTV, key: KTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
) -> BaseSadLock[KTV, ConnectionTV]:
    """Create a database distributed lock object.

    All arguments are forwarded to a sub-class of :class:`.BaseSadLock`,
    selected by the dialect of ``connection_or_session``'s SQLAlchemy engine.

    Args:

        connection_or_session:
            Connection or Session object; SQL locking functions will be invoked on it.

        key:
            ID or name of the SQL locking function.

        contextual_timeout:
            Timeout(seconds) for Context Managers.

            When called in a :keyword:`with` statement, the new created lock object will pass it to ``timeout`` argument of :meth:`.BaseSadLock.acquire`.

            A :exc:`TimeoutError` will be thrown if can not acquire after ``contextual_timeout``

    Returns:
        New created lock object.

        Its concrete type is a sub-class of :class:`.BaseSadLock`, chosen from
        the passed-in SQLAlchemy `connection` or `session`.

        MySQL and PostgreSQL connection/session are supported til now.

    Raises:
        TypeError: when ``connection_or_session`` is neither a supported
            connection/session type nor bound to a supported dialect.
    """
    # Work out the dialect name from whichever kind of object we were given.
    if isinstance(connection_or_session, Connection):
        engine_name = connection_or_session.engine.name
    elif isinstance(connection_or_session, (Session, scoped_session)):
        bind = connection_or_session.get_bind()
        # A session may be bound either to a Connection or to an Engine.
        engine_name = bind.engine.name if isinstance(bind, Connection) else bind.name
    else:
        raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")

    lock_class = find_lock_class(engine_name)
    if not is_sadlock_type(lock_class):
        raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
    return lock_class(connection_or_session, key, contextual_timeout=contextual_timeout, **kwargs)
66 |
67 |
def create_async_sadlock(
    connection_or_session: AsyncConnectionTV, key: KTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
) -> BaseAsyncSadLock[KTV, AsyncConnectionTV]:
    """AsyncIO version of :func:`create_sadlock`"""
    # Work out the dialect name from whichever kind of async object we were given.
    if isinstance(connection_or_session, AsyncConnection):
        engine_name = connection_or_session.engine.name
    elif isinstance(connection_or_session, (AsyncSession, async_scoped_session)):
        bind = connection_or_session.get_bind()
        # An async session's bind may be a (sync) Connection or an Engine.
        engine_name = bind.engine.name if isinstance(bind, Connection) else bind.name
    else:
        raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")

    lock_class = find_lock_class(engine_name, True)
    if not is_async_sadlock_type(lock_class):
        raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
    return lock_class(connection_or_session, key, contextual_timeout=contextual_timeout, **kwargs)
87 |
88 |
def is_sadlock_type(cls: Type) -> TypeGuard[Type[BaseSadLock]]:
    """Narrow *cls* to ``Type[BaseSadLock]`` when it is a :class:`.BaseSadLock` subclass."""
    result = issubclass(cls, BaseSadLock)
    return result
92 |
93 |
def is_async_sadlock_type(cls: Type) -> TypeGuard[Type[BaseAsyncSadLock]]:
    """Narrow *cls* to ``Type[BaseAsyncSadLock]`` when it is a :class:`.BaseAsyncSadLock` subclass."""
    result = issubclass(cls, BaseAsyncSadLock)
    return result
97 |
--------------------------------------------------------------------------------
/tests/test_key_convert.py:
--------------------------------------------------------------------------------
1 | from multiprocessing import cpu_count
2 | from random import choice
3 | from unittest import TestCase
4 | from uuid import uuid4
5 | from zlib import crc32
6 |
7 | from sqlalchemy_dlock import create_sadlock
8 | from sqlalchemy_dlock.lock.mysql import MYSQL_LOCK_NAME_MAX_LENGTH
9 |
10 | from .engines import ENGINES
11 |
# Number of logical CPUs on this machine.
# NOTE(review): not referenced anywhere in this module — confirm whether it is still needed.
CPU_COUNT = cpu_count()
13 |
14 |
class KeyConvertTestCase(TestCase):
    """Key-conversion hooks and per-backend key type/range limits."""

    def tearDown(self):
        # Release every engine's connection pool between tests.
        for eng in ENGINES:
            eng.dispose()

    def test_convert(self):
        # A user-supplied ``convert`` callable maps arbitrary keys to whatever
        # the backend accepts (strings for MySQL, 64-bit ints for PostgreSQL).
        for eng in ENGINES:
            name = uuid4().hex

            if eng.name == "mysql":

                def _convert(k):  # type: ignore
                    return f"key is {k!r}"

            elif eng.name == "postgresql":

                def _convert(k):  # type: ignore
                    return crc32(str(k).encode())

            else:
                raise NotImplementedError()

            with eng.connect() as cn:
                with create_sadlock(cn, name, convert=_convert) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    def test_mysql_key_max_length(self):
        # A key exactly at MySQL's maximum lock-name length is accepted.
        charset = [chr(cp) for cp in range(0x20, 0x7F)]
        for eng in ENGINES:
            if eng.name != "mysql":
                continue
            name = "".join(choice(charset) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH))
            with eng.connect() as cn:
                with create_sadlock(cn, name) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    def test_mysql_key_gt_max_length(self):
        # One character over the limit must be rejected.
        charset = [chr(cp) for cp in range(0x20, 0x7F)]
        for eng in ENGINES:
            if eng.name != "mysql":
                continue
            name = "".join(choice(charset) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH + 1))
            with eng.connect() as cn:
                with self.assertRaises(ValueError):
                    create_sadlock(cn, name)

    def test_mysql_key_not_a_string(self):
        # With an identity converter, MySQL only accepts string keys.
        bad_keys = (None, 1, 0, -1, 0.1, True, False, (), [], set(), {}, object())

        for eng in ENGINES:
            if eng.name != "mysql":
                continue

            with eng.connect() as cn:
                for bad in bad_keys:
                    with self.assertRaises(TypeError):
                        create_sadlock(cn, bad, convert=lambda x: x)

    def _assert_pg_key_ok(self, name):
        # Helper: an integer key within the signed 64-bit range must work.
        for eng in ENGINES:
            if eng.name != "postgresql":
                continue
            with eng.connect() as cn:
                with create_sadlock(cn, name) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    def _assert_pg_key_overflow(self, name):
        # Helper: an integer key outside the signed 64-bit range raises OverflowError.
        for eng in ENGINES:
            if eng.name != "postgresql":
                continue
            with eng.connect() as cn:
                with self.assertRaises(OverflowError):
                    create_sadlock(cn, name)

    def test_postgresql_key_max(self):
        self._assert_pg_key_ok(2**63 - 1)

    def test_postgresql_key_over_max(self):
        self._assert_pg_key_overflow(2**63)

    def test_postgresql_key_min(self):
        self._assert_pg_key_ok(-(2**63))

    def test_postgresql_key_over_min(self):
        self._assert_pg_key_overflow(-(2**63) - 1)

    def test_key_wrong_type(self):
        # Containers and plain objects are never valid keys on any backend.
        for eng in ENGINES:
            with eng.connect() as cn:
                for bad in ((), {}, set(), [], object()):
                    with self.assertRaises(TypeError):
                        create_sadlock(cn, bad)
117 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [main]
9 | tags: ["*"]
10 | paths:
11 | - .github/**
12 | - src/**
13 | - tests/**
14 | - pyproject.toml
15 | pull_request:
16 | branches: [main]
17 | paths:
18 | - .github/**
19 | - src/**
20 | - tests/**
21 | - pyproject.toml
22 |
23 | jobs:
24 | get-version:
25 | runs-on: ubuntu-latest
26 | outputs:
27 | version: ${{ steps.get-version.outputs.version }}
28 | steps:
29 | - name: Check PEP-440 style version
30 | id: get-version
31 | run: |
32 | PEP440_VERSION=""
33 | VERSION_PREFIX="v"
34 | BRANCH_OR_TAG="$(echo ${{ github.event.ref }} | cut -d / -f 3)"
35 | if [[ "${BRANCH_OR_TAG}" =~ ^v?(([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))*(\.?(a|b|rc)(0|[1-9][0-9]*))?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?)$ ]]
36 | then
37 | PEP440_VERSION="${BRANCH_OR_TAG#$VERSION_PREFIX}"
38 | fi
39 | echo "PEP440_VERSION: ${PEP440_VERSION}"
40 | echo "version=${PEP440_VERSION}" >> $GITHUB_OUTPUT
41 |
42 | test:
43 | runs-on: ubuntu-latest
44 | strategy:
45 | matrix:
46 | python-version: ["3.9", "3.10", "3.11", "3.12", "3.13", "3.14"]
47 | sqlalchemy-requires:
48 | - SQLAlchemy[asyncio]>=1.4.3,<2.0
49 | - SQLAlchemy[asyncio]>=2.0,<3.0
50 |
51 | services:
52 | mysql:
53 | image: mysql
54 | env:
55 | MYSQL_RANDOM_ROOT_PASSWORD: "1"
56 | MYSQL_DATABASE: test
57 | MYSQL_USER: test
58 | MYSQL_PASSWORD: test
59 | options: >-
60 | --health-cmd "mysqladmin ping -h 127.0.0.1 -u $$MYSQL_USER --password=$$MYSQL_PASSWORD"
61 | ports:
62 | - 3306:3306
63 |
64 | postgres:
65 | image: postgres:alpine
66 | env:
67 | POSTGRES_PASSWORD: test
68 | options: >-
69 | --health-cmd "pg_isready -U postgres -h 127.0.0.1"
70 | ports:
71 | - 5432:5432
72 |
73 | steps:
74 | - uses: actions/checkout@v5
75 |
76 | - name: Install uv
77 | uses: astral-sh/setup-uv@v6
78 | with:
79 | enable-cache: true
80 | python-version: ${{ matrix.python-version }}
81 |
82 | - name: Cache of Ruff and MyPY
83 | uses: actions/cache@v4
84 | with:
85 | path: |
86 | .mypy_cache
87 | .ruff_cache
88 | key: cache-mypy-ruff
89 |
90 | - name: Install the project
91 | env:
92 | SETUPTOOLS_SCM_PRETEND_VERSION: "0"
93 | run: |
94 | uv sync --no-dev --group test --group typed --extra mysqlclient --extra aiomysql --extra psycopg2 --extra asyncpg
95 | uv pip install cryptography "${{ matrix.sqlalchemy-requires }}"
96 |
97 | - name: Lint check with ruff
98 | uses: astral-sh/ruff-action@v3
99 |
100 | - name: Static check with mypy
101 | run: uv run --no-dev mypy
102 |
103 | - name: Run tests
104 | shell: bash
105 | env:
106 | TEST_URLS: mysql://test:test@127.0.0.1:3306/test postgresql://postgres:test@127.0.0.1:5432/
107 | TEST_ASYNC_URLS: mysql+aiomysql://test:test@127.0.0.1:3306/test postgresql+asyncpg://postgres:test@127.0.0.1:5432/
108 | run: |
109 | uv run --no-dev coverage run -m unittest -cfv
110 | uv run --no-dev coverage report -m
111 | uv run --no-dev coverage xml
112 |
113 | - name: Upload coverage to Codecov
114 | uses: codecov/codecov-action@v5
115 | with:
116 | token: ${{ secrets.CODECOV_TOKEN }}
117 |
118 | Publish:
119 | runs-on: ubuntu-latest
120 | needs: [get-version, test]
121 | if: needs.get-version.outputs.version != ''
122 | steps:
123 | - name: Checkout
124 | uses: actions/checkout@v5
125 | with:
126 | fetch-depth: 0
127 | - name: Install uv
128 | uses: astral-sh/setup-uv@v6
129 | with:
130 | enable-cache: true
131 | - name: Build
132 | run: uv build
133 | - name: Publish
134 | run: uv publish --token ${{ secrets.PYPI_API_TOKEN }}
135 |
--------------------------------------------------------------------------------
/tests/asyncio/test_key_convert.py:
--------------------------------------------------------------------------------
1 | from multiprocessing import cpu_count
2 | from random import choice
3 | from unittest import IsolatedAsyncioTestCase
4 | from uuid import uuid4
5 | from zlib import crc32
6 |
7 | from sqlalchemy_dlock import create_async_sadlock
8 | from sqlalchemy_dlock.lock.mysql import MYSQL_LOCK_NAME_MAX_LENGTH
9 |
10 | from .engines import create_engines, dispose_engines, get_engines
11 |
# Number of logical CPUs on this machine.
# NOTE(review): not referenced anywhere in this module — confirm whether it is still needed.
CPU_COUNT = cpu_count()
13 |
14 |
class KeyConvertTestCase(IsolatedAsyncioTestCase):
    """Key-conversion hooks and per-backend key type/range limits (async engines)."""

    def setUp(self):
        # (Re-)create the module-level async engines used by get_engines().
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_convert(self):
        # A user-supplied ``convert`` callable maps arbitrary keys to whatever
        # the backend accepts (strings for MySQL, 64-bit ints for PostgreSQL).
        for eng in get_engines():
            name = uuid4().hex

            if eng.name == "mysql":

                def _convert(k):  # type: ignore
                    return f"key is {k!r}"

            elif eng.name == "postgresql":

                def _convert(k):  # type: ignore
                    return crc32(str(k).encode())

            else:
                raise NotImplementedError()

            async with eng.connect() as cn:
                async with create_async_sadlock(cn, name, convert=_convert) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    async def test_mysql_key_max_length(self):
        # A key exactly at MySQL's maximum lock-name length is accepted.
        charset = [chr(cp) for cp in range(0x20, 0x7F)]
        for eng in get_engines():
            if eng.name != "mysql":
                continue
            name = "".join(choice(charset) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH))
            async with eng.connect() as cn:
                async with create_async_sadlock(cn, name) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    async def test_mysql_key_gt_max_length(self):
        # One character over the limit must be rejected.
        charset = [chr(cp) for cp in range(0x20, 0x7F)]
        for eng in get_engines():
            if eng.name != "mysql":
                continue
            name = "".join(choice(charset) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH + 1))
            async with eng.connect() as cn:
                with self.assertRaises(ValueError):
                    create_async_sadlock(cn, name)

    async def test_mysql_key_not_a_string(self):
        # With an identity converter, MySQL only accepts string keys.
        bad_keys = (None, 1, 0, -1, 0.1, True, False, (), [], set(), {}, object())
        for eng in get_engines():
            if eng.name != "mysql":
                continue
            async with eng.connect() as cn:
                for bad in bad_keys:
                    with self.assertRaises(TypeError):
                        create_async_sadlock(cn, bad, convert=lambda x: x)

    async def _assert_pg_key_ok(self, name):
        # Helper: an integer key within the signed 64-bit range must work.
        for eng in get_engines():
            if eng.name != "postgresql":
                continue
            async with eng.connect() as cn:
                async with create_async_sadlock(cn, name) as lck:
                    self.assertTrue(lck.locked)
                self.assertFalse(lck.locked)

    async def _assert_pg_key_overflow(self, name):
        # Helper: an integer key outside the signed 64-bit range raises OverflowError.
        for eng in get_engines():
            if eng.name != "postgresql":
                continue
            async with eng.connect() as cn:
                with self.assertRaises(OverflowError):
                    create_async_sadlock(cn, name)

    async def test_postgresql_key_max(self):
        await self._assert_pg_key_ok(2**63 - 1)

    async def test_postgresql_key_over_max(self):
        await self._assert_pg_key_overflow(2**63)

    async def test_postgresql_key_min(self):
        await self._assert_pg_key_ok(-(2**63))

    async def test_postgresql_key_over_min(self):
        await self._assert_pg_key_overflow(-(2**63) - 1)

    async def test_key_wrong_type(self):
        # Containers and plain objects are never valid keys on any backend.
        for eng in get_engines():
            async with eng.connect() as cn:
                for bad in ((), {}, set(), [], object()):
                    with self.assertRaises(TypeError):
                        create_async_sadlock(cn, bad)
117 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 |
3 | ## v0.7.1.dev0
4 |
5 | > 📅 **Date** TBD
6 |
7 | - 🏗️ **Refactor:**
8 | - Refactored lock base classes to reduce code duplication between synchronous and asynchronous implementations
9 | - Extracted common lock state validation logic into base class methods
10 | - Introduced `do_acquire` and `do_release` abstract methods for concrete implementations
11 | - Added `@final` decorator to `acquire`, `release`, and `close` methods in base classes to prevent override while ensuring consistent behavior
12 | - Improved consistency between MySQL and PostgreSQL lock implementations
13 |
14 | ## v0.7.0
15 |
16 | > 📅 **Date** 2025-10-12
17 |
18 | - 🆕 **New Features:**
19 | - Supported Python 3.14
20 | - 💔 **Breaking Changes:**
21 | - Drop support for Python 3.8
22 | - 📦 **Build:**
23 | - Upgrade build backend to `setuptools>=80`
24 | - Remove all `requirements.txt` files
25 |
26 | ## v0.6.1.post2
27 |
28 | > 📅 **Date** 2024-11-29
29 |
30 | - 🐛 Bug-fix:
    - Issue #4: PostgreSQL xact lock in context manager produces warning
32 | - ✅ Changes:
33 | - `typing-extensions` required for Python earlier than 3.12
34 | - 🖊️ Modifications:
35 | - Add some `override` decorators
36 | - 🎯 CI:
37 | - update pre-commit hooks
38 |
39 | ## v0.6.1
40 |
41 | > 📅 **Date** 2024-4-6
42 |
43 | - ✅ Changes:
44 | - `typing-extensions` required for Python earlier than 3.10
45 |
46 | ## v0.6
47 |
48 | > 📅 **Date** 2024-3-28
49 |
50 | - ❎ Breaking Changes:
51 | - Remove `level` arguments of PostgreSQL lock class' constructor.
52 | `xact` and `shared` arguments were added.
53 | - 🆕 New Features:
54 | - support `transaction` and `shared` advisory lock for PostgreSQL.
55 | - 🐛 Bug fix:
56 | - PostgreSQL transaction level advisory locks are held until the current transaction ends.
57 | Manual release for that is disabled, and a warning message will be printed.
58 | - 🕐 Optimize
59 | - Reduce duplicated codes
60 | - Better unit tests
61 |
62 | ## v0.5.3
63 |
64 | > 📅 **Date** 2024-3-15
65 |
66 | ## v0.5
67 |
68 | Date: 2023-12-06
69 |
70 | - New:
71 | - `contextual_timeout` parameter for “with” statement
72 | - Support Python 3.12
73 |
74 | ## v0.4
75 |
76 | Date: 2023-06-17
77 |
78 | - Remove:
79 | - remove `acquired` property, it's alias of `locked`
80 | - remove setter of `locked` property
81 |
82 | - Optimize:
83 | - re-arrange package's structure
84 | - Many optimizations
85 |
86 | - CI/Test:
87 | - GitHub action: Python 3.8~3.11 x SQLAlchemy 1.x/2.x matrix testing
88 | - Local compose: Python 3.7~3.11 x SQLAlchemy 1.x/2.x matrix testing
89 |
90 | - Doc: Update to Sphinx 7.x, and Furo theme
91 |
92 | ## v0.3.1
93 |
94 | Date: 2023-06-13
95 |
96 | - A hotfix for project's dependencies setup error.
97 |
98 | ## v0.3
99 |
100 | Date: 2023-06-13
101 |
102 | - Remove:
103 | - Python 3.6 support
104 |
105 | - Tests:
106 | - New docker compose based tests, from python 3.7 to 3.11, both SQLAlchemy 1.x and 2.x
107 |
108 | - Docs:
109 | - Update to newer Sphinx docs
110 |
111 | - Build:
112 | - Move all project meta to pyproject.toml, remove setup.cfg and setup.py
113 |
114 | ## v0.2.1
115 |
116 | Date: 2023-02-25
117 |
118 | - New:
119 | - support SQLAlchemy 2.0
120 |
121 | ## v0.2
122 |
123 | Date: 2021-03-23
124 |
125 | First v0.2.x version released.
126 |
127 | ## v0.2b2/b3
128 |
129 | Date: 2021-03-23
130 |
131 | - Add:
132 | - More unit tests
133 | - Optimized CI
134 |
135 | ## v0.2b1
136 |
137 | Date: 2021-03-16
138 |
139 | - Add:
140 |
141 | - New unit tests
142 | - CI by GitHub workflows
143 |
144 | ## v0.2a3
145 |
146 | Date: 2021-03-14
147 |
148 | - Change:
149 |
150 | - Drop Python 3.5 support.
151 | - Remove SQLAlchemy version requires earlier than 1.4 in setup, it's not supported, actually.
152 | - Adjust PostgreSQL lock's constructor arguments order
153 |
154 | - Add:
155 |
156 | - More test cases, and add test/deploy workflow in GitHub actions.
157 | - Add docker-compose test scripts
158 |
159 | ## v0.2a2
160 |
161 | Date: 2021-03-09
162 |
163 | - Change:
164 |
165 | - Rename a lot of function/class:
166 |
167 | - `sadlock` -> `create_sadlock`
168 | - `asyncio.sadlock` -> `asyncio.create_async_sadlock`
169 |
170 | and some other ...
171 |
172 | ## v0.2a1
173 |
174 | Date: 2021-03-08
175 |
176 | - New:
177 |
178 | - Asynchronous IO Support by:
179 |
180 | - [aiomysql](https://github.com/aio-libs/aiomysql) for MySQL
181 |
182 | Connection URL is like: `"mysql+aiomysql://user:password@host:3306/schema?charset=utf8mb4"`
183 |
184 | - [asyncpg](https://github.com/MagicStack/asyncpg) for PostgreSQL
185 |
186 | Connection URL is like: `"PostgreSQL+asyncpg://user:password@host:5432/db"`
187 |
    Read the documentation for details.
189 |
190 | ## v0.1.2
191 |
192 | Date: 2021-01-26
193 |
194 | Still an early version, not for production.
195 |
196 | - Changes:
  - Arguments of `acquire` and their default values are now similar to stdlib's `multiprocessing.Lock`, instead of `threading.Lock`
198 | - MySQL lock now accepts float-point value as `timeout`
199 | - Adds
200 | - Several new test cases
201 | - Other
  - Many other small adjustments
203 |
204 | ## v0.1.1
205 |
206 | - A very early version, maybe not stable enough.
- Replace `blake2b` with crc64-iso in PostgreSQL key convert function
208 | - Only named arguments as extra parameters allowed in Lock's implementation class
209 |
--------------------------------------------------------------------------------
/tests/test_multithread.py:
--------------------------------------------------------------------------------
1 | from contextlib import closing
2 | from threading import Barrier, Thread
3 | from time import sleep, time
4 | from unittest import TestCase
5 | from uuid import uuid4
6 |
7 | from sqlalchemy_dlock import create_sadlock
8 |
9 | from .engines import ENGINES
10 |
11 |
12 | class MultiThreadTestCase(TestCase):
13 | def tearDown(self):
14 | for engine in ENGINES:
15 | engine.dispose()
16 |
17 | def test_non_blocking_success(self):
18 | key = uuid4().hex
19 | for engine in ENGINES:
20 | bar = Barrier(2)
21 |
22 | def fn1(b):
23 | with engine.connect() as conn:
24 | with create_sadlock(conn, key) as lock:
25 | self.assertTrue(lock.locked)
26 | self.assertFalse(lock.locked)
27 | b.wait()
28 |
29 | def fn2(b):
30 | with engine.connect() as conn:
31 | with closing(create_sadlock(conn, key)) as lock:
32 | b.wait()
33 | self.assertTrue(lock.acquire(False))
34 |
35 | trd1 = Thread(target=fn1, args=(bar,))
36 | trd2 = Thread(target=fn2, args=(bar,))
37 |
38 | trd1.start()
39 | trd2.start()
40 |
41 | trd1.join()
42 | trd2.join()
43 |
44 | def test_non_blocking_fail(self):
45 | key = uuid4().hex
46 | delay = 1.0
47 |
48 | for engine in ENGINES:
49 | bar = Barrier(2)
50 |
51 | def fn1(b):
52 | with engine.connect() as conn:
53 | with create_sadlock(conn, key) as lock:
54 | self.assertTrue(lock.locked)
55 | b.wait()
56 | sleep(delay)
57 | self.assertTrue(lock.locked)
58 | self.assertFalse(lock.locked)
59 |
60 | def fn2(b):
61 | with engine.connect() as conn:
62 | with closing(create_sadlock(conn, key)) as lock:
63 | b.wait()
64 | self.assertFalse(lock.acquire(False))
65 |
66 | trd1 = Thread(target=fn1, args=(bar,))
67 | trd2 = Thread(target=fn2, args=(bar,))
68 |
69 | trd1.start()
70 | trd2.start()
71 |
72 | trd1.join()
73 | trd2.join()
74 |
75 | def test_timeout_fail(self):
76 | key = uuid4().hex
77 | delay = 3.0
78 | timeout = 1.0
79 | for engine in ENGINES:
80 | bar = Barrier(2)
81 |
82 | def fn1(b):
83 | with engine.connect() as conn:
84 | with create_sadlock(conn, key) as lock:
85 | self.assertTrue(lock.locked)
86 | b.wait()
87 | self.assertTrue(lock.locked)
88 | sleep(delay)
89 | self.assertTrue(lock.locked)
90 | self.assertFalse(lock.locked)
91 |
92 | def fn2(b):
93 | with engine.connect() as conn:
94 | with closing(create_sadlock(conn, key)) as lock:
95 | b.wait()
96 | ts = time()
97 | self.assertFalse(lock.acquire(timeout=timeout))
98 | self.assertGreaterEqual(time() - ts, timeout)
99 | self.assertFalse(lock.locked)
100 |
101 | trd1 = Thread(target=fn1, args=(bar,))
102 | trd2 = Thread(target=fn2, args=(bar,))
103 |
104 | trd1.start()
105 | trd2.start()
106 |
107 | trd1.join()
108 | trd2.join()
109 |
110 | def test_timeout_success(self):
111 | key = uuid4().hex
112 | delay = 1.0
113 | timeout = 3.0
114 |
115 | for engine in ENGINES:
116 | bar = Barrier(2)
117 |
118 | def fn1(b):
119 | with engine.connect() as conn:
120 | with create_sadlock(conn, key) as lock:
121 | self.assertTrue(lock.locked)
122 | b.wait()
123 | sleep(delay)
124 | self.assertTrue(lock.locked)
125 | self.assertFalse(lock.locked)
126 |
127 | def fn2(b):
128 | with engine.connect() as conn:
129 | with closing(create_sadlock(conn, key)) as lock:
130 | b.wait()
131 | ts = time()
132 | self.assertTrue(lock.acquire(timeout=timeout))
133 | self.assertGreaterEqual(time() - ts, delay)
134 | self.assertGreaterEqual(timeout, time() - ts)
135 | self.assertTrue(lock.locked)
136 |
137 | trd1 = Thread(target=fn1, args=(bar,))
138 | trd2 = Thread(target=fn2, args=(bar,))
139 |
140 | trd1.start()
141 | trd2.start()
142 |
143 | trd1.join()
144 | trd2.join()
145 |
146 | def test_connection_released(self):
147 | key = uuid4().hex
148 |
149 | for engine in ENGINES:
150 |
151 | def fn1():
152 | with engine.connect() as conn:
153 | lock = create_sadlock(conn, key)
154 | self.assertTrue(lock.acquire(False))
155 |
156 | def fn2():
157 | with engine.connect() as conn:
158 | with closing(create_sadlock(conn, key)) as lock:
159 | self.assertTrue(lock.acquire(False))
160 |
161 | trd1 = Thread(target=fn1)
162 | trd2 = Thread(target=fn2)
163 |
164 | trd1.start()
165 | trd1.join()
166 |
167 | trd2.start()
168 | trd2.join()
169 |
--------------------------------------------------------------------------------
/tests/test_multiprocess.py:
--------------------------------------------------------------------------------
1 | from contextlib import closing
2 | from multiprocessing import Barrier, Process
3 | from time import sleep, time
4 | from unittest import TestCase
5 | from uuid import uuid4
6 |
7 | from sqlalchemy import create_engine
8 |
9 | from sqlalchemy_dlock import create_sadlock
10 |
11 | from .engines import URLS
12 |
13 |
class MpNonBlockingSuccessTestCase(TestCase):
    """A non-blocking acquire succeeds in a second process once the first
    process has released the lock (processes synchronize on a barrier)."""

    @staticmethod
    def fn1(url, k, b):
        # Holder process: lock, release via the context manager, then let
        # the peer proceed past the barrier.
        eng = create_engine(url)
        with eng.connect() as conn:
            with create_sadlock(conn, k) as lock:
                assert lock.locked
            assert not lock.locked
            b.wait()

    @staticmethod
    def fn2(url, k, b):
        eng = create_engine(url)
        with eng.connect() as conn:
            with closing(create_sadlock(conn, k)) as lock:
                b.wait()
                assert lock.acquire(False)

    def test(self):
        key = uuid4().hex
        for url in URLS:
            bar = Barrier(2)
            workers = [
                Process(target=self.__class__.fn1, args=(url, key, bar)),
                Process(target=self.__class__.fn2, args=(url, key, bar)),
            ]
            for p in workers:
                p.start()
            for p in workers:
                p.join()
            for p in workers:
                # A failed `assert` in a child process exits non-zero.
                self.assertEqual(p.exitcode, 0)
48 |
49 |
class MpNonBlockingFailTestCase(TestCase):
    """While one process holds the lock, a non-blocking acquire in another
    process must fail (processes synchronize on a barrier)."""

    @staticmethod
    def fn1(url, k, b, delay):
        # Holder: keep the lock for `delay` seconds after both processes pass
        # the barrier, guaranteeing the peer's attempt overlaps the hold.
        engine = create_engine(url)
        with engine.connect() as conn:
            with create_sadlock(conn, k) as lock:
                assert lock.locked
                b.wait()
                sleep(delay)
                assert lock.locked
            assert not lock.locked

    @staticmethod
    def fn2(url, k, b):
        # Contender: after the barrier the lock is still held by fn1, so a
        # non-blocking acquire must return False.
        engine = create_engine(url)
        with engine.connect() as conn:
            with closing(create_sadlock(conn, k)) as lock:
                b.wait()
                assert not lock.acquire(False)

    def test(self):
        key = uuid4().hex
        delay = 1.0
        cls = self.__class__
        for url in URLS:
            bar = Barrier(2)

            p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
            p2 = Process(target=cls.fn2, args=(url, key, bar))

            p1.start()
            p2.start()

            p1.join()
            p2.join()

            # A failed `assert` in a child process makes it exit non-zero.
            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
88 |
89 |
class MpTimeoutSuccessTestCase(TestCase):
    """A blocking acquire with a sufficient timeout succeeds in a second
    process once the first process releases the lock.

    Timeline: both processes meet at the barrier; process 1 holds the lock
    for ``delay`` seconds, process 2 waits up to ``timeout`` (> ``delay``)
    and must obtain the lock in between.
    """

    @staticmethod
    def fn1(url, k, b, delay):
        # Holder: keep the lock `delay` seconds past the barrier, then
        # release it by leaving the context manager.
        engine = create_engine(url)
        with engine.connect() as conn:
            with create_sadlock(conn, k) as lock:
                assert lock.locked
                b.wait()
                sleep(delay)
                assert lock.locked
            assert not lock.locked

    @staticmethod
    def fn2(url, k, b, delay, timeout):
        # Waiter: the blocking acquire must take at least `delay` (waiting
        # for the holder) but no more than `timeout`.
        engine = create_engine(url)
        with engine.connect() as conn:
            with closing(create_sadlock(conn, k)) as lock:
                b.wait()
                ts = time()
                assert lock.acquire(timeout=timeout)
                assert time() - ts >= delay
                assert timeout >= time() - ts
                assert lock.locked

    def test(self):
        key = uuid4().hex
        delay = 1.0
        timeout = 3.0
        cls = self.__class__

        for url in URLS:
            bar = Barrier(2)

            p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
            p2 = Process(target=cls.fn2, args=(url, key, bar, delay, timeout))

            p1.start()
            p2.start()

            p1.join()
            p2.join()

            # A failed `assert` in a child process makes it exit non-zero.
            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
134 |
135 |
class MpTimeoutFailTestCase(TestCase):
    """A blocking acquire must time out when the holding process keeps the
    lock longer (``delay``) than the contender's ``timeout``."""

    @staticmethod
    def fn1(url, k, b, delay):
        # Holder: keep the lock `delay` seconds past the barrier — longer
        # than the peer's timeout — so the peer's acquire must fail.
        engine = create_engine(url)
        with engine.connect() as conn:
            with create_sadlock(conn, k) as lock:
                assert lock.locked
                b.wait()
                sleep(delay)
                assert lock.locked
            assert not lock.locked

    @staticmethod
    def fn2(url, k, b, timeout):
        # Contender: the acquire must fail and must have waited at least
        # about `timeout` seconds (`round` tolerates scheduler jitter).
        engine = create_engine(url)
        with engine.connect() as conn:
            with closing(create_sadlock(conn, k)) as lock:
                b.wait()
                ts = time()
                assert not lock.acquire(timeout=timeout)
                assert round(time() - ts) >= timeout
                assert not lock.locked

    def test(self):
        cls = self.__class__
        key = uuid4().hex
        delay = 3.0
        timeout = 1.0

        for url in URLS:
            bar = Barrier(2)

            p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
            p2 = Process(target=cls.fn2, args=(url, key, bar, timeout))

            p1.start()
            p2.start()

            p1.join()
            p2.join()

            # A failed `assert` in a child process makes it exit non-zero.
            self.assertEqual(p1.exitcode, 0)
            self.assertEqual(p2.exitcode, 0)
179 |
180 |
class MpReleaseOmittedTestCase(TestCase):
    """A lock that is acquired but never released must be freed once its
    owner process terminates, letting another process grab it afterwards."""

    @staticmethod
    def fn1(url, k):
        # Deliberately neither release the lock nor close the connection:
        # process exit has to clean both up server-side.
        lock = create_sadlock(create_engine(url).connect(), k)
        assert lock.acquire(False)

    @staticmethod
    def fn2(url, k):
        engine = create_engine(url)
        with engine.connect() as conn:
            with closing(create_sadlock(conn, k)) as lock:
                assert lock.acquire(False)

    def test(self):
        cls = self.__class__
        key = uuid4().hex

        for url in URLS:
            first = Process(target=cls.fn1, args=(url, key))
            second = Process(target=cls.fn2, args=(url, key))

            # Strictly sequential: the second process may only start after
            # the first one has exited (and its lock has been reclaimed).
            first.start()
            first.join()

            second.start()
            second.join()

            self.assertEqual(first.exitcode, 0)
            self.assertEqual(second.exitcode, 0)
211 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # sqlalchemy-dlock
2 |
3 | [](https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml)
4 | [](https://pypi.org/project/sqlalchemy-dlock/)
5 | [](https://sqlalchemy-dlock.readthedocs.io/en/latest/)
6 | [](https://codecov.io/gh/tanbro/sqlalchemy-dlock)
7 |
8 | `sqlalchemy-dlock` is a distributed-lock library based on Database and [SQLAlchemy][].
9 |
10 | It currently supports below locks:
11 |
12 | | Database | Lock |
13 | |------------|-----------------------------------------------------------------------------------------------|
14 | | MySQL | [named lock](https://dev.mysql.com/doc/refman/en/locking-functions.html) |
15 | | PostgreSQL | [advisory lock](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) |
16 |
17 | ## Install
18 |
19 | ```bash
20 | pip install sqlalchemy-dlock
21 | ```
22 |
23 | ## Usage
24 |
25 | - Work with [SQLAlchemy][] [`Connection`](https://docs.sqlalchemy.org/20/core/connections.html):
26 |
27 | ```python
28 | from sqlalchemy import create_engine
29 | from sqlalchemy_dlock import create_sadlock
30 |
31 | key = 'user/001'
32 |
33 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
34 | conn = engine.connect()
35 |
36 | # Create the D-Lock on the connection
37 | lock = create_sadlock(conn, key)
38 |
# it's not locked when constructed
40 | assert not lock.locked
41 |
42 | # lock
43 | lock.acquire()
44 | assert lock.locked
45 |
46 | # un-lock
47 | lock.release()
48 | assert not lock.locked
49 | ```
50 |
51 | - `with` statement
52 |
53 | ```python
54 | from contextlib import closing
55 |
56 | from sqlalchemy import create_engine
57 | from sqlalchemy_dlock import create_sadlock
58 |
59 | key = 'user/001'
60 |
61 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
62 | with engine.connect() as conn:
63 |
64 | # Create the D-Lock on the connection
65 | with create_sadlock(conn, key) as lock:
66 | # It's locked
67 | assert lock.locked
68 |
69 | # Auto un-locked
70 | assert not lock.locked
71 |
# If you do not want it locked inside the `with` block, a `closing` wrapper may help
73 | with closing(create_sadlock(conn, key)) as lock2:
74 | # It's NOT locked here !!!
75 | assert not lock2.locked
76 | # lock it now:
77 | lock2.acquire()
78 | assert lock2.locked
79 |
80 | # Auto un-locked
81 | assert not lock2.locked
82 | ```
83 |
84 | - Work with [SQLAlchemy][] [`ORM` `Session`](https://docs.sqlalchemy.org/en/20/orm/session.html):
85 |
86 | ```python
87 | from sqlalchemy import create_engine
88 | from sqlalchemy.orm import sessionmaker
89 | from sqlalchemy_dlock import create_sadlock
90 |
91 | key = 'user/001'
92 |
93 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
94 | Session = sessionmaker(bind=engine)
95 |
96 | with Session() as session:
97 | with create_sadlock(session, key) as lock:
98 | assert lock.locked
99 | assert not lock.locked
100 | ```
101 |
102 | - Asynchronous I/O Support
103 |
104 | > 💡 **TIP**
105 | >
106 | > - [SQLAlchemy][] `1.x`'s asynchronous I/O:
107 | > - [SQLAlchemy][] `2.x`'s asynchronous I/O:
108 |
109 | ```python
110 | from sqlalchemy.ext.asyncio import create_async_engine
111 | from sqlalchemy_dlock import create_async_sadlock
112 |
113 | key = 'user/001'
114 |
115 | engine = create_async_engine('postgresql+asyncpg://scott:tiger@127.0.0.1/')
116 |
117 | async with engine.connect() as conn:
118 | async with create_async_sadlock(conn, key) as lock:
119 | assert lock.locked
120 | await lock.release()
121 | assert not lock.locked
122 | await lock.acquire()
        assert lock.locked
124 | ```
125 |
126 | > ℹ️ **NOTE** \
127 | > [aiomysql][], [asyncpg][] and [psycopg][] are tested asynchronous drivers.
128 |
129 | ## Test
130 |
131 | Following drivers are tested:
132 |
133 | - MySQL:
134 | - [mysqlclient][] (synchronous)
135 | - [pymysql][] (synchronous)
136 | - [aiomysql][] (asynchronous)
137 | - Postgres:
138 | - [psycopg2][] (synchronous)
139 | - [asyncpg][] (asynchronous)
140 | - [psycopg][] (synchronous and asynchronous)
141 |
142 | You can run unit-tests
143 |
144 | - on local environment:
145 |
146 | 1. Install the project in editable mode with `asyncio` optional dependencies, and libraries/drivers needed in test. A virtual environment ([venv][]) is strongly advised:
147 |
148 | ```bash
149 | pip install -e . --group dev
150 | ```
151 |
152 | 2. start up mysql and postgresql service
153 |
154 | There is a docker [compose][] file `db.docker-compose.yml` in project's top directory,
155 | which can be used to run mysql and postgresql develop environment conveniently:
156 |
157 | ```bash
158 | docker compose -f db.docker-compose.yml up
159 | ```
160 |
161 | 3. set environment variables `TEST_URLS` and `TEST_ASYNC_URLS` for sync and async database connection url.
162 | Multiple connections separated by space.
163 |
164 | eg: (following values are also the defaults, and can be omitted)
165 |
166 | ```ini
167 | TEST_URLS=mysql://test:test@127.0.0.1/test postgresql://postgres:test@127.0.0.1/
168 | TEST_ASYNC_URLS=mysql+aiomysql://test:test@127.0.0.1/test postgresql+asyncpg://postgres:test@127.0.0.1/
169 | ```
170 |
171 | > ℹ️ **NOTE** \
172 | > The test cases would load environment variables from dot-env file `tests/.env`.
173 |
174 | 4. run unit-test
175 |
176 | ```bash
177 | python -m unittest
178 | ```
179 |
180 | - or on docker [compose][]:
181 |
182 | `tests/docker-compose.yml` defines a Python and [SQLAlchemy][] version matrix -- it combines multiple Python versions and [SQLAlchemy][] `v1`/`v2` for test cases. We can run it by:
183 |
184 | ```bash
185 | cd tests
186 | docker compose up --abort-on-container-exit
187 | ```
188 |
189 | [SQLAlchemy]: https://www.sqlalchemy.org/ "The Python SQL Toolkit and Object Relational Mapper"
190 | [venv]: https://docs.python.org/library/venv.html "The venv module supports creating lightweight “virtual environments”, each with their own independent set of Python packages installed in their site directories. "
191 | [mysqlclient]: https://pypi.org/project/mysqlclient/ "Python interface to MySQL"
192 | [psycopg2]: https://pypi.org/project/psycopg2/ "PostgreSQL database adapter for Python"
193 | [psycopg]: https://pypi.org/project/psycopg/ "Psycopg 3 is a modern implementation of a PostgreSQL adapter for Python."
194 | [aiomysql]: https://pypi.org/project/aiomysql/ "aiomysql is a “driver” for accessing a MySQL database from the asyncio (PEP-3156/tulip) framework."
195 | [asyncpg]: https://pypi.org/project/asyncpg/ "asyncpg is a database interface library designed specifically for PostgreSQL and Python/asyncio. "
196 | [pymysql]: https://pypi.org/project/pymysql/ "Pure Python MySQL Driver"
197 | [compose]: https://docs.docker.com/compose/ "Compose is a tool for defining and running multi-container Docker applications."
198 |
--------------------------------------------------------------------------------
/tests/test_basic.py:
--------------------------------------------------------------------------------
1 | from contextlib import ExitStack, closing
2 | from multiprocessing import cpu_count
3 | from random import randint
4 | from secrets import token_bytes, token_hex
5 | from unittest import TestCase
6 | from uuid import uuid4
7 |
8 | from sqlalchemy_dlock import create_sadlock
9 |
10 | from .engines import ENGINES
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
class BasicTestCase(TestCase):
    """Single-connection smoke tests for ``create_sadlock`` on every configured engine."""

    def tearDown(self):
        # Dispose pooled connections so a lingering lock cannot affect the
        # next test.
        for engine in ENGINES:
            engine.dispose()

    def test_enter_exit(self):
        """``acquire()``/``release()`` toggle the ``locked`` flag."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                lock = create_sadlock(conn, key)
                self.assertFalse(lock.locked)
                lock.acquire()
                self.assertTrue(lock.locked)
                lock.release()
                self.assertFalse(lock.locked)

    def test_with_statement(self):
        """``with`` acquires on enter and releases on exit."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with create_sadlock(conn, key) as lock:
                    self.assertTrue(lock.locked)
                self.assertFalse(lock.locked)

    def test_timeout_with_statement(self):
        """``contextual_timeout`` raises TimeoutError when the key is held elsewhere."""
        for engine in ENGINES:
            key = uuid4().hex
            with ExitStack() as stack:
                conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
                lock0 = create_sadlock(conn0, key)
                self.assertTrue(lock0.acquire(False))
                with self.assertRaises(TimeoutError):
                    with create_sadlock(conn1, key, contextual_timeout=1):
                        pass
                lock0.release()
                self.assertFalse(lock0.locked)

    def test_many_str_key(self):
        """Many distinct string keys lock and unlock cleanly."""
        for engine in ENGINES:
            for _ in range(100):
                with engine.connect() as conn:
                    key = uuid4().hex + uuid4().hex
                    with create_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    def test_many_int_key(self):
        """Random keys across the whole signed 64-bit range are accepted."""
        for engine in ENGINES:
            for _ in range(100):
                with engine.connect() as conn:
                    key = randint(-0x8000_0000_0000_0000, 0x7FFF_FFFF_FFFF_FFFF)
                    with create_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    def test_many_bytes_key(self):
        """Bytes keys are accepted (ASCII-decodable bytes for MySQL, raw bytes for PostgreSQL)."""
        for engine in ENGINES:
            for _ in range(100):
                with engine.connect() as conn:
                    if engine.name == "mysql":
                        key = token_hex().encode()
                    elif engine.name == "postgresql":
                        key = token_bytes()
                    else:
                        raise NotImplementedError()
                    with create_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    def test_closing(self):
        """A ``closing()`` wrapper releases an explicitly acquired lock on exit."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with closing(create_sadlock(conn, key)) as lock:
                    self.assertFalse(lock.locked)
                    self.assertTrue(lock.acquire())
                    self.assertTrue(lock.locked)
                self.assertFalse(lock.locked)

    def test_no_blocking(self):
        """A non-blocking acquire succeeds on an uncontended key."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with closing(create_sadlock(conn, key)) as lock:
                    self.assertFalse(lock.locked)
                    acquired = lock.acquire(False)
                    self.assertTrue(acquired)
                    self.assertTrue(lock.locked)
                self.assertFalse(lock.locked)

    def test_invoke_locked_lock(self):
        """Acquiring an already-locked lock raises ValueError."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with create_sadlock(conn, key) as lock:
                    self.assertTrue(lock.locked)
                    self.assertRaisesRegex(ValueError, "invoked on a locked lock", lock.acquire)
                self.assertFalse(lock.locked)

    def test_invoke_unlocked_lock(self):
        """Releasing a lock that is not held raises ValueError."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with closing(create_sadlock(conn, key)) as lock:
                    self.assertFalse(lock.locked)
                    self.assertRaisesRegex(ValueError, "invoked on an unlocked lock", lock.release)
                self.assertFalse(lock.locked)

    def test_timeout_positive(self):
        """A positive timeout on an uncontended key acquires immediately."""
        for engine in ENGINES:
            key = uuid4().hex
            for _ in range(CPU_COUNT + 1):
                with engine.connect() as conn:
                    with closing(create_sadlock(conn, key)) as lock:
                        self.assertTrue(lock.acquire(timeout=randint(1, 1024)))
                    self.assertFalse(lock.locked)

    def test_timeout_zero(self):
        """A zero timeout still succeeds on an uncontended key."""
        for engine in ENGINES:
            key = uuid4().hex
            with engine.connect() as conn:
                with closing(create_sadlock(conn, key)) as lock:
                    self.assertTrue(lock.acquire(timeout=0))
                self.assertFalse(lock.locked)

    def test_timeout_negative(self):
        """Negative timeouts are accepted; acquisition succeeds on an uncontended key."""
        for engine in ENGINES:
            key = uuid4().hex
            for _ in range(CPU_COUNT + 1):
                with engine.connect() as conn:
                    with closing(create_sadlock(conn, key)) as lock:
                        self.assertTrue(lock.acquire(timeout=-1 * randint(1, 1024)))
                    self.assertFalse(lock.locked)

    def test_timeout_none(self):
        """``timeout=None`` blocks indefinitely; trivially succeeds when uncontended."""
        for engine in ENGINES:
            key = uuid4().hex
            # loop variable intentionally unused
            for _ in range(CPU_COUNT + 1):
                with engine.connect() as conn:
                    with closing(create_sadlock(conn, key)) as lock:
                        self.assertTrue(lock.acquire(timeout=None))
                    self.assertFalse(lock.locked)

    def test_enter_locked(self):
        """A second connection cannot acquire until the first one releases."""
        for engine in ENGINES:
            key = uuid4().hex
            with ExitStack() as stack:
                conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
                lock0 = create_sadlock(conn0, key)
                self.assertTrue(lock0.acquire(False))
                lock1 = create_sadlock(conn1, key)
                self.assertFalse(lock1.acquire(False))
                lock0.release()
                self.assertFalse(lock0.locked)
                self.assertTrue(lock1.acquire(False))
                lock1.release()
                self.assertFalse(lock1.locked)

    def test_release_unlocked_error(self):
        """Releasing from a connection that never acquired raises ValueError."""
        for engine in ENGINES:
            key = uuid4().hex
            with ExitStack() as stack:
                conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
                lock0 = create_sadlock(conn0, key)
                self.assertTrue(lock0.acquire(False))
                lock1 = create_sadlock(conn1, key)
                with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
                    lock1.release()
183 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/mysql.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Any, Callable, Optional, TypeVar, Union
3 |
4 | if sys.version_info < (3, 12): # pragma: no cover
5 | from typing_extensions import override
6 | else: # pragma: no cover
7 | from typing import override
8 |
9 | from ..exceptions import SqlAlchemyDLockDatabaseError
10 | from ..statement.mysql import LOCK, UNLOCK
11 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
12 | from .base import AbstractLockMixin, BaseAsyncSadLock, BaseSadLock
13 |
14 | MYSQL_LOCK_NAME_MAX_LENGTH = 64
15 |
16 | ConvertibleKT = Union[bytes, bytearray, memoryview, str, int, float]
17 | KT = Any
18 | KTV = TypeVar("KTV", bound=KT)
19 |
20 |
class MysqlSadLockMixin(AbstractLockMixin[KTV, str]):
    """Mix-in providing key conversion and validation for MySQL named locks."""

    @override
    def __init__(self, *, key: KTV, convert: Optional[Callable[[KTV], str]] = None, **kwargs):
        """
        Args:
            key: MySQL named lock requires the key given by string.

                A non-:class:`str` key is converted:

                - :class:`bytes`-like values are decoded with the default encoding
                - other supported values are forced to :class:`str` via ``str(key)``
                - alternatively, supply your own ``convert`` callable

            convert: Custom function to convert ``key`` to the required data type.

                Example::

                    def convert(value) -> str:
                        # compute a string key from `value`
                        return the_string_converted_from_value
        """
        self._actual_key = convert(key) if convert else self.convert(key)
        # Whatever the converter produced must be a string ...
        if not isinstance(self._actual_key, str):
            raise TypeError("MySQL named lock requires the key given by string")
        # ... and must fit within MySQL's lock-name length limit.
        if len(self._actual_key) > MYSQL_LOCK_NAME_MAX_LENGTH:
            raise ValueError(f"MySQL enforces a maximum length on lock names of {MYSQL_LOCK_NAME_MAX_LENGTH} characters.")

    @override
    def get_actual_key(self) -> str:
        """The actual key used in MySQL named lock"""
        return self._actual_key

    @classmethod
    def convert(cls, k: ConvertibleKT) -> str:
        """The default key converter for MySQL named lock"""
        if isinstance(k, str):
            return k
        if isinstance(k, (bytes, bytearray)):
            return k.decode()
        if isinstance(k, memoryview):
            return k.tobytes().decode()
        if isinstance(k, (int, float)):
            return str(k)
        raise TypeError(type(k).__name__)
77 |
78 |
class MysqlSadLock(MysqlSadLockMixin, BaseSadLock[str, ConnectionOrSessionT]):
    """A distributed lock implemented by MySQL named-lock

    See Also:
        https://dev.mysql.com/doc/refman/8.0/en/locking-functions.html

    Caution:
        MySQL allows a single session to acquire the same named lock several times;
        other sessions cannot take the lock until the holder has released it the same
        number of times. Calling :meth:`.acquire` repeatedly for one key on the
        **same** SQLAlchemy connection therefore succeeds immediately without
        blocking -- it stacks the lock instead of waiting!
    """

    @override
    def __init__(self, connection_or_session: ConnectionOrSessionT, key: KT, **kwargs):
        """
        Args:
            connection_or_session: :attr:`.BaseSadLock.connection_or_session`
            key: :attr:`.BaseSadLock.key`
            **kwargs: forwarded to both :class:`.MysqlSadLockMixin` and :class:`.BaseSadLock`
        """
        # The mixin must run first: it computes and validates `actual_key`.
        MysqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    def do_acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        # Map the (block, timeout) pair onto GET_LOCK()'s single timeout argument:
        #   non-blocking            -> 0   (return at once)
        #   blocking, no timeout    -> -1  (wait forever)
        #   blocking, timeout given -> max(timeout, 0) (negative means zero)
        if not block:
            wait = 0
        elif timeout is None:
            wait = -1
        else:
            wait = max(timeout, 0)
        result = self.connection_or_session.execute(LOCK.params(str=self.key, timeout=wait)).scalar_one()
        if result == 1:
            return True
        if result == 0:
            # GET_LOCK timed out without obtaining the lock.
            return False
        if result is None:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"An error occurred while attempting to obtain the lock {self.key!r}")
        raise SqlAlchemyDLockDatabaseError(f"GET_LOCK({self.key!r}, {wait}) returns {result}")  # pragma: no cover

    @override
    def do_release(self):
        result = self.connection_or_session.execute(UNLOCK.params(str=self.key)).scalar_one()
        if result == 1:
            return
        if result == 0:
            raise SqlAlchemyDLockDatabaseError(
                f"The named lock {self.key!r} was not established by this thread, and the lock is not released."
            )
        if result is None:
            raise SqlAlchemyDLockDatabaseError(
                f"The named lock {self.key!r} did not exist, "
                "was never obtained by a call to GET_LOCK(), "
                "or has previously been released."
            )
        raise SqlAlchemyDLockDatabaseError(f"RELEASE_LOCK({self.key!r}) returns {result}")  # pragma: no cover
142 |
143 |
class MysqlAsyncSadLock(MysqlSadLockMixin, BaseAsyncSadLock[str, AsyncConnectionOrSessionT]):
    """Async IO version of :class:`MysqlSadLock`"""

    @override
    def __init__(self, connection_or_session: AsyncConnectionOrSessionT, key: KT, **kwargs):
        """
        Args:
            connection_or_session: :attr:`.BaseAsyncSadLock.connection_or_session`
            key: :attr:`.BaseAsyncSadLock.key`
            **kwargs: other named parameters pass to :class:`.BaseAsyncSadLock` and :class:`.MysqlSadLockMixin`
        """
        MysqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseAsyncSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    async def do_acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        # Same (block, timeout) -> GET_LOCK timeout mapping as the sync `MysqlSadLock`.
        if block:
            # None: set the timeout period to infinite.
            if timeout is None:
                timeout = -1
            # negative value for `timeout` are equivalent to a `timeout` of zero
            elif timeout < 0:
                timeout = 0
        else:
            timeout = 0
        stmt = LOCK.params(str=self.key, timeout=timeout)
        ret_val = (await self.connection_or_session.execute(stmt)).scalar_one()
        if ret_val == 1:
            return True
        elif ret_val == 0:
            return False  # timed out without successfully acquiring the lock
        elif ret_val is None:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"An error occurred while attempting to obtain the lock {self.key!r}")
        else:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"GET_LOCK({self.key!r}, {timeout}) returns {ret_val}")

    @override
    async def do_release(self):
        # RELEASE_LOCK returns 1 on success, 0 when held by another session, NULL when unknown.
        stmt = UNLOCK.params(str=self.key)
        ret_val = (await self.connection_or_session.execute(stmt)).scalar_one()
        if ret_val == 1:
            return
        elif ret_val == 0:
            raise SqlAlchemyDLockDatabaseError(
                f"The named lock {self.key!r} was not established by this thread, and the lock is not released."
            )
        elif ret_val is None:
            raise SqlAlchemyDLockDatabaseError(
                f"The named lock {self.key!r} did not exist, "
                "was never obtained by a call to GET_LOCK(), "
                "or has previously been released."
            )
        else:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"RELEASE_LOCK({self.key!r}) returns {ret_val}")
192 |
--------------------------------------------------------------------------------
/tests/asyncio/test_basic.py:
--------------------------------------------------------------------------------
1 | from contextlib import AsyncExitStack
2 | from multiprocessing import cpu_count
3 | from random import randint
4 | from secrets import token_bytes, token_hex
5 | from unittest import IsolatedAsyncioTestCase
6 | from uuid import uuid4
7 |
8 | from sqlalchemy_dlock import create_async_sadlock
9 |
10 | from .engines import create_engines, dispose_engines, get_engines
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
class BasicTestCase(IsolatedAsyncioTestCase):
    """Basic acquire/release behavior of the async lock against every configured engine."""

    def setUp(self):
        # Engine creation is synchronous; disposal (below) must be awaited.
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_enter_exit(self):
        """Explicit acquire() / release() toggles the `locked` flag."""
        for engine in get_engines():
            key = uuid4().hex
            async with engine.connect() as conn:
                assert conn is not None
                lock = create_async_sadlock(conn, key)
                self.assertFalse(lock.locked)
                await lock.acquire()
                self.assertTrue(lock.locked)
                await lock.release()
                self.assertFalse(lock.locked)

    async def test_with_statement(self):
        """`async with` acquires on enter and releases on exit."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                key = uuid4().hex
                async with create_async_sadlock(conn, key) as lock:
                    self.assertTrue(lock.locked)
                self.assertFalse(lock.locked)

    async def test_timeout_in_with_statement(self):
        """`contextual_timeout` makes a blocked `async with` raise TimeoutError."""
        for engine in get_engines():
            async with AsyncExitStack() as stack:
                # Two separate connections: lock contention needs distinct sessions.
                conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]
                key = uuid4().hex
                lock0 = create_async_sadlock(conn0, key)
                self.assertFalse(lock0.locked)
                r = await lock0.acquire(False)
                self.assertTrue(r)
                self.assertTrue(lock0.locked)
                with self.assertRaises(TimeoutError):
                    async with create_async_sadlock(conn1, key, contextual_timeout=1):
                        pass
                # The holder's lock must be unaffected by the failed attempt.
                self.assertTrue(lock0.locked)
                await lock0.release()
                self.assertFalse(lock0.locked)

    async def test_many_str_key(self):
        """Many distinct string keys acquire and release cleanly on one connection."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                for _ in range(100):
                    key = uuid4().hex + uuid4().hex
                    async with create_async_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    async def test_many_int_key(self):
        """Integer keys across the full signed-int64 range work."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                for _ in range(100):
                    key = randint(-0x8000_0000_0000_0000, 0x7FFF_FFFF_FFFF_FFFF)
                    async with create_async_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    async def test_many_bytes_key(self):
        """Bytes keys work; MySQL needs decodable text, PostgreSQL accepts raw bytes."""
        for engine in get_engines():
            for _ in range(100):
                async with engine.connect() as conn:
                    if engine.name == "mysql":
                        key = token_hex().encode()
                    elif engine.name == "postgresql":
                        key = token_bytes()
                    else:
                        raise NotImplementedError()
                    async with create_async_sadlock(conn, key) as lock:
                        self.assertTrue(lock.locked)
                    self.assertFalse(lock.locked)

    async def test_invoke_locked_lock(self):
        """Acquiring an already-acquired lock object raises ValueError."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                key = uuid4().hex
                async with create_async_sadlock(conn, key) as lock:
                    self.assertTrue(lock.locked)
                    with self.assertRaisesRegex(ValueError, "invoked on a locked lock"):
                        await lock.acquire()
                self.assertFalse(lock.locked)

    async def test_invoke_unlocked_lock(self):
        """Releasing a never-acquired lock object raises ValueError."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                key = uuid4().hex
                lock = create_async_sadlock(conn, key)
                self.assertFalse(lock.locked)
                with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
                    await lock.release()
                self.assertFalse(lock.locked)

    async def test_timeout_positive(self):
        """An uncontended acquire succeeds within any positive timeout."""
        for engine in get_engines():
            key = uuid4().hex
            for _ in range(CPU_COUNT + 1):
                async with engine.connect() as conn:
                    assert conn is not None
                    lock = create_async_sadlock(conn, key)
                    try:
                        self.assertFalse(lock.locked)
                        r = await lock.acquire(timeout=randint(1, 1024))
                        self.assertTrue(r)
                        self.assertTrue(lock.locked)
                    finally:
                        await lock.release()
                        self.assertFalse(lock.locked)

    async def test_timeout_zero(self):
        """Timeout of zero still succeeds when the lock is free."""
        for engine in get_engines():
            async with engine.connect() as conn:
                assert conn is not None
                key = uuid4().hex
                lock = create_async_sadlock(conn, key)
                try:
                    self.assertFalse(lock.locked)
                    r = await lock.acquire(timeout=0)
                    self.assertTrue(r)
                    self.assertTrue(lock.locked)
                finally:
                    await lock.release()
                    self.assertFalse(lock.locked)

    async def test_timeout_negative(self):
        """Negative timeouts are treated as zero and succeed when the lock is free."""
        for engine in get_engines():
            for _ in range(CPU_COUNT + 1):
                async with engine.connect() as conn:
                    assert conn is not None
                    key = uuid4().hex
                    lock = create_async_sadlock(conn, key)
                    try:
                        r = await lock.acquire(timeout=-1 * randint(1, 1024))
                        self.assertTrue(r)
                    finally:
                        await lock.release()
                        self.assertFalse(lock.locked)

    async def test_timeout_none(self):
        """timeout=None (wait forever) succeeds immediately when the lock is free."""
        for engine in get_engines():
            for _ in range(CPU_COUNT + 1):
                async with engine.connect() as conn:
                    assert conn is not None
                    key = uuid4().hex
                    lock = create_async_sadlock(conn, key)
                    try:
                        r = await lock.acquire(timeout=None)
                        self.assertTrue(r)
                    finally:
                        await lock.release()
                        self.assertFalse(lock.locked)

    async def test_enter_locked(self):
        """Non-blocking acquire fails while another session holds the key, then succeeds."""
        for engine in get_engines():
            key = uuid4().hex
            async with AsyncExitStack() as stack:
                conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]

                lock0 = create_async_sadlock(conn0, key)
                self.assertFalse(lock0.locked)
                r = await lock0.acquire(False)
                self.assertTrue(r)
                self.assertTrue(lock0.locked)

                lock1 = create_async_sadlock(conn1, key)
                self.assertFalse(lock1.locked)
                r = await lock1.acquire(block=False)
                self.assertFalse(r)
                self.assertFalse(lock1.locked)

                self.assertTrue(lock0.locked)
                await lock0.release()
                self.assertFalse(lock0.locked)

                r = await lock1.acquire(False)
                self.assertTrue(r)
                self.assertTrue(lock1.locked)
                await lock1.release()
                self.assertFalse(lock1.locked)

    async def test_release_unlocked_error(self):
        """A second session cannot release a lock it never acquired."""
        for engine in get_engines():
            key = uuid4().hex
            async with AsyncExitStack() as stack:
                conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]

                lock0 = create_async_sadlock(conn0, key)
                r = await lock0.acquire(False)
                self.assertTrue(r)
                self.assertTrue(lock0.locked)

                lock1 = create_async_sadlock(conn1, key)
                with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
                    await lock1.release()
217 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/base.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from abc import ABC, abstractmethod
3 | from threading import local
4 | from typing import Callable, Generic, Optional, TypeVar, Union, final
5 |
6 | if sys.version_info >= (3, 11): # pragma: no cover
7 | from typing import Self
8 | else: # pragma: no cover
9 | from typing_extensions import Self
10 |
11 | if sys.version_info < (3, 12): # pragma: no cover
12 | from typing_extensions import override
13 | else: # pragma: no cover
14 | from typing import override
15 |
16 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
17 |
# The user-facing key type accepted by a lock's constructor.
KeyTV = TypeVar("KeyTV")
# The key type actually sent to the database locking functions.
ActualKeyTV = TypeVar("ActualKeyTV")
# Sync / async connection-or-session type variables, bound to the project's aliases.
ConnectionTV = TypeVar("ConnectionTV", bound=ConnectionOrSessionT)
AsyncConnectionTV = TypeVar("AsyncConnectionTV", bound=AsyncConnectionOrSessionT)
22 |
23 |
class AbstractLockMixin(Generic[KeyTV, ActualKeyTV], ABC):
    """Interface of the key-conversion mix-in classes

    A concrete mix-in converts the user-given ``key`` (``KeyTV``) into the data
    type required by the database's locking functions (``ActualKeyTV``).
    """

    @abstractmethod
    def __init__(self, *, key: KeyTV, convert: Optional[Callable[[KeyTV], ActualKeyTV]] = None, **kwargs):
        raise NotImplementedError()

    @abstractmethod
    def get_actual_key(self) -> ActualKeyTV:
        """Return the converted key that is passed to the database locking functions"""
        raise NotImplementedError()

    @final
    @property
    def actual_key(self) -> ActualKeyTV:
        # Read-only convenience wrapper around :meth:`get_actual_key`
        return self.get_actual_key()
37 |
38 |
class BaseSadLock(AbstractLockMixin, Generic[KeyTV, ConnectionTV], local, ABC):
    """Base class of database lock implementation

    Note:
        * It's Thread-Local (:class:`threading.local`)
        * It's an abstract class, do not manually instantiate

    The :meth:`acquire` and :meth:`release` methods can be used as context managers for a :keyword:`with` statement.
    :meth:`acquire` will be called when the block is entered, and :meth:`release` will be called when the block is exited.
    Hence, the following snippet::

        with some_lock:
            # do something...
            pass

    is equivalent to::

        some_lock.acquire()
        try:
            # do something...
            pass
        finally:
            some_lock.release()

    Note:
        A :exc:`TimeoutError` will be thrown if the acquisition times out in a :keyword:`with` statement.
    """

    @override
    def __init__(
        self, connection_or_session: ConnectionTV, key: KeyTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
    ):
        """
        Args:

            connection_or_session: Connection or Session object SQL locking functions will be invoked on it

            key: ID or name of the SQL locking function

            contextual_timeout: Timeout(seconds) for Context Managers.

                When called in a :keyword:`with` statement, the new created lock object will pass it to ``timeout`` argument of :meth:`.BaseSadLock.acquire`.

                Attention:
                    **ONLY** affects :keyword:`with` statements.

                Example:
                    ::

                        try:
                            with create_sadlock(conn, k, contextual_timeout=5) as lck:
                                # do something...
                                pass
                        except TimeoutError:
                            # can not acquire after 5 seconds
                            pass

                Note:
                    The default value of `timeout` is still :data:`None`, when invoking :meth:`.acquire`
        """
        self._acquired = False
        self._connection_or_session = connection_or_session
        self._key = key
        self._contextual_timeout = contextual_timeout

    @final
    def __enter__(self) -> Self:
        if self._contextual_timeout is None:  # timeout period is infinite
            self.acquire()
        elif not self.acquire(timeout=self._contextual_timeout):  # the timeout period has elapsed and not acquired
            raise TimeoutError()
        return self

    @final
    def __exit__(self, exc_type, exc_value, exc_tb):
        # `close` (not `release`) so that exiting an unlocked block does not raise.
        self.close()

    @final
    def __str__(self) -> str:
        return "<{} {} key={} at 0x{:x}>".format(
            "locked" if self._acquired else "unlocked",
            self.__class__.__name__,
            self._key,
            id(self),
        )

    @final
    @property
    def connection_or_session(self) -> ConnectionTV:
        """Connection or Session object SQL locking functions will be invoked on it

        It returns ``connection_or_session`` parameter of the class's constructor.
        """
        return self._connection_or_session

    @final
    @property
    def key(self) -> KeyTV:
        """ID or name of the SQL locking function

        It returns ``key`` parameter of the class's constructor"""
        return self._key

    @final
    @property
    def locked(self) -> bool:
        """locked/unlocked state property

        :data:`True` if the lock is acquired, else :data:`False`
        """
        return self._acquired

    @final
    def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        """Acquire the lock in blocking or non-blocking mode.

        The implementation (:meth:`do_acquire`) should provide the following behavior:

        * When ``block`` is :data:`True` (the default), the method blocks until the lock is in an unlocked state,
          then sets it to locked and returns :data:`True`.

        * When ``block`` is :data:`False`, the method call is non-blocking.
          If the lock is currently locked, it returns :data:`False`; otherwise, it sets the lock to locked state and returns :data:`True`.

        * When invoked with a positive floating-point value for ``timeout``, it blocks for at most the specified number
          of seconds until the lock can be acquired.

        * Invocations with a negative ``timeout`` value are equivalent to a ``timeout`` of zero.

        * When ``timeout`` is ``None`` (the default), the timeout period is infinite.
          The ``timeout`` parameter has no effect when ``block`` is :data:`False` and is thus ignored.

        * Returns :data:`True` if the lock has been acquired or :data:`False` if the timeout period has elapsed.

        Raises:
            ValueError: when invoked on an already-locked lock
        """
        if self._acquired:
            raise ValueError("invoked on a locked lock")
        self._acquired = self.do_acquire(block, timeout, *args, **kwargs)
        return self._acquired

    @abstractmethod
    def do_acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        # Subclasses perform the actual SQL locking call here; `acquire` manages state.
        raise NotImplementedError()

    @final
    def release(self, *args, **kwargs):
        """Release the lock.

        Since the class is thread-local, this method cannot be called from another thread or process,
        nor can it be called from another connection.
        (Although PostgreSQL's shared advisory lock supports this).

        The implementation (:meth:`do_release`) should provide the following behavior:

        * Reset the lock to unlocked state and return when the lock is currently locked.
        * Allow exactly one of any other threads blocked waiting for the lock to become unlocked to proceed.
        * Raise a :class:`ValueError` when invoked on an unlocked lock.
        * Not return a value.
        """
        if not self._acquired:
            raise ValueError("invoked on an unlocked lock")
        self.do_release(*args, **kwargs)
        self._acquired = False

    @abstractmethod
    def do_release(self, *args, **kwargs):
        # Subclasses perform the actual SQL unlocking call here; `release` manages state.
        raise NotImplementedError()

    @final
    def close(self, *args, **kwargs):
        """Same as :meth:`release`

        Except that the :class:`ValueError` is **NOT** raised when invoked on an unlocked lock.

        An invocation of this method is equivalent to::

            if some_lock.locked:
                some_lock.release()

        This method maybe useful together with :func:`contextlib.closing`, when we need a :keyword:`with` statement, but don't want it to acquire at the beginning of the block.

        Example:
            ::

                # ...

                from contextlib import closing
                from sqlalchemy_dlock import create_sadlock

                # ...

                with closing(create_sadlock(some_connection, some_key)) as lock:
                    # will **NOT** acquire at the beginning of the with-block
                    assert not lock.locked
                    # ...
                    # lock when need
                    lock.acquire()
                    assert lock.locked
                    # ...

                # `close` will be called at the end of the with-block
                assert not lock.locked
        """
        if self._acquired:
            self.release(*args, **kwargs)
243 |
244 |
class BaseAsyncSadLock(AbstractLockMixin, Generic[KeyTV, AsyncConnectionTV], local, ABC):
    """Async version of :class:`.BaseSadLock`"""

    @override
    def __init__(
        self,
        connection_or_session: AsyncConnectionTV,
        key: KeyTV,
        /,
        contextual_timeout: Union[float, int, None] = None,
        **kwargs,
    ):
        """
        Args:
            connection_or_session: Async connection or session object SQL locking functions will be invoked on it
            key: ID or name of the SQL locking function
            contextual_timeout: Timeout(seconds) for ``async with`` blocks; see :meth:`.BaseSadLock.__init__`
        """
        self._acquired = False
        self._connection_or_session = connection_or_session
        self._key = key
        self._contextual_timeout = contextual_timeout

    @final
    async def __aenter__(self) -> Self:
        if self._contextual_timeout is None:
            await self.acquire()
        elif not await self.acquire(timeout=self._contextual_timeout):
            # the timeout period has elapsed and not acquired
            raise TimeoutError()
        return self

    @final
    async def __aexit__(self, exc_type, exc_value, exc_tb):
        # `close` (not `release`) so that exiting an unlocked block does not raise.
        await self.close()

    @final
    def __str__(self):
        return "<{} {} key={} at 0x{:x}>".format(
            "locked" if self._acquired else "unlocked",
            self.__class__.__name__,
            self._key,
            id(self),
        )

    @final
    @property
    def connection_or_session(self) -> AsyncConnectionTV:
        """Async connection or session object SQL locking functions will be invoked on it"""
        return self._connection_or_session

    @final
    @property
    def key(self) -> KeyTV:
        """ID or name of the SQL locking function"""
        return self._key

    @final
    @property
    def locked(self) -> bool:
        """:data:`True` if the lock is acquired, else :data:`False`"""
        return self._acquired

    @final
    async def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        """Async version of :meth:`.BaseSadLock.acquire`"""
        if self._acquired:
            raise ValueError("invoked on a locked lock")
        self._acquired = await self.do_acquire(block, timeout, *args, **kwargs)
        return self._acquired

    @abstractmethod
    async def do_acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
        # Subclasses perform the actual SQL locking call here; `acquire` manages state.
        raise NotImplementedError()

    @final
    async def release(self, *args, **kwargs):
        """Async version of :meth:`.BaseSadLock.release`"""
        if not self._acquired:
            raise ValueError("invoked on an unlocked lock")
        await self.do_release(*args, **kwargs)
        self._acquired = False

    @abstractmethod
    async def do_release(self, *args, **kwargs):
        # Subclasses perform the actual SQL unlocking call here; `release` manages state.
        raise NotImplementedError()

    @final
    async def close(self, *args, **kwargs):
        """Async version of :meth:`.BaseSadLock.close` -- release without raising when unlocked"""
        if self._acquired:
            await self.release(*args, **kwargs)
325 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/postgresql.py:
--------------------------------------------------------------------------------
import asyncio
import sys
from hashlib import blake2b
from time import monotonic, sleep, time
from typing import Any, Callable, Optional, TypeVar, Union
from warnings import catch_warnings, simplefilter, warn
7 |
8 | if sys.version_info < (3, 12): # pragma: no cover
9 | from typing_extensions import override
10 | else: # pragma: no cover
11 | from typing import override
12 |
13 | from ..exceptions import SqlAlchemyDLockDatabaseError
14 | from ..statement.postgresql import (
15 | LOCK,
16 | LOCK_SHARED,
17 | LOCK_XACT,
18 | LOCK_XACT_SHARED,
19 | SLEEP_INTERVAL_DEFAULT,
20 | SLEEP_INTERVAL_MIN,
21 | TRY_LOCK,
22 | TRY_LOCK_SHARED,
23 | TRY_LOCK_XACT,
24 | TRY_LOCK_XACT_SHARED,
25 | UNLOCK,
26 | UNLOCK_SHARED,
27 | )
28 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
29 | from .base import AbstractLockMixin, BaseAsyncSadLock, BaseSadLock
30 |
# Key types the default `convert` class method knows how to turn into a signed int64.
ConvertibleKT = Union[bytes, bytearray, memoryview, str, int, float]
# The user-facing key type is unconstrained; a custom `convert` may handle anything.
KT = Any
KTV = TypeVar("KTV", bound=KT)
34 |
35 |
class PostgresqlSadLockMixin(AbstractLockMixin[KTV, int]):
    """A Mix-in class for PostgreSQL advisory lock"""

    @override
    def __init__(
        self, *, key: KTV, convert: Optional[Callable[[KTV], int]] = None, shared: bool = False, xact: bool = False, **kwargs
    ):
        """
        Args:
            key: PostgreSQL advisory lock requires the key given by ``INT64``.

                * When ``key`` is :class:`int`, the constructor tries to ensure it to be ``INT64``.
                  :class:`OverflowError` is raised if too big or too small for that.

                * When ``key`` is :class:`str` or :class:`bytes` or alike, the constructor calculates its checksum by :func:`hashlib.blake2b`, and takes the hash result integer value as actual key.

                * Or you can specify a ``convert`` function to that argument::

                    def convert(val: Any) -> int:
                        int64_key: int = do_sth(val)
                        return int64_key

            shared: :attr:`.shared`
            xact: :attr:`.xact`
            convert: Custom function to convert ``key`` to required data type.
        """
        # A user supplied converter takes precedence over the default `convert` class method.
        if convert:
            self._actual_key = convert(key)
        else:
            self._actual_key = self.convert(key)
        # Whatever the conversion path, the result must fit in a signed INT64.
        self._actual_key = self.ensure_int64(self._actual_key)
        #
        self._shared = bool(shared)
        self._xact = bool(xact)
        #
        # Pre-build the statements for the chosen (shared, xact) variant.
        # Transaction-level ("xact") locks have no unlock function, so
        # `_stmt_unlock` stays None for them.
        self._stmt_unlock = None
        if not shared and not xact:
            self._stmt_lock = LOCK.params(key=self._actual_key)
            self._stmt_try_lock = TRY_LOCK.params(key=self._actual_key)
            self._stmt_unlock = UNLOCK.params(key=self._actual_key)
        elif shared and not xact:
            self._stmt_lock = LOCK_SHARED.params(key=self._actual_key)
            self._stmt_try_lock = TRY_LOCK_SHARED.params(key=self._actual_key)
            self._stmt_unlock = UNLOCK_SHARED.params(key=self._actual_key)
        elif not shared and xact:
            self._stmt_lock = LOCK_XACT.params(key=self._actual_key)
            self._stmt_try_lock = TRY_LOCK_XACT.params(key=self._actual_key)
        else:
            self._stmt_lock = LOCK_XACT_SHARED.params(key=self._actual_key)
            self._stmt_try_lock = TRY_LOCK_XACT_SHARED.params(key=self._actual_key)

    @override
    def get_actual_key(self) -> int:
        """The actual key used in PostgreSQL advisory lock"""
        return self._actual_key

    @classmethod
    def convert(cls, k: ConvertibleKT) -> int:
        """The default key converter for PostgreSQL advisory lock

        An :class:`int` is returned unchanged; text/bytes-like values are hashed
        with :func:`hashlib.blake2b` into a signed 8-byte integer.
        A :class:`TypeError` is raised for any other type.
        """
        if isinstance(k, int):
            return k
        if isinstance(k, str):
            d = k.encode()
        elif isinstance(k, (bytes, bytearray)):
            d = k
        elif isinstance(k, memoryview):
            d = k.tobytes()
        else:
            raise TypeError(type(k).__name__)
        return int.from_bytes(blake2b(d, digest_size=8).digest(), sys.byteorder, signed=True)

    @classmethod
    def ensure_int64(cls, i: int) -> int:
        """ensure the integer in PostgreSQL advisory lock's range (Signed INT64)

        * max of signed int64: ``2**63-1`` (``+0x7FFF_FFFF_FFFF_FFFF``)
        * min of signed int64: ``-2**63`` (``-0x8000_0000_0000_0000``)

        Returns:
            Signed int64 key
        """
        ## no force convert UINT greater than 2**63-1 to SINT
        # if i > 0x7FFF_FFFF_FFFF_FFFF:
        #     return int.from_bytes(i.to_bytes(8, byteorder, signed=False), byteorder, signed=True)
        if not isinstance(i, int):
            raise TypeError(f"int type expected, but actual type is {type(i).__name__}")
        if i > 0x7FFF_FFFF_FFFF_FFFF:
            raise OverflowError("int too big")
        if i < -0x8000_0000_0000_0000:
            raise OverflowError("int too small")
        return i

    @property
    def shared(self) -> bool:
        """Is the advisory lock shared or exclusive"""
        return self._shared

    @property
    def xact(self) -> bool:
        """Is the advisory lock transaction level or session level"""
        return self._xact
137 |
138 |
class PostgresqlSadLock(PostgresqlSadLockMixin, BaseSadLock[KT, ConnectionOrSessionT]):
    """A distributed lock implemented by PostgreSQL advisory lock

    See also:
        https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS

    Tip:
        Locks can be either shared or exclusive: a shared lock does not conflict with other shared locks on the same resource, only with exclusive locks.
        Locks can be taken at session level (so that they are held until released or the session ends) or at transaction level (so that they are held until the current transaction ends; there is no provision for manual release).
        Multiple session-level lock requests stack, so that if the same resource identifier is locked three times there must then be three unlock requests to release the resource in advance of session end.
    """

    @override
    def __init__(self, connection_or_session: ConnectionOrSessionT, key: KT, **kwargs):
        """
        Args:
            connection_or_session: see :attr:`.BaseSadLock.connection_or_session`
            key: :attr:`.BaseSadLock.key`
            shared: :attr:`.PostgresqlSadLockMixin.shared`
            xact: :attr:`.PostgresqlSadLockMixin.xact`
            convert: :class:`.PostgresqlSadLockMixin`
            **kwargs: other named parameters pass to :class:`.BaseSadLock` and :class:`.PostgresqlSadLockMixin`
        """
        # The mixin must run first: it computes `actual_key` and builds the statements.
        PostgresqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    def do_acquire(
        self,
        block: bool = True,
        timeout: Union[float, int, None] = None,
        interval: Union[float, int, None] = None,
        *args,
        **kwargs,
    ) -> bool:
        """
        See Also:
            :meth:`.BaseSadLock.acquire`

        Attention:
            PostgreSQL's advisory lock has no timeout mechanism in itself.
            When ``timeout`` is a non-negative number, we simulate it by **looping** and **sleeping**.

            The ``interval`` argument specifies the sleep seconds(``1`` by default).

            That is:
            The actual timeout won't be precise when ``interval`` is big;
            while small ``interval`` will cause high CPU usage and frequent SQL execution.
        """
        if block:
            if timeout is None:
                # None: set the timeout period to infinite.
                self.connection_or_session.execute(self._stmt_lock).all()
                return True
            else:
                # negative value for `timeout` are equivalent to a `timeout` of zero.
                if timeout < 0:
                    timeout = 0
                interval = SLEEP_INTERVAL_DEFAULT if interval is None else interval
                if interval < SLEEP_INTERVAL_MIN:  # pragma: no cover
                    raise ValueError("interval too small")
                # Poll pg_try_advisory_lock until it succeeds or the deadline passes.
                # A monotonic clock is used so system clock adjustments cannot
                # shorten or lengthen the timeout.
                ts_begin = monotonic()
                while True:
                    ret_val = self.connection_or_session.execute(self._stmt_try_lock).scalar_one()
                    if ret_val:  # succeed
                        return True
                    if monotonic() - ts_begin > timeout:  # expired
                        return False
                    sleep(interval)
        else:
            # This will either obtain the lock immediately and return true,
            # or return false without waiting if the lock cannot be acquired immediately.
            ret_val = self.connection_or_session.execute(self._stmt_try_lock).scalar_one()
            return bool(ret_val)

    @override
    def do_release(self):
        """Release the advisory lock, warning instead of executing for transaction-level locks."""
        if self._stmt_unlock is None:
            # Transaction-level locks are released automatically at transaction end;
            # there is no SQL function to call, so only warn.
            warn(
                "PostgreSQL transaction level advisory locks are held until the current transaction ends; "
                "there is no provision for manual release.",
                RuntimeWarning,
            )
            return
        ret_val = self.connection_or_session.execute(self._stmt_unlock).scalar_one()
        if not ret_val:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"The advisory lock {self.key!r} was not held.")

    # Force override `close`: suppress the transaction-level advisory lock
    # RuntimeWarning emitted by `do_release`, since `close` is documented as a
    # best-effort release that should stay quiet.
    def close(self):  # type: ignore
        """Same as :meth:`.BaseSadLock.close`, silencing the transaction-level lock warning."""
        if self.locked:
            with catch_warnings():
                # `catch_warnings()` alone only saves/restores the filter state
                # (and `catch_warnings(category=...)` without `action` installs no
                # filter at all); an explicit "ignore" filter is required to
                # actually suppress the warning, on every supported Python version.
                simplefilter("ignore", RuntimeWarning)
                return self.release()
236 |
237 |
class PostgresqlAsyncSadLock(PostgresqlSadLockMixin, BaseAsyncSadLock[int, AsyncConnectionOrSessionT]):
    """Async IO version of :class:`PostgresqlSadLock`"""

    @override
    def __init__(self, connection_or_session: AsyncConnectionOrSessionT, key: KT, **kwargs):
        # The mixin converts `key` into the integer `actual_key` PostgreSQL needs,
        # and prepares the lock/try-lock/unlock statements.
        PostgresqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseAsyncSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    async def do_acquire(
        self,
        block: bool = True,
        timeout: Union[float, int, None] = None,
        interval: Union[float, int, None] = None,
        *args,
        **kwargs,
    ) -> bool:
        """
        See Also:
            :meth:`.BaseAsyncSadLock.acquire`

        Attention:
            PostgreSQL's advisory lock has no timeout mechanism in itself.
            When ``timeout`` is a non-negative number, we simulate it by **looping** and **sleeping**.

            The ``interval`` argument specifies the sleep seconds(``1`` by default).
        """
        if block:
            if timeout is None:
                # None: infinite wait -- the blocking lock statement returns only
                # once the lock is granted.
                _ = (await self.connection_or_session.execute(self._stmt_lock)).all()
                return True
            # Negative values for `timeout` are equivalent to a `timeout` of zero.
            if timeout < 0:
                timeout = 0
            interval = SLEEP_INTERVAL_DEFAULT if interval is None else interval
            if interval < SLEEP_INTERVAL_MIN:  # pragma: no cover
                raise ValueError("interval too small")
            ts_begin = time()
            while True:
                ret_val = (await self.connection_or_session.execute(self._stmt_try_lock)).scalar_one()
                if ret_val:  # succeed
                    return True
                remaining = timeout - (time() - ts_begin)
                if remaining <= 0:  # expired
                    return False
                # Cap the nap at the remaining time so we never sleep past the
                # deadline (the original always slept a full `interval`, which
                # could overshoot `timeout` by up to one interval).
                await asyncio.sleep(min(interval, remaining))
        else:
            # Non-blocking: the try-lock statement either obtains the lock
            # immediately and returns true, or returns false without waiting.
            ret_val = (await self.connection_or_session.execute(self._stmt_try_lock)).scalar_one()
            return bool(ret_val)

    @override
    async def do_release(self):
        # Transaction-level advisory locks have no unlock statement at all;
        # they are released only when the surrounding transaction ends.
        if self._stmt_unlock is None:
            warn(
                "PostgreSQL transaction level advisory locks are held until the current transaction ends; "
                "there is no provision for manual release.",
                RuntimeWarning,
            )
            return
        ret_val = (await self.connection_or_session.execute(self._stmt_unlock)).scalar_one()
        if not ret_val:  # pragma: no cover
            raise SqlAlchemyDLockDatabaseError(f"The advisory lock {self.key!r} was not held.")

    # Force-override close: release while suppressing the transaction-level
    # advisory locks RuntimeWarning emitted by `do_release`.
    async def close(self):  # type: ignore
        if self.locked:
            from warnings import simplefilter

            with catch_warnings():
                # A bare `catch_warnings()` only saves/restores filter state, and
                # `catch_warnings(category=...)` (3.11+) installs no filter unless
                # `action` is also given -- so neither original branch actually
                # silenced the warning. An explicit "ignore" filter does, on every
                # supported Python version, and makes the version branch unnecessary.
                simplefilter("ignore", RuntimeWarning)
                return await self.release()
303 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # my ignores
2 | package-lock.json
3 | *.lock
4 | .env.*
5 | *.env
6 |
7 | *~
8 |
9 | # temporary files which can be created if a process still has a handle open of a deleted file
10 | .fuse_hidden*
11 |
12 | # KDE directory preferences
13 | .directory
14 |
15 | # Linux trash folder which might appear on any partition or disk
16 | .Trash-*
17 |
18 | # .nfs files are created when an open file is removed but is still being accessed
19 | .nfs*
20 |
21 |
22 | # General
23 | .DS_Store
24 | .AppleDouble
25 | .LSOverride
26 |
27 | # Icon must end with two \r
28 | Icon
29 |
30 |
31 | # Thumbnails
32 | ._*
33 |
34 | # Files that might appear in the root of a volume
35 | .DocumentRevisions-V100
36 | .fseventsd
37 | .Spotlight-V100
38 | .TemporaryItems
39 | .Trashes
40 | .VolumeIcon.icns
41 | .com.apple.timemachine.donotpresent
42 |
43 | # Directories potentially created on remote AFP share
44 | .AppleDB
45 | .AppleDesktop
46 | Network Trash Folder
47 | Temporary Items
48 | .apdisk
49 | # Windows thumbnail cache files
50 | Thumbs.db
51 | Thumbs.db:encryptable
52 | ehthumbs.db
53 | ehthumbs_vista.db
54 |
55 | # Dump file
56 | *.stackdump
57 |
58 | # Folder config file
59 | [Dd]esktop.ini
60 |
61 | # Recycle Bin used on file shares
62 | $RECYCLE.BIN/
63 |
64 | # Windows Installer files
65 | *.cab
66 | *.msi
67 | *.msix
68 | *.msm
69 | *.msp
70 |
71 | # Windows shortcuts
72 | *.lnk
73 |
74 | # Logs
75 | logs
76 | *.log
77 | npm-debug.log*
78 | yarn-debug.log*
79 | yarn-error.log*
80 | lerna-debug.log*
81 |
82 | # Diagnostic reports (https://nodejs.org/api/report.html)
83 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
84 |
85 | # Runtime data
86 | pids
87 | *.pid
88 | *.seed
89 | *.pid.lock
90 |
91 | # Directory for instrumented libs generated by jscoverage/JSCover
92 | lib-cov
93 |
94 | # Coverage directory used by tools like istanbul
95 | coverage
96 | *.lcov
97 |
98 | # nyc test coverage
99 | .nyc_output
100 |
101 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
102 | .grunt
103 |
104 | # Bower dependency directory (https://bower.io/)
105 | bower_components
106 |
107 | # node-waf configuration
108 | .lock-wscript
109 |
110 | # Compiled binary addons (https://nodejs.org/api/addons.html)
111 | build/Release
112 |
113 | # Dependency directories
114 | node_modules/
115 | jspm_packages/
116 |
117 | # Snowpack dependency directory (https://snowpack.dev/)
118 | web_modules/
119 |
120 | # TypeScript cache
121 | *.tsbuildinfo
122 |
123 | # Optional npm cache directory
124 | .npm
125 |
126 | # Optional eslint cache
127 | .eslintcache
128 |
129 | # Microbundle cache
130 | .rpt2_cache/
131 | .rts2_cache_cjs/
132 | .rts2_cache_es/
133 | .rts2_cache_umd/
134 |
135 | # Optional REPL history
136 | .node_repl_history
137 |
138 | # Output of 'npm pack'
139 | *.tgz
140 |
141 | # Yarn Integrity file
142 | .yarn-integrity
143 |
144 | # dotenv environment variables file
145 | .env
146 | .env.test
147 |
148 | # parcel-bundler cache (https://parceljs.org/)
149 | .cache
150 | .parcel-cache
151 |
152 | # Next.js build output
153 | .next
154 |
155 | # Nuxt.js build / generate output
156 | .nuxt
157 | dist
158 |
159 | # Gatsby files
160 | .cache/
161 | # Comment in the public line in if your project uses Gatsby and not Next.js
162 | # https://nextjs.org/blog/next-9-1#public-directory-support
163 | # public
164 |
165 | # vuepress build output
166 | .vuepress/dist
167 |
168 | # Serverless directories
169 | .serverless/
170 |
171 | # FuseBox cache
172 | .fusebox/
173 |
174 | # DynamoDB Local files
175 | .dynamodb/
176 |
177 | # TernJS port file
178 | .tern-port
179 |
180 | # Stores VSCode versions used for testing VSCode extensions
181 | .vscode-test
182 |
183 | # Remote development
184 | .vscode-server
185 |
186 | # yarn v2
187 |
188 | .yarn/cache
189 | .yarn/unplugged
190 | .yarn/build-state.yml
191 | .pnp.*
192 |
193 | # Byte-compiled / optimized / DLL files
194 | __pycache__/
195 | *.py[cod]
196 | *$py.class
197 |
198 | # C extensions
199 | *.so
200 |
201 | # Distribution / packaging
202 | .Python
203 | build/
204 | develop-eggs/
205 | dist/
206 | downloads/
207 | eggs/
208 | .eggs/
209 | lib/
210 | lib64/
211 | parts/
212 | sdist/
213 | var/
214 | wheels/
215 | pip-wheel-metadata/
216 | share/python-wheels/
217 | *.egg-info/
218 | .installed.cfg
219 | *.egg
220 | MANIFEST
221 |
222 | # PyInstaller
223 | # Usually these files are written by a python script from a template
224 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
225 | *.manifest
226 | *.spec
227 |
228 | # Installer logs
229 | pip-log.txt
230 | pip-delete-this-directory.txt
231 |
232 | # Unit test / coverage reports
233 | htmlcov/
234 | .tox/
235 | .nox/
236 | .coverage
237 | .coverage.*
238 | .cache
239 | nosetests.xml
240 | coverage.xml
241 | *.cover
242 | *.py,cover
243 | .hypothesis/
244 | .pytest_cache/
245 | cover/
246 |
247 | # Translations
248 | *.mo
249 | *.pot
250 |
251 | # Django stuff:
252 | *.log
253 | local_settings.py
254 | db.sqlite3
255 | db.sqlite3-journal
256 |
257 | # Flask stuff:
258 | instance/
259 | .webassets-cache
260 |
261 | # Scrapy stuff:
262 | .scrapy
263 |
264 | # Sphinx documentation
265 | docs/_build/
266 |
267 | # PyBuilder
268 | .pybuilder/
269 | target/
270 |
271 | # Jupyter Notebook
272 | .ipynb_checkpoints
273 |
274 | # IPython
275 | profile_default/
276 | ipython_config.py
277 |
278 | # pyenv
279 | # For a library or package, you might want to ignore these files since the code is
280 | # intended to run in multiple environments; otherwise, check them in:
281 | # .python-version
282 |
283 | # pipenv
284 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
285 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
286 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
287 | # install all needed dependencies.
288 | #Pipfile.lock
289 |
290 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
291 | __pypackages__/
292 |
293 | # Celery stuff
294 | celerybeat-schedule
295 | celerybeat.pid
296 |
297 | # SageMath parsed files
298 | *.sage.py
299 |
300 | # Environments
301 | .env
302 | .venv
303 | env/
304 | venv/
305 | ENV/
306 | env.bak/
307 | venv.bak/
308 |
309 | # Spyder project settings
310 | .spyderproject
311 | .spyproject
312 |
313 | # Rope project settings
314 | .ropeproject
315 |
316 | # mkdocs documentation
317 | /site
318 |
319 | # mypy
320 | .mypy_cache/
321 | .dmypy.json
322 | dmypy.json
323 |
324 | # Pyre type checker
325 | .pyre/
326 |
327 | # pytype static type analyzer
328 | .pytype/
329 |
330 | # Cython debug symbols
331 | cython_debug/
332 |
333 | # static files generated from Django application using `collectstatic`
334 | media
335 | static
336 |
337 | # Google App Engine generated folder
338 | appengine-generated/
339 |
340 | # Swap
341 | [._]*.s[a-v][a-z]
# Comment out the next line if you don't need vector files
# (gitignore has no trailing comments, so the note must be on its own line)
!*.svg
343 | [._]*.sw[a-p]
344 | [._]s[a-rt-v][a-z]
345 | [._]ss[a-gi-z]
346 | [._]sw[a-p]
347 |
348 | # _Session
349 | Session.vim
350 | Sessionx.vim
351 |
352 | # Temporary
353 | .netrwhist
354 | *~
355 | # Auto-generated tag files
356 | tags
357 | # Persistent undo
358 | [._]*.un~
359 |
360 | # -*- mode: gitignore; -*-
361 | *~
362 | \#*\#
363 | /.emacs.desktop
364 | /.emacs.desktop.lock
365 | *.elc
366 | auto-save-list
367 | tramp
368 | .\#*
369 |
370 | # Org-mode
371 | .org-id-locations
372 | *_archive
373 |
374 | # flymake-mode
375 | *_flymake.*
376 |
377 | # eshell files
378 | /eshell/history
379 | /eshell/lastdir
380 |
381 | # elpa packages
382 | /elpa/
383 |
384 | # reftex files
385 | *.rel
386 |
387 | # AUCTeX auto folder
388 | /auto/
389 |
390 | # cask packages
391 | .cask/
392 | dist/
393 |
394 | # Flycheck
395 | flycheck_*.el
396 |
397 | # server auth directory
398 | /server/
399 |
400 | # projectiles files
401 | .projectile
402 |
403 | # directory configuration
404 | .dir-locals.el
405 |
406 | # network security
407 | /network-security.data
408 |
409 |
410 | # Project-level settings
411 | /.tgitconfig
412 |
413 | .vscode/*
414 | # !.vscode/settings.json
415 | !.vscode/tasks.json
416 | !.vscode/launch.json
417 | !.vscode/extensions.json
418 | *.code-workspace
419 |
420 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
421 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
422 |
423 | # User-specific stuff
424 | .idea/**/workspace.xml
425 | .idea/**/tasks.xml
426 | .idea/**/usage.statistics.xml
427 | .idea/**/dictionaries
428 | .idea/**/shelf
429 |
430 | # Generated files
431 | .idea/**/contentModel.xml
432 |
433 | # Sensitive or high-churn files
434 | .idea/**/dataSources/
435 | .idea/**/dataSources.ids
436 | .idea/**/dataSources.local.xml
437 | .idea/**/sqlDataSources.xml
438 | .idea/**/dynamic.xml
439 | .idea/**/uiDesigner.xml
440 | .idea/**/dbnavigator.xml
441 |
442 | # Gradle
443 | .idea/**/gradle.xml
444 | .idea/**/libraries
445 |
446 | # Gradle and Maven with auto-import
447 | # When using Gradle or Maven with auto-import, you should exclude module files,
448 | # since they will be recreated, and may cause churn. Uncomment if using
449 | # auto-import.
450 | # .idea/artifacts
451 | # .idea/compiler.xml
452 | # .idea/jarRepositories.xml
453 | # .idea/modules.xml
454 | # .idea/*.iml
455 | # .idea/modules
456 | # *.iml
457 | # *.ipr
458 |
459 | # CMake
460 | cmake-build-*/
461 |
462 | # Mongo Explorer plugin
463 | .idea/**/mongoSettings.xml
464 |
465 | # File-based project format
466 | *.iws
467 |
468 | # IntelliJ
469 | out/
470 |
471 | # mpeltonen/sbt-idea plugin
472 | .idea_modules/
473 |
474 | # JIRA plugin
475 | atlassian-ide-plugin.xml
476 |
477 | # Cursive Clojure plugin
478 | .idea/replstate.xml
479 |
480 | # Crashlytics plugin (for Android Studio and IntelliJ)
481 | com_crashlytics_export_strings.xml
482 | crashlytics.properties
483 | crashlytics-build.properties
484 | fabric.properties
485 |
486 | # Editor-based Rest Client
487 | .idea/httpRequests
488 |
489 | # Android studio 3.1+ serialized cache file
490 | .idea/caches/build_file_checksums.ser
491 |
492 | # markdown config
493 | .idea/markdown.xml
494 |
495 | .metadata
496 | bin/
497 | tmp/
498 | *.tmp
499 | *.bak
500 | *.swp
501 | *~.nib
502 | local.properties
503 | .settings/
504 | .loadpath
505 | .recommenders
506 |
507 | # External tool builders
508 | .externalToolBuilders/
509 |
510 | # Locally stored "Eclipse launch configurations"
511 | *.launch
512 |
513 | # PyDev specific (Python IDE for Eclipse)
514 | *.pydevproject
515 |
516 | # CDT-specific (C/C++ Development Tooling)
517 | .cproject
518 |
519 | # CDT- autotools
520 | .autotools
521 |
522 | # Java annotation processor (APT)
523 | .factorypath
524 |
525 | # PDT-specific (PHP Development Tools)
526 | .buildpath
527 |
528 | # sbteclipse plugin
529 | .target
530 |
531 | # Tern plugin
532 | .tern-project
533 |
534 | # TeXlipse plugin
535 | .texlipse
536 |
537 | # STS (Spring Tool Suite)
538 | .springBeans
539 |
540 | # Code Recommenders
541 | .recommenders/
542 |
543 | # Annotation Processing
544 | .apt_generated/
545 | .apt_generated_test/
546 |
547 | # Scala IDE specific (Scala & Java development for Eclipse)
548 | .cache-main
549 | .scala_dependencies
550 | .worksheet
551 |
552 | # Uncomment this line if you wish to ignore the project description file.
553 | # Typically, this file would be tracked if it contains build/dependency configurations:
554 | #.project
555 |
556 | ## Ignore Visual Studio temporary files, build results, and
557 | ## files generated by popular Visual Studio add-ons.
558 | ##
559 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
560 |
561 | # User-specific files
562 | *.rsuser
563 | *.suo
564 | *.user
565 | *.userosscache
566 | *.sln.docstates
567 |
568 | # User-specific files (MonoDevelop/Xamarin Studio)
569 | *.userprefs
570 |
571 | # Mono auto generated files
572 | mono_crash.*
573 |
574 | # Build results
575 | [Dd]ebug/
576 | [Dd]ebugPublic/
577 | [Rr]elease/
578 | [Rr]eleases/
579 | x64/
580 | x86/
581 | [Ww][Ii][Nn]32/
582 | [Aa][Rr][Mm]/
583 | [Aa][Rr][Mm]64/
584 | bld/
585 | [Bb]in/
586 | [Oo]bj/
587 | [Ll]og/
588 | [Ll]ogs/
589 |
590 | # Visual Studio 2015/2017 cache/options directory
591 | .vs/
592 | # Uncomment if you have tasks that create the project's static files in wwwroot
593 | #wwwroot/
594 |
595 | # Visual Studio 2017 auto generated files
596 | Generated\ Files/
597 |
598 | # MSTest test Results
599 | [Tt]est[Rr]esult*/
600 | [Bb]uild[Ll]og.*
601 |
602 | # NUnit
603 | *.VisualState.xml
604 | TestResult.xml
605 | nunit-*.xml
606 |
607 | # Build Results of an ATL Project
608 | [Dd]ebugPS/
609 | [Rr]eleasePS/
610 | dlldata.c
611 |
612 | # Benchmark Results
613 | BenchmarkDotNet.Artifacts/
614 |
615 | # .NET Core
616 | project.lock.json
617 | project.fragment.lock.json
618 | artifacts/
619 |
620 | # ASP.NET Scaffolding
621 | ScaffoldingReadMe.txt
622 |
623 | # StyleCop
624 | StyleCopReport.xml
625 |
626 | # Files built by Visual Studio
627 | *_i.c
628 | *_p.c
629 | *_h.h
630 | *.ilk
631 | *.meta
632 | *.obj
633 | *.iobj
634 | *.pch
635 | *.pdb
636 | *.ipdb
637 | *.pgc
638 | *.pgd
639 | *.rsp
640 | *.sbr
641 | *.tlb
642 | *.tli
643 | *.tlh
644 | *.tmp
645 | *.tmp_proj
646 | *_wpftmp.csproj
647 | *.log
648 | *.vspscc
649 | *.vssscc
650 | .builds
651 | *.pidb
652 | *.svclog
653 | *.scc
654 |
655 | # Chutzpah Test files
656 | _Chutzpah*
657 |
658 | # Visual C++ cache files
659 | ipch/
660 | *.aps
661 | *.ncb
662 | *.opendb
663 | *.opensdf
664 | *.sdf
665 | *.cachefile
666 | *.VC.db
667 | *.VC.VC.opendb
668 |
669 | # Visual Studio profiler
670 | *.psess
671 | *.vsp
672 | *.vspx
673 | *.sap
674 |
675 | # Visual Studio Trace Files
676 | *.e2e
677 |
678 | # TFS 2012 Local Workspace
679 | $tf/
680 |
681 | # Guidance Automation Toolkit
682 | *.gpState
683 |
684 | # ReSharper is a .NET coding add-in
685 | _ReSharper*/
686 | *.[Rr]e[Ss]harper
687 | *.DotSettings.user
688 |
689 | # TeamCity is a build add-in
690 | _TeamCity*
691 |
692 | # DotCover is a Code Coverage Tool
693 | *.dotCover
694 |
695 | # AxoCover is a Code Coverage Tool
696 | .axoCover/*
697 | !.axoCover/settings.json
698 |
699 | # Coverlet is a free, cross platform Code Coverage Tool
700 | coverage*[.json, .xml, .info]
701 |
702 | # Visual Studio code coverage results
703 | *.coverage
704 | *.coveragexml
705 |
706 | # NCrunch
707 | _NCrunch_*
708 | .*crunch*.local.xml
709 | nCrunchTemp_*
710 |
711 | # MightyMoose
712 | *.mm.*
713 | AutoTest.Net/
714 |
715 | # Web workbench (sass)
716 | .sass-cache/
717 |
718 | # Installshield output folder
719 | [Ee]xpress/
720 |
721 | # DocProject is a documentation generator add-in
722 | DocProject/buildhelp/
723 | DocProject/Help/*.HxT
724 | DocProject/Help/*.HxC
725 | DocProject/Help/*.hhc
726 | DocProject/Help/*.hhk
727 | DocProject/Help/*.hhp
728 | DocProject/Help/Html2
729 | DocProject/Help/html
730 |
731 | # Click-Once directory
732 | publish/
733 |
734 | # Publish Web Output
735 | *.[Pp]ublish.xml
736 | *.azurePubxml
737 | # Note: Comment the next line if you want to checkin your web deploy settings,
738 | # but database connection strings (with potential passwords) will be unencrypted
739 | *.pubxml
740 | *.publishproj
741 |
742 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
743 | # checkin your Azure Web App publish settings, but sensitive information contained
744 | # in these scripts will be unencrypted
745 | PublishScripts/
746 |
747 | # NuGet Packages
748 | *.nupkg
749 | # NuGet Symbol Packages
750 | *.snupkg
751 | # The packages folder can be ignored because of Package Restore
752 | **/[Pp]ackages/*
753 | # except build/, which is used as an MSBuild target.
754 | !**/[Pp]ackages/build/
755 | # Uncomment if necessary however generally it will be regenerated when needed
756 | #!**/[Pp]ackages/repositories.config
757 | # NuGet v3's project.json files produces more ignorable files
758 | *.nuget.props
759 | *.nuget.targets
760 |
761 | # Microsoft Azure Build Output
762 | csx/
763 | *.build.csdef
764 |
765 | # Microsoft Azure Emulator
766 | ecf/
767 | rcf/
768 |
769 | # Windows Store app package directories and files
770 | AppPackages/
771 | BundleArtifacts/
772 | Package.StoreAssociation.xml
773 | _pkginfo.txt
774 | *.appx
775 | *.appxbundle
776 | *.appxupload
777 |
778 | # Visual Studio cache files
779 | # files ending in .cache can be ignored
780 | *.[Cc]ache
781 | # but keep track of directories ending in .cache
782 | !?*.[Cc]ache/
783 |
784 | # Others
785 | ClientBin/
786 | ~$*
787 | *~
788 | *.dbmdl
789 | *.dbproj.schemaview
790 | *.jfm
791 | *.pfx
792 | *.publishsettings
793 | orleans.codegen.cs
794 |
795 | # Including strong name files can present a security risk
796 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
797 | #*.snk
798 |
799 | # Since there are multiple workflows, uncomment next line to ignore bower_components
800 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
801 | #bower_components/
802 |
803 | # RIA/Silverlight projects
804 | Generated_Code/
805 |
806 | # Backup & report files from converting an old project file
807 | # to a newer Visual Studio version. Backup files are not needed,
808 | # because we have git ;-)
809 | _UpgradeReport_Files/
810 | Backup*/
811 | UpgradeLog*.XML
812 | UpgradeLog*.htm
813 | ServiceFabricBackup/
814 | *.rptproj.bak
815 |
816 | # SQL Server files
817 | *.mdf
818 | *.ldf
819 | *.ndf
820 |
821 | # Business Intelligence projects
822 | *.rdl.data
823 | *.bim.layout
824 | *.bim_*.settings
825 | *.rptproj.rsuser
826 | *- [Bb]ackup.rdl
827 | *- [Bb]ackup ([0-9]).rdl
828 | *- [Bb]ackup ([0-9][0-9]).rdl
829 |
830 | # Microsoft Fakes
831 | FakesAssemblies/
832 |
833 | # GhostDoc plugin setting file
834 | *.GhostDoc.xml
835 |
836 | # Node.js Tools for Visual Studio
837 | .ntvs_analysis.dat
838 | node_modules/
839 |
840 | # Visual Studio 6 build log
841 | *.plg
842 |
843 | # Visual Studio 6 workspace options file
844 | *.opt
845 |
846 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
847 | *.vbw
848 |
849 | # Visual Studio LightSwitch build output
850 | **/*.HTMLClient/GeneratedArtifacts
851 | **/*.DesktopClient/GeneratedArtifacts
852 | **/*.DesktopClient/ModelManifest.xml
853 | **/*.Server/GeneratedArtifacts
854 | **/*.Server/ModelManifest.xml
855 | _Pvt_Extensions
856 |
857 | # Paket dependency manager
858 | .paket/paket.exe
859 | paket-files/
860 |
861 | # FAKE - F# Make
862 | .fake/
863 |
864 | # CodeRush personal settings
865 | .cr/personal
866 |
867 | # Python Tools for Visual Studio (PTVS)
868 | __pycache__/
869 | *.pyc
870 |
871 | # Cake - Uncomment if you are using it
872 | # tools/**
873 | # !tools/packages.config
874 |
875 | # Tabs Studio
876 | *.tss
877 |
878 | # Telerik's JustMock configuration file
879 | *.jmconfig
880 |
881 | # BizTalk build output
882 | *.btp.cs
883 | *.btm.cs
884 | *.odx.cs
885 | *.xsd.cs
886 |
887 | # OpenCover UI analysis results
888 | OpenCover/
889 |
890 | # Azure Stream Analytics local run output
891 | ASALocalRun/
892 |
893 | # MSBuild Binary and Structured Log
894 | *.binlog
895 |
896 | # NVidia Nsight GPU debugger configuration file
897 | *.nvuser
898 |
899 | # MFractors (Xamarin productivity tool) working folder
900 | .mfractor/
901 |
902 | # Local History for Visual Studio
903 | .localhistory/
904 |
905 | # BeatPulse healthcheck temp database
906 | healthchecksdb
907 |
908 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
909 | MigrationBackup/
910 |
911 | # Ionide (cross platform F# VS Code tools) working folder
912 | .ionide/
913 |
914 | *.tmp
915 |
916 | # Word temporary
917 | ~$*.doc*
918 |
919 | # Word Auto Backup File
920 | Backup of *.doc*
921 |
922 | # Excel temporary
923 | ~$*.xls*
924 |
925 | # Excel Backup File
926 | *.xlk
927 |
928 | # PowerPoint temporary
929 | ~$*.ppt*
930 |
931 | # Visio autosave temporary files
932 | *.~vsd*
933 |
934 | # LibreOffice locks
935 | .~lock.*#
936 |
--------------------------------------------------------------------------------