├── .coveragerc
├── .dockerignore
├── .editorconfig
├── .gitattributes
├── .github
├── docker-compose.yml
└── workflows
│ └── python-package.yml
├── .gitignore
├── .idea
├── .gitignore
├── inspectionProfiles
│ ├── Project_Default.xml
│ └── profiles_settings.xml
├── misc.xml
├── modules.xml
├── sqlalchemy-dlock.iml
└── vcs.xml
├── .markdownlint.json
├── .mypy.ini
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── .ruff.toml
├── .vscode
└── tasks.json
├── AUTHORS.md
├── CHANGELOG.md
├── LICENSE
├── MANIFEST.in
├── README.md
├── codecov.yml
├── docker-compose.database.yml
├── docs
├── AUTHORS.rst
├── CHANGELOG.rst
├── Makefile
├── README.rst
├── _static
│ └── .gitkeep
├── _templates
│ └── .gitkeep
├── apidocs
│ └── .gitignore
├── conf.py
├── index.rst
├── make.bat
└── requirements.txt
├── pyproject.toml
├── requirements.txt
├── scripts
├── run-test.sh
├── wait-for-mysql.sh
└── wait-for-postgres.sh
├── src
└── sqlalchemy_dlock
│ ├── .gitignore
│ ├── __init__.py
│ ├── exceptions.py
│ ├── factory.py
│ ├── lock
│ ├── __init__.py
│ ├── base.py
│ ├── mysql.py
│ └── postgresql.py
│ ├── py.typed
│ ├── registry.py
│ ├── statement
│ ├── __init__.py
│ ├── mysql.py
│ └── postgresql.py
│ └── typing.py
└── tests
├── .dockerignore
├── Dockerfile
├── __init__.py
├── asyncio
├── __init__.py
├── engines.py
├── test_basic.py
├── test_concurrency.py
├── test_key_convert.py
├── test_pg.py
└── test_session.py
├── docker-compose.yml
├── engines.py
├── requirements-compose.txt
├── requirements.txt
├── test_basic.py
├── test_key_convert.py
├── test_multiprocess.py
├── test_multithread.py
├── test_pg.py
├── test_scoped_session.py
└── test_session.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | test/*
4 | tests/*
5 | .venv/*
6 | env/*
7 | venv/*
8 | ENV/*
9 | env.bak/*
10 | venv.bak/*
11 | src/**/_version.py
12 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .git/
2 |
3 | .venv/
4 |
5 | **/__pycache__
6 | *.egg-info
7 | *.egg/
8 | *.pyc
9 |
10 | build/
11 | dist/
12 | docs/_build
13 |
14 | .mypy_cache/
15 | .pytest_cache/
16 | .ruff_cache/
17 |
18 | *.swp
19 |
20 | html/*
21 |
22 | **/Dockerfile
23 | **/Dockerfile.*
24 | **/*.Dockerfile
25 | **/docker-compose.*
26 | **/*.docker-compose.*
27 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # EditorConfig is awesome: https://EditorConfig.org
2 |
3 | # top-most EditorConfig file
4 | root = true
5 |
6 | # Unix-style newlines with a newline ending every file
7 | [*]
8 | end_of_line = lf
9 | charset = utf-8
10 | insert_final_newline = true
11 |
12 | # 4 space indentation
13 | [*.py]
14 | indent_style = space
15 | indent_size = 4
16 |
17 | # Tab indentation (no size specified)
18 | [Makefile]
19 | indent_style = tab
20 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
2 | *.{cmd,[cC][mM][dD]} text eol=crlf
3 | *.{bat,[bB][aA][tT]} text eol=crlf
4 | *.{ps1,[pP][sS]1} text eol=crlf
5 |
--------------------------------------------------------------------------------
/.github/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # For the tests on GitHub Actions only
2 |
3 | services:
4 |
5 | mysql:
6 | image: mysql
7 | ports:
8 | - "127.0.0.1:3306:3306"
9 | environment:
10 | MYSQL_RANDOM_ROOT_PASSWORD: "1"
11 | MYSQL_DATABASE: test
12 | MYSQL_USER: test
13 | MYSQL_PASSWORD: test
14 |
15 | postgres:
16 | image: postgres
17 | ports:
18 | - "127.0.0.1:5432:5432"
19 | environment:
20 | POSTGRES_PASSWORD: test
21 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [main]
9 | tags: ["*"]
10 | paths:
11 | - .github/**
12 | - src/**
13 | - tests/**
14 | - pyproject.toml
15 | pull_request:
16 | branches: [main]
17 |
18 | jobs:
19 | get-version:
20 | runs-on: ubuntu-latest
21 | outputs:
22 | version: ${{ steps.get-version.outputs.version }}
23 | steps:
24 | - name: Check PEP-440 style version
25 | id: get-version
26 | run: |
27 | PEP440_VERSION=""
28 | VERSION_PREFIX="v"
29 | BRANCH_OR_TAG="$(echo ${{ github.event.ref }} | cut -d / -f 3)"
30 | if [[ "${BRANCH_OR_TAG}" =~ ^v?(([1-9][0-9]*!)?(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))*(\.?(a|b|rc)(0|[1-9][0-9]*))?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?)$ ]]
31 | then
32 | PEP440_VERSION="${BRANCH_OR_TAG#$VERSION_PREFIX}"
33 | fi
34 | echo "PEP440_VERSION: ${PEP440_VERSION}"
35 | echo "version=${PEP440_VERSION}" >> $GITHUB_OUTPUT
36 |
37 | test:
38 | runs-on: ubuntu-latest
39 | strategy:
40 | matrix:
41 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
42 | sqlalchemy-requires:
43 | - SQLAlchemy[asyncio]>=1.4.3,<2.0
44 | - SQLAlchemy[asyncio]>=2.0,<3.0
45 |
46 | steps:
47 | - uses: actions/checkout@v4
48 |
49 | - name: Startup databases
50 | shell: bash
51 | run: |
52 | (cd .github && docker compose up -d)
53 | sh scripts/wait-for-postgres.sh 127.0.0.1 test
54 | sh scripts/wait-for-mysql.sh 127.0.0.1 test test test
55 |
56 | - name: Set up Python ${{ matrix.python-version }}
57 | uses: actions/setup-python@v5
58 | with:
59 | python-version: ${{ matrix.python-version }}
60 | cache: pip
61 |
62 | - name: Install project and dependent tools
63 | run: pip install -e .[asyncio] "${{ matrix.sqlalchemy-requires }}" -r tests/requirements.txt ruff coverage
64 |
65 | - name: Check with ruff
66 | run: |
67 | ruff check .
68 |
69 | - name: Test with coverage
70 | shell: bash
71 | run: |
72 | export TEST_URLS="mysql://test:test@127.0.0.1:3306/test postgresql://postgres:test@127.0.0.1:5432/"
73 | export TEST_ASYNC_URLS="mysql+aiomysql://test:test@127.0.0.1:3306/test postgresql+asyncpg://postgres:test@127.0.0.1:5432/"
74 | coverage run -m unittest -cfv
75 | coverage report -m
76 | coverage xml
77 |
78 | - name: Shutdown databases
79 | shell: bash
80 | run: (cd .github && docker compose down -v)
81 |
82 | - name: Upload coverage reports to CodeCov with GitHub Action
83 | uses: codecov/codecov-action@v4
84 | env:
85 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
86 |
87 | build:
88 | runs-on: ubuntu-latest
89 | needs: [get-version, test]
90 | if: needs.get-version.outputs.version != ''
91 | steps:
92 | - name: Checkout
93 | uses: actions/checkout@v4
94 | - name: Set up Python
95 | uses: actions/setup-python@v5
96 | with:
97 | cache: pip
98 | - name: Install builder
99 | run: pip install build
100 | - name: Build package distributions
101 | run: pyproject-build
102 | - name: Upload package distributions to artifact
103 | uses: actions/upload-artifact@v4
104 | with:
105 | name: sqlalchemy_dlock-dist-${{ needs.get-version.outputs.version }}
106 | path: dist
107 | if-no-files-found: error
108 | retention-days: 1
109 |
110 | publish:
111 | runs-on: ubuntu-latest
112 | needs: [get-version, build]
113 | if: needs.get-version.outputs.version != ''
114 |
115 | steps:
116 | - name: Download package distributions from artifact
117 | uses: actions/download-artifact@v4
118 | with:
119 | name: sqlalchemy_dlock-dist-${{ needs.get-version.outputs.version }}
120 | path: dist
121 | - name: Publish package distributions to PyPI
122 | uses: pypa/gh-action-pypi-publish@release/v1
123 | with:
124 | user: __token__
125 | password: ${{ secrets.PYPI_API_TOKEN }}
126 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # my ignores
2 | package-lock.json
3 | *.lock
4 | .env.*
5 | *.env
6 |
7 | *~
8 |
9 | # temporary files which can be created if a process still has a handle open of a deleted file
10 | .fuse_hidden*
11 |
12 | # KDE directory preferences
13 | .directory
14 |
15 | # Linux trash folder which might appear on any partition or disk
16 | .Trash-*
17 |
18 | # .nfs files are created when an open file is removed but is still being accessed
19 | .nfs*
20 |
21 |
22 | # General
23 | .DS_Store
24 | .AppleDouble
25 | .LSOverride
26 |
27 | # Icon must end with two \r
28 | Icon
29 |
30 |
31 | # Thumbnails
32 | ._*
33 |
34 | # Files that might appear in the root of a volume
35 | .DocumentRevisions-V100
36 | .fseventsd
37 | .Spotlight-V100
38 | .TemporaryItems
39 | .Trashes
40 | .VolumeIcon.icns
41 | .com.apple.timemachine.donotpresent
42 |
43 | # Directories potentially created on remote AFP share
44 | .AppleDB
45 | .AppleDesktop
46 | Network Trash Folder
47 | Temporary Items
48 | .apdisk
49 | # Windows thumbnail cache files
50 | Thumbs.db
51 | Thumbs.db:encryptable
52 | ehthumbs.db
53 | ehthumbs_vista.db
54 |
55 | # Dump file
56 | *.stackdump
57 |
58 | # Folder config file
59 | [Dd]esktop.ini
60 |
61 | # Recycle Bin used on file shares
62 | $RECYCLE.BIN/
63 |
64 | # Windows Installer files
65 | *.cab
66 | *.msi
67 | *.msix
68 | *.msm
69 | *.msp
70 |
71 | # Windows shortcuts
72 | *.lnk
73 |
74 | # Logs
75 | logs
76 | *.log
77 | npm-debug.log*
78 | yarn-debug.log*
79 | yarn-error.log*
80 | lerna-debug.log*
81 |
82 | # Diagnostic reports (https://nodejs.org/api/report.html)
83 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
84 |
85 | # Runtime data
86 | pids
87 | *.pid
88 | *.seed
89 | *.pid.lock
90 |
91 | # Directory for instrumented libs generated by jscoverage/JSCover
92 | lib-cov
93 |
94 | # Coverage directory used by tools like istanbul
95 | coverage
96 | *.lcov
97 |
98 | # nyc test coverage
99 | .nyc_output
100 |
101 | # Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
102 | .grunt
103 |
104 | # Bower dependency directory (https://bower.io/)
105 | bower_components
106 |
107 | # node-waf configuration
108 | .lock-wscript
109 |
110 | # Compiled binary addons (https://nodejs.org/api/addons.html)
111 | build/Release
112 |
113 | # Dependency directories
114 | node_modules/
115 | jspm_packages/
116 |
117 | # Snowpack dependency directory (https://snowpack.dev/)
118 | web_modules/
119 |
120 | # TypeScript cache
121 | *.tsbuildinfo
122 |
123 | # Optional npm cache directory
124 | .npm
125 |
126 | # Optional eslint cache
127 | .eslintcache
128 |
129 | # Microbundle cache
130 | .rpt2_cache/
131 | .rts2_cache_cjs/
132 | .rts2_cache_es/
133 | .rts2_cache_umd/
134 |
135 | # Optional REPL history
136 | .node_repl_history
137 |
138 | # Output of 'npm pack'
139 | *.tgz
140 |
141 | # Yarn Integrity file
142 | .yarn-integrity
143 |
144 | # dotenv environment variables file
145 | .env
146 | .env.test
147 |
148 | # parcel-bundler cache (https://parceljs.org/)
149 | .cache
150 | .parcel-cache
151 |
152 | # Next.js build output
153 | .next
154 |
155 | # Nuxt.js build / generate output
156 | .nuxt
157 | dist
158 |
159 | # Gatsby files
160 | .cache/
161 | # Uncomment the public line below if your project uses Gatsby and not Next.js
162 | # https://nextjs.org/blog/next-9-1#public-directory-support
163 | # public
164 |
165 | # vuepress build output
166 | .vuepress/dist
167 |
168 | # Serverless directories
169 | .serverless/
170 |
171 | # FuseBox cache
172 | .fusebox/
173 |
174 | # DynamoDB Local files
175 | .dynamodb/
176 |
177 | # TernJS port file
178 | .tern-port
179 |
180 | # Stores VSCode versions used for testing VSCode extensions
181 | .vscode-test
182 |
183 | # Remote development
184 | .vscode-server
185 |
186 | # yarn v2
187 |
188 | .yarn/cache
189 | .yarn/unplugged
190 | .yarn/build-state.yml
191 | .pnp.*
192 |
193 | # Byte-compiled / optimized / DLL files
194 | __pycache__/
195 | *.py[cod]
196 | *$py.class
197 |
198 | # C extensions
199 | *.so
200 |
201 | # Distribution / packaging
202 | .Python
203 | build/
204 | develop-eggs/
205 | dist/
206 | downloads/
207 | eggs/
208 | .eggs/
209 | lib/
210 | lib64/
211 | parts/
212 | sdist/
213 | var/
214 | wheels/
215 | pip-wheel-metadata/
216 | share/python-wheels/
217 | *.egg-info/
218 | .installed.cfg
219 | *.egg
220 | MANIFEST
221 |
222 | # PyInstaller
223 | # Usually these files are written by a python script from a template
224 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
225 | *.manifest
226 | *.spec
227 |
228 | # Installer logs
229 | pip-log.txt
230 | pip-delete-this-directory.txt
231 |
232 | # Unit test / coverage reports
233 | htmlcov/
234 | .tox/
235 | .nox/
236 | .coverage
237 | .coverage.*
238 | .cache
239 | nosetests.xml
240 | coverage.xml
241 | *.cover
242 | *.py,cover
243 | .hypothesis/
244 | .pytest_cache/
245 | cover/
246 |
247 | # Translations
248 | *.mo
249 | *.pot
250 |
251 | # Django stuff:
252 | *.log
253 | local_settings.py
254 | db.sqlite3
255 | db.sqlite3-journal
256 |
257 | # Flask stuff:
258 | instance/
259 | .webassets-cache
260 |
261 | # Scrapy stuff:
262 | .scrapy
263 |
264 | # Sphinx documentation
265 | docs/_build/
266 |
267 | # PyBuilder
268 | .pybuilder/
269 | target/
270 |
271 | # Jupyter Notebook
272 | .ipynb_checkpoints
273 |
274 | # IPython
275 | profile_default/
276 | ipython_config.py
277 |
278 | # pyenv
279 | # For a library or package, you might want to ignore these files since the code is
280 | # intended to run in multiple environments; otherwise, check them in:
281 | # .python-version
282 |
283 | # pipenv
284 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
285 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
286 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
287 | # install all needed dependencies.
288 | #Pipfile.lock
289 |
290 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
291 | __pypackages__/
292 |
293 | # Celery stuff
294 | celerybeat-schedule
295 | celerybeat.pid
296 |
297 | # SageMath parsed files
298 | *.sage.py
299 |
300 | # Environments
301 | .env
302 | .venv
303 | env/
304 | venv/
305 | ENV/
306 | env.bak/
307 | venv.bak/
308 |
309 | # Spyder project settings
310 | .spyderproject
311 | .spyproject
312 |
313 | # Rope project settings
314 | .ropeproject
315 |
316 | # mkdocs documentation
317 | /site
318 |
319 | # mypy
320 | .mypy_cache/
321 | .dmypy.json
322 | dmypy.json
323 |
324 | # Pyre type checker
325 | .pyre/
326 |
327 | # pytype static type analyzer
328 | .pytype/
329 |
330 | # Cython debug symbols
331 | cython_debug/
332 |
333 | # static files generated from Django application using `collectstatic`
334 | media
335 | static
336 |
337 | # Google App Engine generated folder
338 | appengine-generated/
339 |
340 | # Swap
341 | [._]*.s[a-v][a-z]
342 | !*.svg # comment out if you don't need vector files
343 | [._]*.sw[a-p]
344 | [._]s[a-rt-v][a-z]
345 | [._]ss[a-gi-z]
346 | [._]sw[a-p]
347 |
348 | # Session
349 | Session.vim
350 | Sessionx.vim
351 |
352 | # Temporary
353 | .netrwhist
354 | *~
355 | # Auto-generated tag files
356 | tags
357 | # Persistent undo
358 | [._]*.un~
359 |
360 | # -*- mode: gitignore; -*-
361 | *~
362 | \#*\#
363 | /.emacs.desktop
364 | /.emacs.desktop.lock
365 | *.elc
366 | auto-save-list
367 | tramp
368 | .\#*
369 |
370 | # Org-mode
371 | .org-id-locations
372 | *_archive
373 |
374 | # flymake-mode
375 | *_flymake.*
376 |
377 | # eshell files
378 | /eshell/history
379 | /eshell/lastdir
380 |
381 | # elpa packages
382 | /elpa/
383 |
384 | # reftex files
385 | *.rel
386 |
387 | # AUCTeX auto folder
388 | /auto/
389 |
390 | # cask packages
391 | .cask/
392 | dist/
393 |
394 | # Flycheck
395 | flycheck_*.el
396 |
397 | # server auth directory
398 | /server/
399 |
400 | # projectiles files
401 | .projectile
402 |
403 | # directory configuration
404 | .dir-locals.el
405 |
406 | # network security
407 | /network-security.data
408 |
409 |
410 | # Project-level settings
411 | /.tgitconfig
412 |
413 | .vscode/*
414 | # !.vscode/settings.json
415 | !.vscode/tasks.json
416 | !.vscode/launch.json
417 | !.vscode/extensions.json
418 | *.code-workspace
419 |
420 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
421 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
422 |
423 | # User-specific stuff
424 | .idea/**/workspace.xml
425 | .idea/**/tasks.xml
426 | .idea/**/usage.statistics.xml
427 | .idea/**/dictionaries
428 | .idea/**/shelf
429 |
430 | # Generated files
431 | .idea/**/contentModel.xml
432 |
433 | # Sensitive or high-churn files
434 | .idea/**/dataSources/
435 | .idea/**/dataSources.ids
436 | .idea/**/dataSources.local.xml
437 | .idea/**/sqlDataSources.xml
438 | .idea/**/dynamic.xml
439 | .idea/**/uiDesigner.xml
440 | .idea/**/dbnavigator.xml
441 |
442 | # Gradle
443 | .idea/**/gradle.xml
444 | .idea/**/libraries
445 |
446 | # Gradle and Maven with auto-import
447 | # When using Gradle or Maven with auto-import, you should exclude module files,
448 | # since they will be recreated, and may cause churn. Uncomment if using
449 | # auto-import.
450 | # .idea/artifacts
451 | # .idea/compiler.xml
452 | # .idea/jarRepositories.xml
453 | # .idea/modules.xml
454 | # .idea/*.iml
455 | # .idea/modules
456 | # *.iml
457 | # *.ipr
458 |
459 | # CMake
460 | cmake-build-*/
461 |
462 | # Mongo Explorer plugin
463 | .idea/**/mongoSettings.xml
464 |
465 | # File-based project format
466 | *.iws
467 |
468 | # IntelliJ
469 | out/
470 |
471 | # mpeltonen/sbt-idea plugin
472 | .idea_modules/
473 |
474 | # JIRA plugin
475 | atlassian-ide-plugin.xml
476 |
477 | # Cursive Clojure plugin
478 | .idea/replstate.xml
479 |
480 | # Crashlytics plugin (for Android Studio and IntelliJ)
481 | com_crashlytics_export_strings.xml
482 | crashlytics.properties
483 | crashlytics-build.properties
484 | fabric.properties
485 |
486 | # Editor-based Rest Client
487 | .idea/httpRequests
488 |
489 | # Android studio 3.1+ serialized cache file
490 | .idea/caches/build_file_checksums.ser
491 |
492 | # markdown config
493 | .idea/markdown.xml
494 |
495 | .metadata
496 | bin/
497 | tmp/
498 | *.tmp
499 | *.bak
500 | *.swp
501 | *~.nib
502 | local.properties
503 | .settings/
504 | .loadpath
505 | .recommenders
506 |
507 | # External tool builders
508 | .externalToolBuilders/
509 |
510 | # Locally stored "Eclipse launch configurations"
511 | *.launch
512 |
513 | # PyDev specific (Python IDE for Eclipse)
514 | *.pydevproject
515 |
516 | # CDT-specific (C/C++ Development Tooling)
517 | .cproject
518 |
519 | # CDT- autotools
520 | .autotools
521 |
522 | # Java annotation processor (APT)
523 | .factorypath
524 |
525 | # PDT-specific (PHP Development Tools)
526 | .buildpath
527 |
528 | # sbteclipse plugin
529 | .target
530 |
531 | # Tern plugin
532 | .tern-project
533 |
534 | # TeXlipse plugin
535 | .texlipse
536 |
537 | # STS (Spring Tool Suite)
538 | .springBeans
539 |
540 | # Code Recommenders
541 | .recommenders/
542 |
543 | # Annotation Processing
544 | .apt_generated/
545 | .apt_generated_test/
546 |
547 | # Scala IDE specific (Scala & Java development for Eclipse)
548 | .cache-main
549 | .scala_dependencies
550 | .worksheet
551 |
552 | # Uncomment this line if you wish to ignore the project description file.
553 | # Typically, this file would be tracked if it contains build/dependency configurations:
554 | #.project
555 |
556 | ## Ignore Visual Studio temporary files, build results, and
557 | ## files generated by popular Visual Studio add-ons.
558 | ##
559 | ## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
560 |
561 | # User-specific files
562 | *.rsuser
563 | *.suo
564 | *.user
565 | *.userosscache
566 | *.sln.docstates
567 |
568 | # User-specific files (MonoDevelop/Xamarin Studio)
569 | *.userprefs
570 |
571 | # Mono auto generated files
572 | mono_crash.*
573 |
574 | # Build results
575 | [Dd]ebug/
576 | [Dd]ebugPublic/
577 | [Rr]elease/
578 | [Rr]eleases/
579 | x64/
580 | x86/
581 | [Ww][Ii][Nn]32/
582 | [Aa][Rr][Mm]/
583 | [Aa][Rr][Mm]64/
584 | bld/
585 | [Bb]in/
586 | [Oo]bj/
587 | [Ll]og/
588 | [Ll]ogs/
589 |
590 | # Visual Studio 2015/2017 cache/options directory
591 | .vs/
592 | # Uncomment if you have tasks that create the project's static files in wwwroot
593 | #wwwroot/
594 |
595 | # Visual Studio 2017 auto generated files
596 | Generated\ Files/
597 |
598 | # MSTest test Results
599 | [Tt]est[Rr]esult*/
600 | [Bb]uild[Ll]og.*
601 |
602 | # NUnit
603 | *.VisualState.xml
604 | TestResult.xml
605 | nunit-*.xml
606 |
607 | # Build Results of an ATL Project
608 | [Dd]ebugPS/
609 | [Rr]eleasePS/
610 | dlldata.c
611 |
612 | # Benchmark Results
613 | BenchmarkDotNet.Artifacts/
614 |
615 | # .NET Core
616 | project.lock.json
617 | project.fragment.lock.json
618 | artifacts/
619 |
620 | # ASP.NET Scaffolding
621 | ScaffoldingReadMe.txt
622 |
623 | # StyleCop
624 | StyleCopReport.xml
625 |
626 | # Files built by Visual Studio
627 | *_i.c
628 | *_p.c
629 | *_h.h
630 | *.ilk
631 | *.meta
632 | *.obj
633 | *.iobj
634 | *.pch
635 | *.pdb
636 | *.ipdb
637 | *.pgc
638 | *.pgd
639 | *.rsp
640 | *.sbr
641 | *.tlb
642 | *.tli
643 | *.tlh
644 | *.tmp
645 | *.tmp_proj
646 | *_wpftmp.csproj
647 | *.log
648 | *.vspscc
649 | *.vssscc
650 | .builds
651 | *.pidb
652 | *.svclog
653 | *.scc
654 |
655 | # Chutzpah Test files
656 | _Chutzpah*
657 |
658 | # Visual C++ cache files
659 | ipch/
660 | *.aps
661 | *.ncb
662 | *.opendb
663 | *.opensdf
664 | *.sdf
665 | *.cachefile
666 | *.VC.db
667 | *.VC.VC.opendb
668 |
669 | # Visual Studio profiler
670 | *.psess
671 | *.vsp
672 | *.vspx
673 | *.sap
674 |
675 | # Visual Studio Trace Files
676 | *.e2e
677 |
678 | # TFS 2012 Local Workspace
679 | $tf/
680 |
681 | # Guidance Automation Toolkit
682 | *.gpState
683 |
684 | # ReSharper is a .NET coding add-in
685 | _ReSharper*/
686 | *.[Rr]e[Ss]harper
687 | *.DotSettings.user
688 |
689 | # TeamCity is a build add-in
690 | _TeamCity*
691 |
692 | # DotCover is a Code Coverage Tool
693 | *.dotCover
694 |
695 | # AxoCover is a Code Coverage Tool
696 | .axoCover/*
697 | !.axoCover/settings.json
698 |
699 | # Coverlet is a free, cross platform Code Coverage Tool
700 | coverage*[.json, .xml, .info]
701 |
702 | # Visual Studio code coverage results
703 | *.coverage
704 | *.coveragexml
705 |
706 | # NCrunch
707 | _NCrunch_*
708 | .*crunch*.local.xml
709 | nCrunchTemp_*
710 |
711 | # MightyMoose
712 | *.mm.*
713 | AutoTest.Net/
714 |
715 | # Web workbench (sass)
716 | .sass-cache/
717 |
718 | # Installshield output folder
719 | [Ee]xpress/
720 |
721 | # DocProject is a documentation generator add-in
722 | DocProject/buildhelp/
723 | DocProject/Help/*.HxT
724 | DocProject/Help/*.HxC
725 | DocProject/Help/*.hhc
726 | DocProject/Help/*.hhk
727 | DocProject/Help/*.hhp
728 | DocProject/Help/Html2
729 | DocProject/Help/html
730 |
731 | # Click-Once directory
732 | publish/
733 |
734 | # Publish Web Output
735 | *.[Pp]ublish.xml
736 | *.azurePubxml
737 | # Note: Comment the next line if you want to checkin your web deploy settings,
738 | # but database connection strings (with potential passwords) will be unencrypted
739 | *.pubxml
740 | *.publishproj
741 |
742 | # Microsoft Azure Web App publish settings. Comment the next line if you want to
743 | # checkin your Azure Web App publish settings, but sensitive information contained
744 | # in these scripts will be unencrypted
745 | PublishScripts/
746 |
747 | # NuGet Packages
748 | *.nupkg
749 | # NuGet Symbol Packages
750 | *.snupkg
751 | # The packages folder can be ignored because of Package Restore
752 | **/[Pp]ackages/*
753 | # except build/, which is used as an MSBuild target.
754 | !**/[Pp]ackages/build/
755 | # Uncomment if necessary however generally it will be regenerated when needed
756 | #!**/[Pp]ackages/repositories.config
757 | # NuGet v3's project.json files produces more ignorable files
758 | *.nuget.props
759 | *.nuget.targets
760 |
761 | # Microsoft Azure Build Output
762 | csx/
763 | *.build.csdef
764 |
765 | # Microsoft Azure Emulator
766 | ecf/
767 | rcf/
768 |
769 | # Windows Store app package directories and files
770 | AppPackages/
771 | BundleArtifacts/
772 | Package.StoreAssociation.xml
773 | _pkginfo.txt
774 | *.appx
775 | *.appxbundle
776 | *.appxupload
777 |
778 | # Visual Studio cache files
779 | # files ending in .cache can be ignored
780 | *.[Cc]ache
781 | # but keep track of directories ending in .cache
782 | !?*.[Cc]ache/
783 |
784 | # Others
785 | ClientBin/
786 | ~$*
787 | *~
788 | *.dbmdl
789 | *.dbproj.schemaview
790 | *.jfm
791 | *.pfx
792 | *.publishsettings
793 | orleans.codegen.cs
794 |
795 | # Including strong name files can present a security risk
796 | # (https://github.com/github/gitignore/pull/2483#issue-259490424)
797 | #*.snk
798 |
799 | # Since there are multiple workflows, uncomment next line to ignore bower_components
800 | # (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
801 | #bower_components/
802 |
803 | # RIA/Silverlight projects
804 | Generated_Code/
805 |
806 | # Backup & report files from converting an old project file
807 | # to a newer Visual Studio version. Backup files are not needed,
808 | # because we have git ;-)
809 | _UpgradeReport_Files/
810 | Backup*/
811 | UpgradeLog*.XML
812 | UpgradeLog*.htm
813 | ServiceFabricBackup/
814 | *.rptproj.bak
815 |
816 | # SQL Server files
817 | *.mdf
818 | *.ldf
819 | *.ndf
820 |
821 | # Business Intelligence projects
822 | *.rdl.data
823 | *.bim.layout
824 | *.bim_*.settings
825 | *.rptproj.rsuser
826 | *- [Bb]ackup.rdl
827 | *- [Bb]ackup ([0-9]).rdl
828 | *- [Bb]ackup ([0-9][0-9]).rdl
829 |
830 | # Microsoft Fakes
831 | FakesAssemblies/
832 |
833 | # GhostDoc plugin setting file
834 | *.GhostDoc.xml
835 |
836 | # Node.js Tools for Visual Studio
837 | .ntvs_analysis.dat
838 | node_modules/
839 |
840 | # Visual Studio 6 build log
841 | *.plg
842 |
843 | # Visual Studio 6 workspace options file
844 | *.opt
845 |
846 | # Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
847 | *.vbw
848 |
849 | # Visual Studio LightSwitch build output
850 | **/*.HTMLClient/GeneratedArtifacts
851 | **/*.DesktopClient/GeneratedArtifacts
852 | **/*.DesktopClient/ModelManifest.xml
853 | **/*.Server/GeneratedArtifacts
854 | **/*.Server/ModelManifest.xml
855 | _Pvt_Extensions
856 |
857 | # Paket dependency manager
858 | .paket/paket.exe
859 | paket-files/
860 |
861 | # FAKE - F# Make
862 | .fake/
863 |
864 | # CodeRush personal settings
865 | .cr/personal
866 |
867 | # Python Tools for Visual Studio (PTVS)
868 | __pycache__/
869 | *.pyc
870 |
871 | # Cake - Uncomment if you are using it
872 | # tools/**
873 | # !tools/packages.config
874 |
875 | # Tabs Studio
876 | *.tss
877 |
878 | # Telerik's JustMock configuration file
879 | *.jmconfig
880 |
881 | # BizTalk build output
882 | *.btp.cs
883 | *.btm.cs
884 | *.odx.cs
885 | *.xsd.cs
886 |
887 | # OpenCover UI analysis results
888 | OpenCover/
889 |
890 | # Azure Stream Analytics local run output
891 | ASALocalRun/
892 |
893 | # MSBuild Binary and Structured Log
894 | *.binlog
895 |
896 | # NVidia Nsight GPU debugger configuration file
897 | *.nvuser
898 |
899 | # MFractors (Xamarin productivity tool) working folder
900 | .mfractor/
901 |
902 | # Local History for Visual Studio
903 | .localhistory/
904 |
905 | # BeatPulse healthcheck temp database
906 | healthchecksdb
907 |
908 | # Backup folder for Package Reference Convert tool in Visual Studio 2017
909 | MigrationBackup/
910 |
911 | # Ionide (cross platform F# VS Code tools) working folder
912 | .ionide/
913 |
914 | *.tmp
915 |
916 | # Word temporary
917 | ~$*.doc*
918 |
919 | # Word Auto Backup File
920 | Backup of *.doc*
921 |
922 | # Excel temporary
923 | ~$*.xls*
924 |
925 | # Excel Backup File
926 | *.xlk
927 |
928 | # PowerPoint temporary
929 | ~$*.ppt*
930 |
931 | # Visio autosave temporary files
932 | *.~vsd*
933 |
934 | # LibreOffice locks
935 | .~lock.*#
936 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.idea/sqlalchemy-dlock.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "code-block-style": false,
3 | "line-length": false
4 | }
5 |
--------------------------------------------------------------------------------
/.mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | files = src/**/*.py
3 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v5.0.0
4 | hooks:
5 | - id: check-case-conflict
6 | - id: check-added-large-files
7 | - id: check-symlinks
8 | - id: detect-private-key
9 | - id: fix-byte-order-marker
10 | - id: mixed-line-ending
11 | - id: check-merge-conflict
12 | - id: end-of-file-fixer
13 | - id: trailing-whitespace
14 | args: [--markdown-linebreak-ext=md]
15 | - id: check-yaml
16 | - id: check-toml
17 | - id: check-ast
18 | - id: check-builtin-literals
19 | - id: check-docstring-first
20 |
21 | - repo: https://github.com/astral-sh/ruff-pre-commit
22 | rev: v0.9.2
23 | hooks:
24 | # Run the linter.
25 | - id: ruff
26 | types_or: [python, pyi, jupyter]
27 | args: [--fix]
28 | # Run the formatter.
29 | - id: ruff-format
30 | types_or: [python, pyi, jupyter]
31 |
32 | - repo: https://github.com/pre-commit/mirrors-mypy
33 | rev: v1.14.1
34 | hooks:
35 | - id: mypy
36 | args: [--config-file, .mypy.ini, --ignore-missing-imports]
37 |
38 | - repo: https://github.com/python-jsonschema/check-jsonschema
39 | rev: "0.31.0"
40 | hooks:
41 | - id: check-github-workflows
42 | - id: check-readthedocs
43 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3"
12 |
13 | python:
14 | install:
15 | - requirements: docs/requirements.txt
16 | - method: pip
17 | path: .
18 | extra_requirements:
19 | - asyncio
20 |
--------------------------------------------------------------------------------
/.ruff.toml:
--------------------------------------------------------------------------------
1 | src = ["src"]
2 | extend-exclude = ["docs", "scripts", "tests"]
3 | line-length = 128
4 |
5 | [lint]
6 | extend-select = ["I"]
7 |
8 | [lint.mccabe]
9 | # Unlike Flake8, default to a complexity level of 10.
10 | max-complexity = 10
11 |
12 | # Ignore `F401` (imported but unused) in all `__init__.py` files
13 | [lint.per-file-ignores]
14 | "__init__.py" = ["F401"]
15 |
16 | [format]
17 | docstring-code-format = true
18 |
--------------------------------------------------------------------------------
/.vscode/tasks.json:
--------------------------------------------------------------------------------
1 | {
2 | // See https://go.microsoft.com/fwlink/?LinkId=733558
3 | // for the documentation about the tasks.json format
4 | "version": "2.0.0",
5 | "tasks": [
6 | {
7 | "label": "python build",
8 | "type": "shell",
9 | "command": "${command:python.interpreterPath} -m build",
10 | "group": {
11 | "kind": "build",
12 | "isDefault": true
13 | },
14 | "runOptions": {
15 | "instanceLimit": 1
16 | },
17 | "problemMatcher": []
18 | },
19 | {
20 | "label": "docs: build",
21 | "type": "shell",
22 | "command": "${command:python.interpreterPath} -m sphinx -j auto -d _build/doctrees . _build/html",
23 | "runOptions": {
24 | "instanceLimit": 1
25 | },
26 | "options": {
27 | "cwd": "${workspaceFolder}/docs"
28 | },
29 | "problemMatcher": []
30 | },
31 | {
32 | "label": "docs: serve",
33 | "type": "shell",
34 | "command": "${command:python.interpreterPath} -m http.server _build/html",
35 | "runOptions": {
36 | "instanceLimit": 1
37 | },
38 | "options": {
39 | "cwd": "${workspaceFolder}/docs"
40 | },
41 | "problemMatcher": []
42 | }
43 | ]
44 | }
45 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | # AUTHORS
2 |
3 | * Liu Xue Yan (liu_xue_yan@foxmail.com)
4 |
5 | [](mailto:liu_xue_yan@foxmail.com)
6 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 |
3 | ## v0.6.1.post2
4 |
5 | > 📅 **Date** 2024-11-29
6 |
7 | - 🐛 Bug-fix:
8 | - Issue #4: PostgreSQL xact lock in context manager produces warning
9 | - ✅ Changes:
10 | - `typing-extensions` required for Python earlier than 3.12
11 | - 🖊️ Modifications:
12 | - Add some `override` decorators
13 | - 🎯 CI:
14 | - update pre-commit hooks
15 |
16 | ## v0.6.1
17 |
18 | > 📅 **Date** 2024-4-6
19 |
20 | - ✅ Changes:
21 | - `typing-extensions` required for Python earlier than 3.10
22 |
23 | ## v0.6
24 |
25 | > 📅 **Date** 2024-3-28
26 |
27 | - ❎ Breaking Changes:
28 | - Remove `level` arguments of PostgreSQL lock class' constructor.
29 | `xact` and `shared` arguments were added.
30 | - 🆕 New Features:
31 | - support `transaction` and `shared` advisory lock for PostgreSQL.
32 | - 🐛 Bug fix:
33 | - PostgreSQL transaction level advisory locks are held until the current transaction ends.
34 | Manual release for that is disabled, and a warning message will be printed.
35 | - 🕐 Optimize
36 | - Reduce duplicated codes
37 | - Better unit tests
38 |
39 | ## v0.5.3
40 |
41 | > 📅 **Date** 2024-3-15
42 |
43 | ## v0.5
44 |
45 | Date: 2023-12-06
46 |
47 | - New:
48 | - `contextual_timeout` parameter for “with” statement
49 | - Support Python 3.12
50 |
51 | ## v0.4
52 |
53 | Date: 2023-06-17
54 |
55 | - Remove:
56 | - remove `acquired` property, it's alias of `locked`
57 | - remove setter of `locked` property
58 |
59 | - Optimize:
60 | - re-arrange package's structure
61 | - Many optimizations
62 |
63 | - CI/Test:
64 | - GitHub action: Python 3.8~3.11 x SQLAlchemy 1.x/2.x matrix testing
65 | - Local compose: Python 3.7~3.11 x SQLAlchemy 1.x/2.x matrix testing
66 |
67 | - Doc: Update to Sphinx 7.x, and Furo theme
68 |
69 | ## v0.3.1
70 |
71 | Date: 2023-06-13
72 |
73 | - A hotfix for project's dependencies setup error.
74 |
75 | ## v0.3
76 |
77 | Date: 2023-06-13
78 |
79 | - Remove:
80 | - Python 3.6 support
81 |
82 | - Tests:
83 | - New docker compose based tests, from python 3.7 to 3.11, both SQLAlchemy 1.x and 2.x
84 |
85 | - Docs:
86 | - Update to newer Sphinx docs
87 |
88 | - Build:
89 | - Move all project meta to pyproject.toml, remove setup.cfg and setup.py
90 |
91 | ## v0.2.1
92 |
93 | Date: 2023-02-25
94 |
95 | - New:
96 | - support SQLAlchemy 2.0
97 |
98 | ## v0.2
99 |
100 | Date: 2021-03-23
101 |
102 | First v0.2.x version released.
103 |
104 | ## v0.2b2/b3
105 |
106 | Date: 2021-03-23
107 |
108 | - Add:
109 | - More unit tests
110 | - Optimized CI
111 |
112 | ## v0.2b1
113 |
114 | Date: 2021-03-16
115 |
116 | - Add:
117 |
118 | - New unit tests
119 | - CI by GitHub workflows
120 |
121 | ## v0.2a3
122 |
123 | Date: 2021-03-14
124 |
125 | - Change:
126 |
127 | - Drop Python 3.5 support.
128 | - Remove SQLAlchemy version requires earlier than 1.4 in setup, it's not supported, actually.
129 | - Adjust PostgreSQL lock's constructor arguments order
130 |
131 | - Add:
132 |
133 | - More test cases, and add test/deploy workflow in GitHub actions.
134 | - Add docker-compose test scripts
135 |
136 | ## v0.2a2
137 |
138 | Date: 2021-03-09
139 |
140 | - Change:
141 |
142 | - Rename a lot of function/class:
143 |
144 | - `sadlock` -> `create_sadlock`
145 | - `asyncio.sadlock` -> `asyncio.create_async_sadlock`
146 |
147 | and some other ...
148 |
149 | ## v0.2a1
150 |
151 | Date: 2021-03-08
152 |
153 | - New:
154 |
155 | - Asynchronous IO Support by:
156 |
157 | - [aiomysql](https://github.com/aio-libs/aiomysql) for MySQL
158 |
159 | Connection URL is like: `"mysql+aiomysql://user:password@host:3306/schema?charset=utf8mb4"`
160 |
161 | - [asyncpg](https://github.com/MagicStack/asyncpg) for PostgreSQL
162 |
163 | Connection URL is like: `"postgresql+asyncpg://user:password@host:5432/db"`
164 |
165 | Read <https://docs.sqlalchemy.org/en/20/orm/extensions/asyncio.html> for details
166 |
167 | ## v0.1.2
168 |
169 | Date: 2021-01-26
170 |
171 | Still an early version, not for production.
172 |
173 | - Changes:
174 | - Arguments and their default values of `acquire` now similar to stdlib's `multiprocessing.Lock`, instead of `threading.Lock`
175 | - MySQL lock now accepts float-point value as `timeout`
176 | - Adds
177 | - Several new test cases
178 | - Other
179 | - Many other small adjustment
180 |
181 | ## v0.1.1
182 |
183 | - A very early version, maybe not stable enough.
184 | - Replace `blake2b` with crc64-iso in PostgreSQL key convert function
185 | - Only named arguments as extra parameters allowed in Lock's implementation class
186 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, liu xue yan
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | exclude .*
2 | exclude docker-compose*.yml
3 | exclude *.requirements.txt
4 | exclude codecov.yml
5 | exclude coverage.*
6 |
7 | prune .*
8 | prune docs
9 | prune scripts
10 | prune tests
11 | prune htmlcov
12 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # sqlalchemy-dlock
2 |
3 | [](https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml)
4 | [](https://pypi.org/project/sqlalchemy-dlock/)
5 | [](https://sqlalchemy-dlock.readthedocs.io/en/latest/)
6 | [](https://codecov.io/gh/tanbro/sqlalchemy-dlock)
7 |
8 | `sqlalchemy-dlock` is a distributed-lock library based on Database and [SQLAlchemy][].
9 |
10 | It currently supports below locks:
11 |
12 | | Database | Lock |
13 | |------------|-----------------------------------------------------------------------------------------------|
14 | | MySQL | [named lock](https://dev.mysql.com/doc/refman/en/locking-functions.html) |
15 | | PostgreSQL | [advisory lock](https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS) |
16 |
17 | ## Install
18 |
19 | ```bash
20 | pip install sqlalchemy-dlock
21 | ```
22 |
23 | ## Usage
24 |
25 | - Work with [SQLAlchemy][] [`Connection`](https://docs.sqlalchemy.org/20/core/connections.html):
26 |
27 | ```python
28 | from sqlalchemy import create_engine
29 | from sqlalchemy_dlock import create_sadlock
30 |
31 | key = 'user/001'
32 |
33 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
34 | conn = engine.connect()
35 |
36 | # Create the D-Lock on the connection
37 | lock = create_sadlock(conn, key)
38 |
39 | # it's not lock when constructed
40 | assert not lock.locked
41 |
42 | # lock
43 | lock.acquire()
44 | assert lock.locked
45 |
46 | # un-lock
47 | lock.release()
48 | assert not lock.locked
49 | ```
50 |
51 | - `with` statement
52 |
53 | ```python
54 | from contextlib import closing
55 |
56 | from sqlalchemy import create_engine
57 | from sqlalchemy_dlock import create_sadlock
58 |
59 | key = 'user/001'
60 |
61 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
62 | with engine.connect() as conn:
63 |
64 | # Create the D-Lock on the connection
65 | with create_sadlock(conn, key) as lock:
66 | # It's locked
67 | assert lock.locked
68 |
69 | # Auto un-locked
70 | assert not lock.locked
71 |
72 | # If do not want to be locked in `with`, a `closing` wrapper may help
73 | with closing(create_sadlock(conn, key)) as lock2:
74 | # It's NOT locked here !!!
75 | assert not lock2.locked
76 | # lock it now:
77 | lock2.acquire()
78 | assert lock2.locked
79 |
80 | # Auto un-locked
81 | assert not lock2.locked
82 | ```
83 |
84 | - Work with [SQLAlchemy][] [`ORM` `Session`](https://docs.sqlalchemy.org/en/20/orm/session.html):
85 |
86 | ```python
87 | from sqlalchemy import create_engine
88 | from sqlalchemy.orm import sessionmaker
89 | from sqlalchemy_dlock import create_sadlock
90 |
91 | key = 'user/001'
92 |
93 | engine = create_engine('postgresql://scott:tiger@127.0.0.1/')
94 | Session = sessionmaker(bind=engine)
95 |
96 | with Session() as session:
97 | with create_sadlock(session, key) as lock:
98 | assert lock.locked
99 | assert not lock.locked
100 | ```
101 |
102 | - Asynchronous I/O Support
103 |
104 | > 💡 **TIP**
105 | >
106 | > - [SQLAlchemy][] `1.x`'s asynchronous I/O: <https://docs.sqlalchemy.org/en/14/orm/extensions/asyncio.html>
107 | > - [SQLAlchemy][] `2.x`'s asynchronous I/O: <https://docs.sqlalchemy.org/en/20/orm/extensions/asyncio.html>
108 |
109 | ```python
110 | from sqlalchemy.ext.asyncio import create_async_engine
111 | from sqlalchemy_dlock import create_async_sadlock
112 |
113 | key = 'user/001'
114 |
115 | engine = create_async_engine('postgresql+asyncpg://scott:tiger@127.0.0.1/')
116 |
117 | async with engine.connect() as conn:
118 | async with create_async_sadlock(conn, key) as lock:
119 | assert lock.locked
120 | await lock.release()
121 | assert not lock.locked
122 | await lock.acquire()
123 | assert lock.locked
124 | ```
125 |
126 | > ℹ️ **NOTE** \
127 | > [aiomysql][], [asyncpg][] and [psycopg][] are tested asynchronous drivers.
128 | >
129 | > We can install it with asynchronous DB libraries:
130 | >
131 | > ```bash
132 | > pip install SQLAlchemy[asyncio] aiomysql sqlalchemy-dlock
133 | > ```
134 | >
135 | > or
136 | >
137 | > ```bash
138 | > pip install SQLAlchemy[asyncio] asyncpg sqlalchemy-dlock
139 | > ```
140 |
141 | ## Test
142 |
143 | Following drivers are tested:
144 |
145 | - MySQL:
146 | - [mysqlclient][] (synchronous)
147 | - [pymysql][] (synchronous)
148 | - [aiomysql][] (asynchronous)
149 | - Postgres:
150 | - [psycopg2][] (synchronous)
151 | - [asyncpg][] (asynchronous)
152 | - [psycopg][] (synchronous and asynchronous)
153 |
154 | You can run unit-tests
155 |
156 | - on local environment:
157 |
158 | 1. Install the project in editable mode with `asyncio` optional dependencies, and libraries/drivers needed in test. A virtual environment ([venv][]) is strongly advised:
159 |
160 | ```bash
161 | pip install -e .[asyncio] -r tests/requirements.txt
162 | ```
163 |
164 | 2. start up mysql and postgresql service
165 |
166 | There is a docker [compose][] file `docker-compose.database.yml` in project's top directory,
167 | which can be used to run mysql and postgresql develop environment conveniently:
168 |
169 | ```bash
170 | docker compose -f docker-compose.database.yml up
171 | ```
172 |
173 | 3. set environment variables `TEST_URLS` and `TEST_ASYNC_URLS` for sync and async database connection url.
174 | Multiple connections separated by space.
175 |
176 | eg: (following values are also the defaults, and can be omitted)
177 |
178 | ```ini
179 | TEST_URLS=mysql://test:test@127.0.0.1/test postgresql://postgres:test@127.0.0.1/
180 | TEST_ASYNC_URLS=mysql+aiomysql://test:test@127.0.0.1/test postgresql+asyncpg://postgres:test@127.0.0.1/
181 | ```
182 |
183 | > ℹ️ **NOTE** \
184 | > The test cases would load environment variables from dot-env file `tests/.env`.
185 |
186 | 4. run unit-test
187 |
188 | ```bash
189 | python -m unittest
190 | ```
191 |
192 | - or on docker [compose][]:
193 |
194 | `tests/docker-compose.yml` defines a Python and [SQLAlchemy][] version matrix -- it combines Python `3.8` to `3.12` and [SQLAlchemy][] `v1`/`v2` for test cases. We can run it by:
195 |
196 | ```bash
197 | cd tests
198 | docker compose up --abort-on-container-exit
199 | ```
200 |
201 | [SQLAlchemy]: https://www.sqlalchemy.org/ "The Python SQL Toolkit and Object Relational Mapper"
202 | [venv]: https://docs.python.org/3/library/venv.html "The venv module supports creating lightweight “virtual environments”, each with their own independent set of Python packages installed in their site directories. "
203 | [mysqlclient]: https://pypi.org/project/mysqlclient/ "Python interface to MySQL"
204 | [psycopg2]: https://pypi.org/project/psycopg2/ "PostgreSQL database adapter for Python"
205 | [psycopg]: https://pypi.org/project/psycopg/ "Psycopg 3 is a modern implementation of a PostgreSQL adapter for Python."
206 | [aiomysql]: https://pypi.org/project/aiomysql/ "aiomysql is a “driver” for accessing a MySQL database from the asyncio (PEP-3156/tulip) framework."
207 | [asyncpg]: https://pypi.org/project/asyncpg/ "asyncpg is a database interface library designed specifically for PostgreSQL and Python/asyncio. "
208 | [pymysql]: https://pypi.org/project/pymysql/ "Pure Python MySQL Driver"
209 | [compose]: https://docs.docker.com/compose/ "Compose is a tool for defining and running multi-container Docker applications."
210 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | project:
4 | default:
5 | target: auto # auto compares coverage to the previous base commit
6 |
--------------------------------------------------------------------------------
/docker-compose.database.yml:
--------------------------------------------------------------------------------
1 | # run mysql and posgres database service on local docker, for develop/test
2 |
3 | name: sqlalchemy-dlock-db
4 |
5 | services:
6 | mysql:
7 | image: mysql
8 | ports:
9 | - "127.0.0.1:3306:3306"
10 | environment:
11 | MYSQL_RANDOM_ROOT_PASSWORD: "1"
12 | MYSQL_DATABASE: test
13 | MYSQL_USER: test
14 | MYSQL_PASSWORD: test
15 |
16 | postgres:
17 | image: postgres
18 | ports:
19 | - "127.0.0.1:5432:5432"
20 | environment:
21 | POSTGRES_PASSWORD: test
22 |
--------------------------------------------------------------------------------
/docs/AUTHORS.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../AUTHORS.md
2 | :parser: myst_parser.sphinx_
3 |
--------------------------------------------------------------------------------
/docs/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../CHANGELOG.md
2 | :parser: myst_parser.sphinx_
3 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/README.rst:
--------------------------------------------------------------------------------
1 | README
2 | ======
3 |
4 | .. include:: ../README.md
5 | :parser: myst_parser.sphinx_
6 |
7 | How to Build the Documentation
8 | ------------------------------
9 |
10 | #. The documentation is built using `Sphinx <https://www.sphinx-doc.org/>`_.
11 | We need to install the package and its testing requirements:
12 |
13 | .. code:: sh
14 |
15 | pip install -e . -r docs/requirements.txt
16 |
17 | #. Generate API documentation.
18 | If the source tree has changed, you may clear the `docs/apidocs` directory and regenerate the API documentation:
19 |
20 | .. code:: sh
21 |
22 | sphinx-apidoc -o docs/apidocs -f -e -H APIs src
23 |
24 | #. Build HTML documentation:
25 |
26 | * Using the Make tool (for Unix/Linux/macOS):
27 |
28 | .. code:: sh
29 |
30 | make -C docs html
31 |
32 | * On Windows:
33 |
34 | .. code:: bat
35 |
36 | docs\make html
37 |
38 | The built static website is located at ``docs/_build/html``. You can serve it with a simple HTTP server:
39 |
40 | .. code:: sh
41 |
42 | python -m http.server --directory docs/_build/html
43 |
44 | Then open http://localhost:8000/ in a web browser.
45 |
46 | .. tip::
47 | Try another port if ``8000`` is already in use.
48 | For example, to serve on port ``8080``:
49 |
50 | .. code:: sh
51 |
52 | python -m http.server --directory docs/_build/html 8080
53 |
54 | .. seealso:: Python ``stdlib``'s :mod:`http.server`
55 |
--------------------------------------------------------------------------------
/docs/_static/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tanbro/sqlalchemy-dlock/ce5d2173c278d593cd4b5338bd24dd47478eb69a/docs/_static/.gitkeep
--------------------------------------------------------------------------------
/docs/_templates/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tanbro/sqlalchemy-dlock/ce5d2173c278d593cd4b5338bd24dd47478eb69a/docs/_templates/.gitkeep
--------------------------------------------------------------------------------
/docs/apidocs/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
3 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 | import importlib.metadata
7 |
8 | # -- Project information -----------------------------------------------------
9 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
10 |
11 | project = "sqlalchemy-dlock"
12 | copyright = "2023-2024, liu xue yan"
13 | author = "liu xue yan"
14 |
15 | # full version
16 | version = importlib.metadata.version(project)
17 | # major/minor version
18 | release = ".".join(version.split(".")[:2])
19 |
20 | # -- General configuration ---------------------------------------------------
21 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
22 |
23 | extensions = [
24 | "myst_parser",
25 | "sphinx.ext.autodoc",
26 | "sphinx.ext.mathjax",
27 | "sphinx.ext.napoleon",
28 | "sphinx.ext.githubpages",
29 | "sphinx.ext.intersphinx",
30 | "sphinx.ext.viewcode",
31 | "sphinx_tippy",
32 | "sphinx_inline_tabs",
33 | "sphinx_copybutton",
34 | "versionwarning.extension",
35 | ]
36 | source_suffix = {
37 | ".rst": "restructuredtext",
38 | ".md": "markdown",
39 | }
40 |
41 | templates_path = ["_templates"]
42 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
43 |
44 | # -- Options for HTML output -------------------------------------------------
45 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
46 | html_static_path = ["_static"]
47 | html_theme = "furo"
48 | html_theme_options = {
49 | "source_repository": "https://github.com/tanbro/sqlalchemy-dlock",
50 | "source_branch": "main",
51 | "source_directory": "docs/",
52 | "top_of_page_button": "edit",
53 | }
54 |
55 | # -- Options for autodoc ----------------------------------------------------
56 | # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#configuration
57 | # autodoc_mock_imports = []
58 |
59 | # Automatically extract typehints when specified and place them in
60 | # descriptions of the relevant function/method.
61 | autodoc_typehints = "both"
62 |
63 | # Don't show class signature with the class' name.
64 | # autodoc_class_signature = "separated"
65 |
66 | autoclass_content = "both"
67 | autodoc_member_order = "bysource"
68 |
69 | # -- Options for myst_parser extension ---------------------------------------
70 |
71 | myst_enable_extensions = [
72 | "amsmath",
73 | "attrs_inline",
74 | "colon_fence",
75 | "deflist",
76 | "dollarmath",
77 | "fieldlist",
78 | "html_image",
79 | "replacements",
80 | "smartquotes",
81 | "strikethrough",
82 | "substitution",
83 | "tasklist",
84 | ]
85 |
86 | # -- Options for intersphinx extension ---------------------------------------
87 |
88 | # Example configuration for intersphinx: refer to the Python standard library.
89 | intersphinx_mapping = {
90 | "python": ("https://docs.python.org/", None),
91 | "sphinx": ("https://docs.sqlalchemy.org/", None),
92 | }
93 |
94 | # -- Options for Napoleon settings ---------------------------------------
95 | napoleon_use_admonition_for_examples = True
96 | napoleon_use_admonition_for_notes = True
97 | napoleon_use_admonition_for_references = True
98 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. sqlalchemy-dlock documentation master file, created by
2 | sphinx-quickstart on Mon Jun 12 16:24:20 2023.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | ================
7 | sqlalchemy-dlock
8 | ================
9 |
10 | .. hlist::
11 | :columns: 2
12 |
13 | *
14 | ========== ========= =========
15 | Release Version Built at
16 | ========== ========= =========
17 | |release| |version| |today|
18 | ========== ========= =========
19 |
20 | *
21 | .. image:: https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml/badge.svg
22 | :alt: Python package
23 | :target: https://github.com/tanbro/sqlalchemy-dlock/actions/workflows/python-package.yml
24 |
25 | .. image:: https://img.shields.io/pypi/v/sqlalchemy-dlock
26 | :alt: PyPI
27 | :target: https://pypi.org/project/sqlalchemy-dlock/
28 |
29 | .. image:: https://readthedocs.org/projects/sqlalchemy-dlock/badge/?version=latest
30 | :alt: Documentation Status
31 | :target: https://sqlalchemy-dlock.readthedocs.io/en/latest/
32 |
33 | .. image:: https://codecov.io/gh/tanbro/sqlalchemy-dlock/branch/main/graph/badge.svg
34 | :alt: CodeCov
35 | :target: https://codecov.io/gh/tanbro/sqlalchemy-dlock
36 |
37 | .. rubric::
38 | `sqlalchemy-dlock` is a distributed-lock library based on Database and `SQLAlchemy <https://www.sqlalchemy.org/>`_.
39 |
40 | --------
41 | Contents
42 | --------
43 |
44 | .. toctree::
45 | :caption: Documentation
46 | :titlesonly:
47 | :maxdepth: 1
48 |
49 | README
50 | AUTHORS
51 | CHANGELOG
52 |
53 | .. toctree::
54 | :caption: API Reference
55 | :titlesonly:
56 | :maxdepth: 1
57 |
58 | apidocs/modules
59 |
60 | ------------------
61 | Indices and tables
62 | ------------------
63 |
64 | * :ref:`genindex`
65 | * :ref:`modindex`
66 |
67 | .. furo has no search page
68 | .. * :ref:`search`
69 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | importlib_metadata;python_version<"3.8"
2 |
3 | Sphinx>=7.2.6
4 | myst-parser
5 |
6 | furo
7 |
8 | linkify-it-py
9 | # sphinx-autodoc2
10 | sphinx-copybutton
11 | sphinx-inline-tabs
12 | sphinx_tippy
13 | sphinx-version-warning
14 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | build-backend = "setuptools.build_meta"
3 | requires = ["setuptools>=64", "setuptools-scm>=8"]
4 |
5 | [project]
6 | name = "sqlalchemy-dlock"
7 | readme = { file = 'README.md', content-type = 'text/markdown' }
8 | authors = [{ name = "liu xue yan", email = "liu_xue_yan@foxmail.com" }]
9 | description = "A distributed lock implementation based on SQLAlchemy"
10 | keywords = [
11 | "SQLAlchemy",
12 | "lock",
13 | "distributed",
14 | "distributed lock",
15 | "SQL",
16 | "database",
17 | "DBMS",
18 | "RDBMS",
19 | ]
20 | license = { text = "BSD-3-Clause" }
21 | classifiers = [
22 | "Development Status :: 5 - Production/Stable",
23 | "License :: OSI Approved :: BSD License",
24 | "Topic :: Database :: Front-Ends",
25 | "Intended Audience :: Developers",
26 | "Programming Language :: Python",
27 | ]
28 | dynamic = ["version"]
29 |
30 | # requires python version
31 | requires-python = ">=3.8"
32 | # requires
33 | dependencies = [
34 | "SQLAlchemy>=1.4,<3.0",
35 | "typing-extensions; python_version<'3.12'",
36 | ]
37 | # extra requires
38 | [project.optional-dependencies]
39 | # SQLAlchemy with asyncio, no engines
40 | asyncio = ["SQLAlchemy[asyncio]>=1.4,<3.0"]
41 | # MySQL
42 | mysqlclient = ["mysqlclient"]
43 | pymysql = ["pymysql"]
44 | # MySQL asyncio
45 | aiomysql = ["SQLAlchemy[asyncio]>=1.4,<3.0", "aiomysql"]
46 | # Postgres
47 | # psycopg2: sync
48 | psycopg2 = ["psycopg2>=2.8"] # psycopg2 compiling needed when install
49 | psycopg2-binary = [
50 | "psycopg2-binary>=2.8",
51 | ] # psycopg2 with pre-compiled C library
52 | # psycopg3: both sync and asyncio
53 | psycopg3 = ["SQLAlchemy>=2.0,<3.0", "psycopg"] # psycopg3 dynamically links to libpq
54 | psycopg3-binary = [
55 | "SQLAlchemy>=2.0,<3.0",
56 | "psycopg[binary]",
57 | ] # psycopg3 with pre-compiled C library
58 | psycopg3-c = [
59 | "SQLAlchemy>=2.0,<3.0",
60 | "psycopg[c]",
61 | ] # psycopg3 compiling needed when install
62 | # Postgres asyncio
63 | asyncpg = ["SQLAlchemy[asyncio]>=1.4,<3.0", "asyncpg"]
64 |
65 | # Project links
66 | [project.urls]
67 | homepage = "https://github.com/tanbro/sqlalchemy-dlock"
68 | documentation = "https://sqlalchemy-dlock.readthedocs.io/"
69 | repository = "https://github.com/tanbro/sqlalchemy-dlock.git"
70 |
71 | [tool.setuptools.packages.find]
72 | where = ["src"]
73 |
74 | [tool.setuptools_scm]
75 | write_to = "src/sqlalchemy_dlock/_version.py"
76 |
77 | [tool.setuptools.package-data]
78 | sqlalchemy_dlock = ["py.typed"]
79 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # The file defines requirements for the project's local development.
2 | # It's NOT package install dependencies.
3 |
4 | -e .[asyncio]
5 |
6 | -r docs/requirements.txt
7 | -r tests/requirements.txt
8 |
9 | pre-commit
10 |
--------------------------------------------------------------------------------
/scripts/run-test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Python unittest running script for the docker-compose tests.
4 | # DO NOT run it alone.
5 |
6 | set -eu
7 |
8 | export TEST_URLS="mysql://$MYSQL_USER:$MYSQL_PASSWORD@mysql/$MYSQL_DATABASE postgresql://postgres:$POSTGRES_PASSWORD@postgres/"
9 | export TEST_ASYNC_URLS="mysql+aiomysql://$MYSQL_USER:$MYSQL_PASSWORD@mysql/$MYSQL_DATABASE postgresql+asyncpg://postgres:$POSTGRES_PASSWORD@postgres/"
10 |
11 | /bin/bash scripts/wait-for-postgres.sh postgres "$POSTGRES_PASSWORD"
12 | /bin/bash scripts/wait-for-mysql.sh mysql "$MYSQL_DATABASE" "$MYSQL_USER" "$MYSQL_PASSWORD"
13 |
14 | export SETUPTOOLS_SCM_PRETEND_VERSION=0
15 | export PIP_DISABLE_PIP_VERSION_CHECK=1
16 | export PIP_ROOT_USER_ACTION=ignore
17 | export PIP_NO_WARN_SCRIPT_LOCATION=1
18 |
19 | PYTHON_LIST=(python3.8 python3.9 python3.10 python3.11 python3.12 python3.13)
20 | REQUIRES_LIST=("SQLAlchemy[asyncio]>=1.4.3,<2" "SQLAlchemy[asyncio]>=2,<3")
21 |
22 | for PYTHON in "${PYTHON_LIST[@]}"
23 | do
24 |     for REQUIRES in "${REQUIRES_LIST[@]}"
25 | do
26 | echo
27 | echo "---------------------------------------------------------------"
28 | echo "Begin of ${PYTHON} ${REQUIRES}"
29 | echo "---------------------------------------------------------------"
30 | echo
31 | TMPDIR=$(mktemp -d)
32 | trap 'rm -rf $TMPDIR' EXIT
33 | $PYTHON -m venv $TMPDIR
34 | (
35 | cd /workspace
36 |             $TMPDIR/bin/python -m pip install -e ".[asyncio]" -r tests/requirements-compose.txt "$REQUIRES"
37 | $TMPDIR/bin/python -m coverage run -m unittest -cfv
38 | $TMPDIR/bin/python -m coverage report
39 |         ) && rm -rf "$TMPDIR"
40 | echo
41 | echo "---------------------------------------------------------------"
42 | echo "End of ${PYTHON} ${REQUIRES}"
43 | echo "---------------------------------------------------------------"
44 | echo
45 | done
46 | done
47 |
--------------------------------------------------------------------------------
/scripts/wait-for-mysql.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | MYSQL_HOST="$1"
6 | MYSQL_DATABASE="$2"
7 | MYSQL_USER="$3"
8 | MYSQL_PASSWORD="$4"
9 | shift
10 |
11 | mysql --version
12 |
13 | until mysql -u "$MYSQL_USER" --password="$MYSQL_PASSWORD" -h "$MYSQL_HOST" -e "use $MYSQL_DATABASE"
14 | do
15 | >&2 echo "MySQL is unavailable - sleeping"
16 | sleep 5
17 | done
18 |
19 | >&2 echo "MySQL is up!"
20 |
--------------------------------------------------------------------------------
/scripts/wait-for-postgres.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | POSTGRES_HOST="$1"
6 | POSTGRES_PASSWORD="$2"
7 | shift
8 |
9 | psql --version
10 |
11 | until PGPASSWORD=$POSTGRES_PASSWORD psql -h "$POSTGRES_HOST" -U "postgres" -c '\q'
12 | do
13 | >&2 echo "Postgres is unavailable - sleeping"
14 | sleep 5
15 | done
16 |
17 | >&2 echo "Postgres is up!"
18 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/.gitignore:
--------------------------------------------------------------------------------
1 | _version.py
2 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Distributed lock based on Database and SQLAlchemy
3 | """
4 |
5 | from . import _version as version
6 | from ._version import __version__, __version_tuple__
7 | from .exceptions import SqlAlchemyDLockBaseException, SqlAlchemyDLockDatabaseError
8 | from .factory import create_async_sadlock, create_sadlock
9 | from .lock import BaseAsyncSadLock, BaseSadLock
10 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/exceptions.py:
--------------------------------------------------------------------------------
1 | __all__ = ["SqlAlchemyDLockBaseException", "SqlAlchemyDLockDatabaseError"]
2 |
3 |
4 | class SqlAlchemyDLockBaseException(Exception):
5 | pass
6 |
7 |
8 | class SqlAlchemyDLockDatabaseError(SqlAlchemyDLockBaseException):
9 | pass
10 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/factory.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Type, TypeVar, Union
3 |
4 | if sys.version_info < (3, 10): # pragma: no cover
5 | from typing_extensions import TypeGuard
6 | else: # pragma: no cover
7 | from typing import TypeGuard
8 |
9 | from sqlalchemy.engine import Connection
10 | from sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, async_scoped_session
11 | from sqlalchemy.orm import Session, scoped_session
12 |
13 | from .lock.base import AsyncConnT, BaseAsyncSadLock, BaseSadLock, ConnT
14 | from .registry import find_lock_class
15 |
16 | __all__ = ("create_sadlock", "create_async_sadlock")
17 |
18 |
19 | KTV = TypeVar("KTV")
20 |
21 |
22 | def create_sadlock(
23 | connection_or_session: ConnT, key: KTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
24 | ) -> BaseSadLock[KTV, ConnT]:
25 | """Create a database distributed lock object
26 |
27 |     All arguments will be passed to a sub-class of :class:`.BaseSadLock`, depending on the type of ``connection_or_session``'s SQLAlchemy engine.
28 |
29 | Args:
30 |
31 | connection_or_session:
32 | Connection or Session object SQL locking functions will be invoked on it.
33 |
34 | key:
35 | ID or name of the SQL locking function
36 |
37 | contextual_timeout:
38 | Timeout(seconds) for Context Managers.
39 |
40 | When called in a :keyword:`with` statement, the new created lock object will pass it to ``timeout`` argument of :meth:`.BaseSadLock.acquire`.
41 |
42 |         A :exc:`TimeoutError` will be thrown if the lock can not be acquired within ``contextual_timeout``
43 |
44 | Returns:
45 | New created lock object
46 |
47 | Type of the lock object is a sub-class of :class:`.BaseSadLock`, which depends on the passed-in SQLAlchemy `connection` or `session`.
48 |
49 |     MySQL and PostgreSQL connection/session are supported so far.
50 | """ # noqa: E501
51 | if isinstance(connection_or_session, Connection):
52 | engine_name = connection_or_session.engine.name
53 | elif isinstance(connection_or_session, (Session, scoped_session)):
54 | bind = connection_or_session.get_bind()
55 | if isinstance(bind, Connection):
56 | engine_name = bind.engine.name
57 | else:
58 | engine_name = bind.name
59 | else:
60 | raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
61 |
62 | class_ = find_lock_class(engine_name)
63 | if not is_sadlock_type(class_):
64 | raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
65 | return class_(connection_or_session, key, contextual_timeout=contextual_timeout, **kwargs)
66 |
67 |
68 | def create_async_sadlock(
69 | connection_or_session: AsyncConnT, key: KTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
70 | ) -> BaseAsyncSadLock[KTV, AsyncConnT]:
71 | """AsyncIO version of :func:`create_sadlock`"""
72 | if isinstance(connection_or_session, AsyncConnection):
73 | engine_name = connection_or_session.engine.name
74 | elif isinstance(connection_or_session, (AsyncSession, async_scoped_session)):
75 | bind = connection_or_session.get_bind()
76 | if isinstance(bind, Connection):
77 | engine_name = bind.engine.name
78 | else:
79 | engine_name = bind.name
80 | else:
81 | raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
82 |
83 | class_ = find_lock_class(engine_name, True)
84 | if not is_async_sadlock_type(class_):
85 | raise TypeError(f"Unsupported connection_or_session type: {type(connection_or_session)}")
86 | return class_(connection_or_session, key, contextual_timeout=contextual_timeout, **kwargs)
87 |
88 |
89 | def is_sadlock_type(cls: Type) -> TypeGuard[Type[BaseSadLock]]:
90 | """Check if the passed-in class type is :class:`.BaseSadLock` object"""
91 | return issubclass(cls, BaseSadLock)
92 |
93 |
94 | def is_async_sadlock_type(cls: Type) -> TypeGuard[Type[BaseAsyncSadLock]]:
95 | """Check if the passed-in class type is :class:`.BaseAsyncSadLock` object"""
96 | return issubclass(cls, BaseAsyncSadLock)
97 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/__init__.py:
--------------------------------------------------------------------------------
1 | from .base import BaseAsyncSadLock, BaseSadLock
2 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/base.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from abc import ABC, abstractmethod
3 | from threading import local
4 | from typing import Callable, Generic, Optional, TypeVar, Union
5 |
6 | if sys.version_info >= (3, 11): # pragma: no cover
7 | from typing import Self
8 | else: # pragma: no cover
9 | from typing_extensions import Self
10 |
11 | if sys.version_info < (3, 12): # pragma: no cover
12 | from typing_extensions import override
13 | else: # pragma: no cover
14 | from typing import override
15 |
16 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
17 |
18 | VKTV = TypeVar("VKTV")
19 | AKTV = TypeVar("AKTV")
20 | ConnT = TypeVar("ConnT", bound=ConnectionOrSessionT)
21 | AsyncConnT = TypeVar("AsyncConnT", bound=AsyncConnectionOrSessionT)
22 |
23 |
24 | class AbstractLockMixin(Generic[VKTV, AKTV], ABC):
25 | @abstractmethod
26 | def __init__(self, *, key: VKTV, convert: Optional[Callable[[VKTV], AKTV]] = None, **kwargs):
27 | pass
28 |
29 | @abstractmethod
30 | def get_actual_key(self) -> AKTV:
31 | pass
32 |
33 | @property
34 | def actual_key(self) -> AKTV:
35 | return self.get_actual_key()
36 |
37 |
38 | class BaseSadLock(AbstractLockMixin, Generic[VKTV, ConnT], local, ABC):
39 | """Base class of database lock implementation
40 |
41 | Note:
42 | * It's Thread-Local (:class:`threading.local`)
43 | * It's an abstract class, do not manual instantiate
44 |
45 | The :meth:`acquire` and :meth:`release` methods can be used as context managers for a :keyword:`with` statement.
46 | :meth:`acquire` will be called when the block is entered, and :meth:`release` will be called when the block is exited.
47 | Hence, the following snippet::
48 |
49 | with some_lock:
50 | # do something...
51 | pass
52 |
53 | is equivalent to::
54 |
55 | some_lock.acquire()
56 | try:
57 | # do something...
58 | pass
59 | finally:
60 | some_lock.release()
61 |
62 | Note:
63 | A :exc:`TimeoutError` will be thrown if acquire timeout in :keyword:`with` statement.
64 | """
65 |
66 | @override
67 | def __init__(
68 | self, connection_or_session: ConnT, key: VKTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
69 | ):
70 | """
71 | Args:
72 |
73 | connection_or_session: Connection or Session object SQL locking functions will be invoked on it
74 |
75 | key: ID or name of the SQL locking function
76 |
77 | contextual_timeout: Timeout(seconds) for Context Managers.
78 |
79 | When called in a :keyword:`with` statement, the new created lock object will pass it to ``timeout`` argument of :meth:`.BaseSadLock.acquire`.
80 |
81 | Attention:
82 | **ONLY** affects :keyword:`with` statements.
83 |
84 | Example:
85 | ::
86 |
87 | try:
88 | with create_sadlock(conn, k, contextual_timeout=5) as lck:
89 | # do something...
90 | pass
91 | except TimeoutError:
92 | # can not acquire after 5 seconds
93 | pass
94 |
95 | Note:
96 | The default value of `timeout` is still :data:`None`, when invoking :meth:`.acquire`
97 | """
98 | self._acquired = False
99 | self._connection_or_session = connection_or_session
100 | self._key = key
101 | self._contextual_timeout = contextual_timeout
102 |
103 | def __enter__(self) -> Self:
104 | if self._contextual_timeout is None: # timeout period is infinite
105 | self.acquire()
106 | elif not self.acquire(timeout=self._contextual_timeout): # the timeout period has elapsed and not acquired
107 | raise TimeoutError()
108 | return self
109 |
110 | def __exit__(self, exc_type, exc_value, exc_tb):
111 | self.close()
112 |
113 | def __str__(self) -> str:
114 | return "<{} {} key={} at 0x{:x}>".format(
115 | "locked" if self._acquired else "unlocked",
116 | self.__class__.__name__,
117 | self._key,
118 | id(self),
119 | )
120 |
121 | @property
122 | def connection_or_session(self) -> ConnT:
123 | """Connection or Session object SQL locking functions will be invoked on it
124 |
125 | It returns ``connection_or_session`` parameter of the class's constructor.
126 | """
127 | return self._connection_or_session
128 |
129 | @property
130 | def key(self) -> VKTV:
131 | """ID or name of the SQL locking function
132 |
133 | It returns ``key`` parameter of the class's constructor"""
134 | return self._key
135 |
136 | @property
137 | def locked(self) -> bool:
138 | """locked/unlocked state property
139 |
140 | :data:`True` if the lock is acquired, else :data:`False`
141 | """
142 | return self._acquired
143 |
144 | @abstractmethod
145 | def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
146 | """Acquire a lock, blocking or non-blocking.
147 |
148 | * With the ``block`` argument set to :data:`True` (the default), the method call will block until the lock is in an unlocked state, then set it to locked and return :data:`True`.
149 |
150 | * With the ``block`` argument set to :data:`False`, the method call does not block.
151 | If the lock is currently in a locked state, return :data:`False`; otherwise set the lock to a locked state and return :data:`True`.
152 |
153 | * When invoked with a positive, floating-point value for `timeout`, block for at most the number of seconds specified by timeout as long as the lock can not be acquired.
154 | Invocations with a negative value for `timeout` are equivalent to a `timeout` of zero.
155 | Invocations with a `timeout` value of ``None`` (the default) set the timeout period to infinite.
156 | The ``timeout`` parameter has no practical implications if the ``block`` argument is set to :data:`False` and is thus ignored.
157 | Returns :data:`True` if the lock has been acquired or :data:`False` if the timeout period has elapsed.
158 | """
159 | pass
160 |
161 | @abstractmethod
162 | def release(self, *args, **kwargs) -> None:
163 | """Release a lock.
164 |
165 | Since the class is thread-local, this cannot be called from other thread or process,
166 | and also can not be called from other connection.
167 | (Although PostgreSQL's shared advisory lock supports so).
168 |
169 | When the lock is locked, reset it to unlocked, and return.
170 | If any other threads are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed.
171 |
172 | When invoked on an unlocked lock, a :class:`ValueError` is raised.
173 |
174 | There is no return value.
175 | """
176 | pass
177 |
178 | def close(self, *args, **kwargs) -> None:
179 | """Same as :meth:`release`
180 |
181 | Except that the :class:`ValueError` is **NOT** raised when invoked on an unlocked lock.
182 |
183 | An invocation of this method is equivalent to::
184 |
185 |             if some_lock.locked:
186 | some_lock.release()
187 |
188 |         This method may be useful together with :func:`contextlib.closing`, when we need a :keyword:`with` statement, but don't want it to acquire at the beginning of the block.
189 |
190 | Example:
191 | ::
192 |
193 | # ...
194 |
195 | from contextlib import closing
196 | from sqlalchemy_dlock import create_sadlock
197 |
198 | # ...
199 |
200 | with closing(create_sadlock(some_connection, some_key)) as lock:
201 | # will **NOT** acquire at the begin of with-block
202 | assert not lock.locked
203 | # ...
204 | # lock when need
205 | lock.acquire()
206 | assert lock.locked
207 | # ...
208 |
209 | # `close` will be called at the end with-block
210 | assert not lock.locked
211 | """
212 | if self._acquired:
213 | self.release(*args, **kwargs)
214 |
215 |
216 | class BaseAsyncSadLock(AbstractLockMixin, Generic[VKTV, AsyncConnT], local, ABC):
217 | """Async version of :class:`.BaseSadLock`"""
218 |
219 | def __init__(
220 | self, connection_or_session: AsyncConnT, key: VKTV, /, contextual_timeout: Union[float, int, None] = None, **kwargs
221 | ):
222 | self._acquired = False
223 | self._connection_or_session = connection_or_session
224 | self._key = key
225 | self._contextual_timeout = contextual_timeout
226 |
227 | async def __aenter__(self) -> Self:
228 | if self._contextual_timeout is None:
229 | await self.acquire()
230 | elif not await self.acquire(timeout=self._contextual_timeout):
231 | # the timeout period has elapsed and not acquired
232 | raise TimeoutError()
233 | return self
234 |
235 | async def __aexit__(self, exc_type, exc_value, exc_tb):
236 | await self.close()
237 |
238 | def __str__(self):
239 | return "<{} {} key={} at 0x{:x}>".format(
240 | "locked" if self._acquired else "unlocked",
241 | self.__class__.__name__,
242 | self._key,
243 | id(self),
244 | )
245 |
246 | @property
247 | def connection_or_session(self) -> AsyncConnT:
248 | return self._connection_or_session
249 |
250 | @property
251 | def key(self) -> VKTV:
252 | return self._key
253 |
254 | @property
255 | def locked(self) -> bool:
256 | return self._acquired
257 |
258 | @abstractmethod
259 | async def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
260 | pass
261 |
262 | @abstractmethod
263 | async def release(self, *args, **kwargs) -> None:
264 | pass
265 |
266 | async def close(self, *args, **kwargs) -> None:
267 | if self._acquired:
268 | await self.release(*args, **kwargs)
269 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/mysql.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Callable, Optional, TypeVar, Union
3 |
4 | if sys.version_info < (3, 12): # pragma: no cover
5 | from typing_extensions import override
6 | else: # pragma: no cover
7 | from typing import override
8 |
9 | from ..exceptions import SqlAlchemyDLockDatabaseError
10 | from ..statement.mysql import LOCK, UNLOCK
11 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
12 | from .base import AbstractLockMixin, BaseAsyncSadLock, BaseSadLock
13 |
14 | MYSQL_LOCK_NAME_MAX_LENGTH = 64
15 |
16 | KT = Union[bytes, bytearray, memoryview, str, int, float]
17 | KTV = TypeVar("KTV", bound=KT)
18 |
19 |
20 | class MysqlSadLockMixin(AbstractLockMixin[KTV, str]):
21 | """A Mix-in class for MySQL named lock"""
22 |
23 | @override
24 | def __init__(self, *, key: KTV, convert: Optional[Callable[[KTV], str]] = None, **kwargs):
25 | """
26 | Args:
27 | key: MySQL named lock requires the key given by string.
28 |
29 | If ``key`` is not a :class:`str`:
30 |
31 | - When :class:`bytes` or alike, the constructor tries to decode it with default encoding::
32 |
33 | key = key.decode()
34 |
35 | - Otherwise the constructor force convert it to :class:`str`::
36 |
37 | key = str(key)
38 |
39 | - Or you can specify a ``convert`` function to that argument
40 |
41 |             convert: Custom function to convert ``key`` to required data type.
42 |
43 | Example:
44 | ::
45 |
46 | def convert(value) -> str:
47 | # get a string key by `value`
48 | return the_string_covert_from_value
49 | """
50 | if convert:
51 | self._actual_key = convert(key)
52 | else:
53 | self._actual_key = self.convert(key)
54 | if not isinstance(self._actual_key, str):
55 | raise TypeError("MySQL named lock requires the key given by string")
56 | if len(self._actual_key) > MYSQL_LOCK_NAME_MAX_LENGTH:
57 | raise ValueError(f"MySQL enforces a maximum length on lock names of {MYSQL_LOCK_NAME_MAX_LENGTH} characters.")
58 |
59 | @override
60 | def get_actual_key(self) -> str:
61 | """The actual key used in MySQL named lock"""
62 | return self._actual_key
63 |
64 | @classmethod
65 | def convert(cls, k) -> str:
66 | if isinstance(k, str):
67 | return k
68 | if isinstance(k, (int, float)):
69 | return str(k)
70 | if isinstance(k, (bytes, bytearray)):
71 | return k.decode()
72 | if isinstance(k, memoryview):
73 | return k.tobytes().decode()
74 | else:
75 | raise TypeError(type(k).__name__)
76 |
77 |
78 | class MysqlSadLock(MysqlSadLockMixin, BaseSadLock[str, ConnectionOrSessionT]):
79 | """A distributed lock implemented by MySQL named-lock
80 |
81 | See Also:
82 | https://dev.mysql.com/doc/refman/8.0/en/locking-functions.html
83 |
84 | Caution:
85 | To MySQL locking function, it is even possible for a given session to acquire multiple locks for the same name.
86 | Other sessions cannot acquire a lock with that name until the acquiring session releases all its locks for the name.
87 |     When performing multiple :meth:`.acquire` calls for a key on the **same** SQLAlchemy connection, later :meth:`.acquire` calls will succeed immediately without waiting and never block — this causes lock stacking instead!
88 | """ # noqa: E501
89 |
90 | @override
91 | def __init__(self, connection_or_session: ConnectionOrSessionT, key: KT, **kwargs):
92 | """
93 | Args:
94 | connection_or_session: :attr:`.BaseSadLock.connection_or_session`
95 | key: :attr:`.BaseSadLock.key`
96 | **kwargs: other named parameters pass to :class:`.BaseSadLock` and :class:`.MysqlSadLockMixin`
97 | """
98 | MysqlSadLockMixin.__init__(self, key=key, **kwargs)
99 | BaseSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)
100 |
101 | @override
102 | def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
103 | if self._acquired:
104 | raise ValueError("invoked on a locked lock")
105 | if block:
106 | # None: set the timeout period to infinite.
107 | if timeout is None:
108 | timeout = -1
109 | # negative value for `timeout` are equivalent to a `timeout` of zero
110 | elif timeout < 0:
111 | timeout = 0
112 | else:
113 | timeout = 0
114 | stmt = LOCK.params(str=self.key, timeout=timeout)
115 | ret_val = self.connection_or_session.execute(stmt).scalar_one()
116 | if ret_val == 1:
117 | self._acquired = True
118 | elif ret_val == 0:
119 |             pass  # timed out without acquiring the lock
120 | elif ret_val is None: # pragma: no cover
121 | raise SqlAlchemyDLockDatabaseError(f"An error occurred while attempting to obtain the lock {self.key!r}")
122 | else: # pragma: no cover
123 | raise SqlAlchemyDLockDatabaseError(f"GET_LOCK({self.key!r}, {timeout}) returns {ret_val}")
124 | return self._acquired
125 |
126 | @override
127 | def release(self):
128 | if not self._acquired:
129 | raise ValueError("invoked on an unlocked lock")
130 | stmt = UNLOCK.params(str=self.key)
131 | ret_val = self.connection_or_session.execute(stmt).scalar_one()
132 | if ret_val == 1:
133 | self._acquired = False
134 | elif ret_val == 0:
135 | self._acquired = False
136 | raise SqlAlchemyDLockDatabaseError(
137 | f"The named lock {self.key!r} was not established by this thread, and the lock is not released."
138 | )
139 | elif ret_val is None:
140 | self._acquired = False
141 | raise SqlAlchemyDLockDatabaseError(
142 | f"The named lock {self.key!r} did not exist, "
143 | "was never obtained by a call to GET_LOCK(), "
144 | "or has previously been released."
145 | )
146 | else: # pragma: no cover
147 | raise SqlAlchemyDLockDatabaseError(f"RELEASE_LOCK({self.key!r}) returns {ret_val}")
148 |
149 |
150 | class MysqlAsyncSadLock(MysqlSadLockMixin, BaseAsyncSadLock[str, AsyncConnectionOrSessionT]):
151 | """Async IO version of :class:`MysqlSadLock`"""
152 |
153 | @override
154 | def __init__(self, connection_or_session: AsyncConnectionOrSessionT, key, **kwargs):
155 | MysqlSadLockMixin.__init__(self, key=key, **kwargs)
156 | BaseAsyncSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)
157 |
158 | @override
159 | async def acquire(self, block: bool = True, timeout: Union[float, int, None] = None, *args, **kwargs) -> bool:
160 | if self._acquired:
161 | raise ValueError("invoked on a locked lock")
162 | if block:
163 | # None: set the timeout period to infinite.
164 | if timeout is None:
165 | timeout = -1
166 | # negative value for `timeout` are equivalent to a `timeout` of zero
167 | elif timeout < 0:
168 | timeout = 0
169 | else:
170 | timeout = 0
171 | stmt = LOCK.params(str=self.key, timeout=timeout)
172 | ret_val = (await self.connection_or_session.execute(stmt)).scalar_one()
173 | if ret_val == 1:
174 | self._acquired = True
175 | elif ret_val == 0:
176 |             pass  # timed out without acquiring the lock
177 | elif ret_val is None: # pragma: no cover
178 | raise SqlAlchemyDLockDatabaseError(f"An error occurred while attempting to obtain the lock {self.key!r}")
179 | else: # pragma: no cover
180 | raise SqlAlchemyDLockDatabaseError(f"GET_LOCK({self.key!r}, {timeout}) returns {ret_val}")
181 | return self._acquired
182 |
183 | @override
184 | async def release(self):
185 | if not self._acquired:
186 | raise ValueError("invoked on an unlocked lock")
187 | stmt = UNLOCK.params(str=self.key)
188 | ret_val = (await self.connection_or_session.execute(stmt)).scalar_one()
189 | if ret_val == 1:
190 | self._acquired = False
191 | elif ret_val == 0:
192 | self._acquired = False
193 | raise SqlAlchemyDLockDatabaseError(
194 | f"The named lock {self.key!r} was not established by this thread, and the lock is not released."
195 | )
196 | elif ret_val is None:
197 | self._acquired = False
198 | raise SqlAlchemyDLockDatabaseError(
199 | f"The named lock {self.key!r} did not exist, "
200 | "was never obtained by a call to GET_LOCK(), "
201 | "or has previously been released."
202 | )
203 | else: # pragma: no cover
204 | raise SqlAlchemyDLockDatabaseError(f"RELEASE_LOCK({self.key!r}) returns {ret_val}")
205 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/lock/postgresql.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | from hashlib import blake2b
4 | from time import sleep, time
5 | from typing import Callable, Optional, TypeVar, Union
6 | from warnings import catch_warnings, warn
7 |
8 | if sys.version_info < (3, 12): # pragma: no cover
9 | from typing_extensions import override
10 | else: # pragma: no cover
11 | from typing import override
12 |
13 | from ..exceptions import SqlAlchemyDLockDatabaseError
14 | from ..statement.postgresql import (
15 | LOCK,
16 | LOCK_SHARED,
17 | LOCK_XACT,
18 | LOCK_XACT_SHARED,
19 | SLEEP_INTERVAL_DEFAULT,
20 | SLEEP_INTERVAL_MIN,
21 | TRY_LOCK,
22 | TRY_LOCK_SHARED,
23 | TRY_LOCK_XACT,
24 | TRY_LOCK_XACT_SHARED,
25 | UNLOCK,
26 | UNLOCK_SHARED,
27 | )
28 | from ..typing import AsyncConnectionOrSessionT, ConnectionOrSessionT
29 | from .base import AbstractLockMixin, BaseAsyncSadLock, BaseSadLock
30 |
31 | KT = Union[bytes, bytearray, memoryview, str, int, float]
32 | KTV = TypeVar("KTV", bound=KT)
33 |
34 |
35 | class PostgresqlSadLockMixin(AbstractLockMixin[KTV, int]):
36 | """A Mix-in class for PostgreSQL advisory lock"""
37 |
38 | @override
39 | def __init__(
40 | self, *, key: KTV, convert: Optional[Callable[[KTV], int]] = None, shared: bool = False, xact: bool = False, **kwargs
41 | ):
42 | """
43 | Args:
44 | key: PostgreSQL advisory lock requires the key given by ``INT64``.
45 |
46 | * When ``key`` is :class:`int`, the constructor tries to ensure it to be ``INT64``.
47 | :class:`OverflowError` is raised if too big or too small for that.
48 |
49 | * When ``key`` is :class:`str` or :class:`bytes` or alike, the constructor calculates its checksum by :func:`hashlib.blake2b`, and takes the hash result integer value as actual key.
50 |
51 | * Or you can specify a ``convert`` function to that argument::
52 |
53 | def convert(val: Any) -> int:
54 | int64_key: int = do_sth(val)
55 | return int64_key
56 |
57 | shared: :attr:`.shared`
58 | xact: :attr:`.xact`
59 |             convert: Custom function to convert ``key`` to required data type.
60 | """
61 | if convert:
62 | self._actual_key = convert(key)
63 | else:
64 | self._actual_key = self.convert(key)
65 | self._actual_key = self.ensure_int64(self._actual_key)
66 | #
67 | self._shared = bool(shared)
68 | self._xact = bool(xact)
69 | #
70 | self._stmt_unlock = None
71 | if not shared and not xact:
72 | self._stmt_lock = LOCK.params(key=self._actual_key)
73 | self._stmt_try_lock = TRY_LOCK.params(key=self._actual_key)
74 | self._stmt_unlock = UNLOCK.params(key=self._actual_key)
75 | elif shared and not xact:
76 | self._stmt_lock = LOCK_SHARED.params(key=self._actual_key)
77 | self._stmt_try_lock = TRY_LOCK_SHARED.params(key=self._actual_key)
78 | self._stmt_unlock = UNLOCK_SHARED.params(key=self._actual_key)
79 | elif not shared and xact:
80 | self._stmt_lock = LOCK_XACT.params(key=self._actual_key)
81 | self._stmt_try_lock = TRY_LOCK_XACT.params(key=self._actual_key)
82 | else:
83 | self._stmt_lock = LOCK_XACT_SHARED.params(key=self._actual_key)
84 | self._stmt_try_lock = TRY_LOCK_XACT_SHARED.params(key=self._actual_key)
85 |
86 | @override
87 | def get_actual_key(self) -> int:
88 |         """The actual key used in PostgreSQL advisory lock"""
89 | return self._actual_key
90 |
91 | @classmethod
92 | def convert(cls, k) -> int:
93 | """To int64"""
94 | if isinstance(k, int):
95 | return k
96 | if isinstance(k, str):
97 | d = k.encode()
98 | elif isinstance(k, (bytes, bytearray)):
99 | d = k
100 | elif isinstance(k, memoryview):
101 | d = k.tobytes()
102 | else:
103 | raise TypeError(type(k).__name__)
104 | return int.from_bytes(blake2b(d, digest_size=8).digest(), sys.byteorder, signed=True)
105 |
106 | @classmethod
107 | def ensure_int64(cls, i: int) -> int:
108 | """ensure the integer in PostgreSQL advisory lock's range (Signed INT64)
109 |
110 | * max of signed int64: ``2**63-1`` (``+0x7FFF_FFFF_FFFF_FFFF``)
111 | * min of signed int64: ``-2**63`` (``-0x8000_0000_0000_0000``)
112 |
113 | Returns:
114 | Signed int64 key
115 | """
116 | ## no force convert UINT greater than 2**63-1 to SINT
117 | # if i > 0x7FFF_FFFF_FFFF_FFFF:
118 | # return int.from_bytes(i.to_bytes(8, byteorder, signed=False), byteorder, signed=True)
119 | if not isinstance(i, int):
120 | raise TypeError(f"int type expected, but actual type is {type(i)}")
121 | if i > 0x7FFF_FFFF_FFFF_FFFF:
122 | raise OverflowError("int too big")
123 | if i < -0x8000_0000_0000_0000:
124 | raise OverflowError("int too small")
125 | return i
126 |
127 | @property
128 | def shared(self) -> bool:
129 | """Is the advisory lock shared or exclusive"""
130 | return self._shared
131 |
132 | @property
133 | def xact(self) -> bool:
134 | """Is the advisory lock transaction level or session level"""
135 | return self._xact
136 |
137 | @property
138 | def actual_key(self) -> int:
139 | """The actual key used in PostgreSQL advisory lock"""
140 | return self._actual_key
141 |
142 |
class PostgresqlSadLock(PostgresqlSadLockMixin, BaseSadLock[KT, ConnectionOrSessionT]):
    """A distributed lock implemented by PostgreSQL advisory lock

    See also:
        https://www.postgresql.org/docs/current/explicit-locking.html#ADVISORY-LOCKS

    Tip:
        Locks can be either shared or exclusive: a shared lock does not conflict with other shared locks on the same resource, only with exclusive locks.
        Locks can be taken at session level (so that they are held until released or the session ends) or at transaction level (so that they are held until the current transaction ends; there is no provision for manual release).
        Multiple session-level lock requests stack, so that if the same resource identifier is locked three times there must then be three unlock requests to release the resource in advance of session end.
    """

    @override
    def __init__(self, connection_or_session: ConnectionOrSessionT, key: KT, **kwargs):
        """
        Args:
            connection_or_session: see :attr:`.BaseSadLock.connection_or_session`
            key: :attr:`.BaseSadLock.key`
            shared: :attr:`.PostgresqlSadLockMixin.shared`
            xact: :attr:`.PostgresqlSadLockMixin.xact`
            convert: :class:`.PostgresqlSadLockMixin`
            **kwargs: other named parameters pass to :class:`.BaseSadLock` and :class:`.PostgresqlSadLockMixin`
        """
        # The mixin validates/converts the key; the base class then stores the
        # converted integer key so ``self.key`` matches what PostgreSQL sees.
        PostgresqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    def acquire(
        self,
        block: bool = True,
        timeout: Union[float, int, None] = None,
        interval: Union[float, int, None] = None,
        *args,
        **kwargs,
    ) -> bool:
        """
        See Also:
            :meth:`.BaseSadLock.acquire`

        Attention:
            PostgreSQL's advisory lock has no timeout mechanism in itself.
            When ``timeout`` is a non-negative number, we simulate it by **looping** and **sleeping**.

            The ``interval`` argument specifies the sleep seconds(``1`` by default).

            That is:
            The actual timeout won't be precise when ``interval`` is big;
            while small ``interval`` will cause high CPU usage and frequent SQL execution.
        """
        if self._acquired:
            raise ValueError("invoked on a locked lock")
        if block:
            if timeout is None:
                # None: set the timeout period to infinite.
                self.connection_or_session.execute(self._stmt_lock).all()
                self._acquired = True
            else:
                # negative value for `timeout` are equivalent to a `timeout` of zero.
                if timeout < 0:
                    timeout = 0
                interval = SLEEP_INTERVAL_DEFAULT if interval is None else interval
                if interval < SLEEP_INTERVAL_MIN:  # pragma: no cover
                    raise ValueError("interval too small")
                ts_begin = time()
                # Poll the non-blocking variant until success or expiry.
                while True:
                    ret_val = self.connection_or_session.execute(self._stmt_try_lock).scalar_one()
                    if ret_val:  # succeed
                        self._acquired = True
                        break
                    if time() - ts_begin > timeout:  # expired
                        break
                    sleep(interval)
        else:
            # This will either obtain the lock immediately and return true,
            # or return false without waiting if the lock cannot be acquired immediately.
            ret_val = self.connection_or_session.execute(self._stmt_try_lock).scalar_one()
            self._acquired = bool(ret_val)
        #
        return self._acquired

    @override
    def release(self):
        """Release a held session-level lock.

        Raises:
            ValueError: if the lock is not currently held by this object.
            SqlAlchemyDLockDatabaseError: if PostgreSQL reports the lock was not held.

        Warns:
            RuntimeWarning: for transaction-level (``xact``) locks, which
            PostgreSQL releases only at transaction end — nothing is executed.
        """
        if not self._acquired:
            raise ValueError("invoked on an unlocked lock")
        # ``_stmt_unlock`` is None for transaction-level locks (no unlock function exists).
        if self._stmt_unlock is None:
            warn(
                "PostgreSQL transaction level advisory locks are held until the current transaction ends; "
                "there is no provision for manual release.",
                RuntimeWarning,
            )
            return
        ret_val = self.connection_or_session.execute(self._stmt_unlock).scalar_one()
        if ret_val:
            self._acquired = False
        else:  # pragma: no cover
            # Mark unlocked anyway so the object is not stuck in a "locked" state.
            self._acquired = False
            raise SqlAlchemyDLockDatabaseError(f"The advisory lock {self.key!r} was not held.")

    @override
    def close(self):
        """Release the lock if held, silencing the transaction-level warning.

        Unlike :meth:`release`, closing a transaction-level (``xact``) lock is
        not reported as a :class:`RuntimeWarning` — such locks can only be
        released by the database when the transaction ends.
        """
        if self._acquired:
            # Bug fix: ``catch_warnings()`` (and ``catch_warnings(category=...)``
            # without an ``action``) installs no filter, so the RuntimeWarning
            # emitted by ``release`` was never actually suppressed. An explicit
            # ``simplefilter`` works on every supported Python version, which
            # also removes the previous ``sys.version_info`` branch.
            from warnings import simplefilter

            with catch_warnings():
                simplefilter("ignore", RuntimeWarning)
                return self.release()
250 |
251 |
class PostgresqlAsyncSadLock(PostgresqlSadLockMixin, BaseAsyncSadLock[int, AsyncConnectionOrSessionT]):
    """Async IO version of :class:`PostgresqlSadLock`"""

    @override
    def __init__(self, connection_or_session: AsyncConnectionOrSessionT, key, **kwargs):
        # Same construction sequence as the sync class: the mixin converts the
        # key, then the base class stores the converted integer key.
        PostgresqlSadLockMixin.__init__(self, key=key, **kwargs)
        BaseAsyncSadLock.__init__(self, connection_or_session, self.actual_key, **kwargs)

    @override
    async def acquire(
        self,
        block: bool = True,
        timeout: Union[float, int, None] = None,
        interval: Union[float, int, None] = None,
        *args,
        **kwargs,
    ) -> bool:
        """Async counterpart of :meth:`PostgresqlSadLock.acquire`.

        The timeout is likewise simulated by polling the non-blocking
        statement and sleeping ``interval`` seconds between attempts.
        """
        if self._acquired:
            raise ValueError("invoked on a locked lock")
        if block:
            if timeout is None:
                # None: set the timeout period to infinite.
                _ = (await self.connection_or_session.execute(self._stmt_lock)).all()
                self._acquired = True
            else:
                # negative value for `timeout` are equivalent to a `timeout` of zero.
                if timeout < 0:
                    timeout = 0
                interval = SLEEP_INTERVAL_DEFAULT if interval is None else interval
                if interval < SLEEP_INTERVAL_MIN:  # pragma: no cover
                    raise ValueError("interval too small")
                ts_begin = time()
                # Poll the non-blocking variant until success or expiry.
                while True:
                    ret_val = (await self.connection_or_session.execute(self._stmt_try_lock)).scalar_one()
                    if ret_val:  # succeed
                        self._acquired = True
                        break
                    if time() - ts_begin > timeout:  # expired
                        break
                    await asyncio.sleep(interval)
        else:
            # This will either obtain the lock immediately and return true,
            # or return false without waiting if the lock cannot be acquired immediately.
            ret_val = (await self.connection_or_session.execute(self._stmt_try_lock)).scalar_one()
            self._acquired = bool(ret_val)
        #
        return self._acquired

    @override
    async def release(self):
        """Async counterpart of :meth:`PostgresqlSadLock.release`."""
        if not self._acquired:
            raise ValueError("invoked on an unlocked lock")
        # ``_stmt_unlock`` is None for transaction-level locks (no unlock function exists).
        if self._stmt_unlock is None:
            warn(
                "PostgreSQL transaction level advisory locks are held until the current transaction ends; "
                "there is no provision for manual release.",
                RuntimeWarning,
            )
            return
        ret_val = (await self.connection_or_session.execute(self._stmt_unlock)).scalar_one()
        if ret_val:
            self._acquired = False
        else:  # pragma: no cover
            # Mark unlocked anyway so the object is not stuck in a "locked" state.
            self._acquired = False
            raise SqlAlchemyDLockDatabaseError(f"The advisory lock {self.key!r} was not held.")

    @override
    async def close(self):
        """Release the lock if held, silencing the transaction-level warning."""
        if self._acquired:
            # Bug fix: ``catch_warnings()`` (and ``catch_warnings(category=...)``
            # without an ``action``) installs no filter, so the RuntimeWarning
            # emitted by ``release`` was never actually suppressed. An explicit
            # ``simplefilter`` works on every supported Python version, which
            # also removes the previous ``sys.version_info`` branch.
            from warnings import simplefilter

            with catch_warnings():
                simplefilter("ignore", RuntimeWarning)
                return await self.release()
327 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tanbro/sqlalchemy-dlock/ce5d2173c278d593cd4b5338bd24dd47478eb69a/src/sqlalchemy_dlock/py.typed
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/registry.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import sys
4 | from importlib import import_module
5 | from string import Template
6 | from typing import TYPE_CHECKING, Type, Union
7 |
8 | if sys.version_info >= (3, 9): # pragma: no cover
9 | from functools import cache
10 | else: # pragma: no cover
11 | from functools import lru_cache as cache
12 |
13 | if TYPE_CHECKING:
14 | from .lock.base import BaseAsyncSadLock, BaseSadLock
15 |
16 |
# Maps a SQLAlchemy dialect name to the location of its synchronous lock class.
# "module" is resolved relative to "package"; the "${package}" placeholder is
# substituted with this package's name by :func:`find_lock_class`, keeping the
# registry valid regardless of how the package is installed or renamed.
REGISTRY = {
    "mysql": {
        "module": ".lock.mysql",
        "package": "${package}",  # module name relative to the package
        "class": "MysqlSadLock",
    },
    "postgresql": {
        "module": ".lock.postgresql",
        "package": "${package}",  # module name relative to the package
        "class": "PostgresqlSadLock",
    },
}

# Same structure as REGISTRY, but pointing at the asyncio lock classes.
ASYNCIO_REGISTRY = {
    "mysql": {
        "module": ".lock.mysql",
        "package": "${package}",  # module name relative to the package
        "class": "MysqlAsyncSadLock",
    },
    "postgresql": {
        "module": ".lock.postgresql",
        "package": "${package}",  # module name relative to the package
        "class": "PostgresqlAsyncSadLock",
    },
}
42 |
43 |
@cache
def find_lock_class(engine_name, is_asyncio=False) -> Type[Union[BaseSadLock, BaseAsyncSadLock]]:
    """Look up the lock class registered for a database dialect.

    Args:
        engine_name: SQLAlchemy dialect name, e.g. ``"mysql"`` or ``"postgresql"``.
        is_asyncio: when true, consult the asyncio registry instead of the sync one.

    Returns:
        The lock class, lazily imported from its registered module.

    Raises:
        KeyError: if no lock class is registered for ``engine_name``.
    """
    registry = ASYNCIO_REGISTRY if is_asyncio else REGISTRY
    entry = registry[engine_name]
    pkg = entry.get("package")
    if pkg:
        # Resolve the "${package}" placeholder to this package's actual name.
        pkg = Template(pkg).safe_substitute(package=__package__)
    return getattr(import_module(entry["module"], pkg), entry["class"])
54 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tanbro/sqlalchemy-dlock/ce5d2173c278d593cd4b5338bd24dd47478eb69a/src/sqlalchemy_dlock/statement/__init__.py
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/mysql.py:
--------------------------------------------------------------------------------
from sqlalchemy import text

# MySQL GET_LOCK(name, timeout): blocks up to :timeout seconds;
# returns 1 on success, 0 on timeout.
LOCK = text("SELECT GET_LOCK(:str, :timeout)")
# MySQL RELEASE_LOCK(name): returns 1 when released, 0 when the lock is held
# by another session, NULL when the lock does not exist.
UNLOCK = text("SELECT RELEASE_LOCK(:str)")
5 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/statement/postgresql.py:
--------------------------------------------------------------------------------
from sqlalchemy import text

# Session-level blocking acquisition: waits until the lock is available.
LOCK = text("SELECT pg_advisory_lock(:key)")
LOCK_SHARED = text("SELECT pg_advisory_lock_shared(:key)")
# Transaction-level blocking acquisition: released automatically at transaction end.
LOCK_XACT = text("SELECT pg_advisory_xact_lock(:key)")
LOCK_XACT_SHARED = text("SELECT pg_advisory_xact_lock_shared(:key)")

# Non-blocking variants: return a boolean instead of waiting.
TRY_LOCK = text("SELECT pg_try_advisory_lock(:key)")
TRY_LOCK_SHARED = text("SELECT pg_try_advisory_lock_shared(:key)")
TRY_LOCK_XACT = text("SELECT pg_try_advisory_xact_lock(:key)")
TRY_LOCK_XACT_SHARED = text("SELECT pg_try_advisory_xact_lock_shared(:key)")

# Session-level release; transaction-level locks have no unlock function.
UNLOCK = text("SELECT pg_advisory_unlock(:key)")
UNLOCK_SHARED = text("SELECT pg_advisory_unlock_shared(:key)")


# Defaults for the sleep/poll loop that simulates an acquire timeout
# (PostgreSQL advisory locks have no native timeout).
SLEEP_INTERVAL_DEFAULT = 1
SLEEP_INTERVAL_MIN = 0.1
19 |
--------------------------------------------------------------------------------
/src/sqlalchemy_dlock/typing.py:
--------------------------------------------------------------------------------
from typing import Union

from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import AsyncConnection, AsyncSession, async_scoped_session
from sqlalchemy.orm import Session, scoped_session

# Executable targets accepted by the synchronous lock classes.
ConnectionOrSessionT = Union[Connection, Session, scoped_session]
# Executable targets accepted by the asyncio lock classes.
AsyncConnectionOrSessionT = Union[AsyncConnection, AsyncSession, async_scoped_session]
9 |
--------------------------------------------------------------------------------
/tests/.dockerignore:
--------------------------------------------------------------------------------
1 | *
2 | **/*
3 |
--------------------------------------------------------------------------------
/tests/Dockerfile:
--------------------------------------------------------------------------------
1 | # build a base python image for multiple-version tests
2 |
3 | FROM quay.io/pypa/manylinux_2_28_x86_64
4 | RUN --mount=type=cache,target=/var/cache/dnf \
5 | dnf install -y mysql mysql-devel postgresql libpq-devel
6 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
import logging
import sys


# Configure test-wide logging once at package import: INFO level to stderr,
# with a compact single-letter level indicator.
logging.basicConfig(
    level=logging.INFO,
    stream=sys.stderr,
    format="%(asctime)s [%(levelname).1s] %(name)s %(message)s",
)
10 |
--------------------------------------------------------------------------------
/tests/asyncio/__init__.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import platform
3 |
4 | # Psycopg cannot use the 'ProactorEventLoop' to run in async mode.
5 | # use a compatible event loop, for instance by setting 'asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())'
6 | if platform.system() == "Windows":
7 | asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type: ignore[attr-defined]
8 |
--------------------------------------------------------------------------------
/tests/asyncio/engines.py:
--------------------------------------------------------------------------------
1 | from os import getenv
2 | from typing import List
3 |
4 | from dotenv import load_dotenv
5 | from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine
6 |
7 | __all__ = ["create_engines", "dispose_engines", "get_engines"]
8 |
9 |
10 | _ENGINES: List[AsyncEngine] = []
11 |
12 |
def create_engines():
    """Create AsyncEngine objects from ``TEST_ASYNC_URLS`` (or built-in defaults),
    append them to the module-level cache, and return the cache."""
    load_dotenv()

    default_urls = "mysql+aiomysql://test:test@127.0.0.1/test postgresql+asyncpg://postgres:test@127.0.0.1/"
    url_list = (getenv("TEST_ASYNC_URLS") or default_urls).split()

    # Note: only appends to _ENGINES, so no `global` statement is needed.
    _ENGINES.extend(create_async_engine(u) for u in url_list)

    return _ENGINES
27 |
28 |
async def dispose_engines():
    """Dispose every cached engine and empty the module-level cache.

    Clearing ``_ENGINES`` matters because ``create_engines`` appends on every
    call: without it, each test's setUp/tearDown cycle would leave the
    previously disposed engines in the cache, and ``get_engines`` would keep
    handing them out alongside the fresh ones.
    """
    while _ENGINES:
        await _ENGINES.pop().dispose()
32 |
33 |
def get_engines():
    """Return the list of engines previously built by :func:`create_engines`."""
    return _ENGINES
36 |
--------------------------------------------------------------------------------
/tests/asyncio/test_basic.py:
--------------------------------------------------------------------------------
1 | from contextlib import AsyncExitStack
2 | from multiprocessing import cpu_count
3 | from random import randint
4 | from secrets import token_bytes, token_hex
5 | from unittest import IsolatedAsyncioTestCase
6 | from uuid import uuid4
7 |
8 | from sqlalchemy_dlock import create_async_sadlock
9 |
10 | from .engines import create_engines, dispose_engines, get_engines
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
15 | class BasicTestCase(IsolatedAsyncioTestCase):
16 | def setUp(self):
17 | create_engines()
18 |
19 | async def asyncTearDown(self):
20 | await dispose_engines()
21 |
22 | async def test_enter_exit(self):
23 | for engine in get_engines():
24 | key = uuid4().hex
25 | async with engine.connect() as conn:
26 | assert conn is not None
27 | lock = create_async_sadlock(conn, key)
28 | self.assertFalse(lock.locked)
29 | await lock.acquire()
30 | self.assertTrue(lock.locked)
31 | await lock.release()
32 | self.assertFalse(lock.locked)
33 |
34 | async def test_with_statement(self):
35 | for engine in get_engines():
36 | async with engine.connect() as conn:
37 | assert conn is not None
38 | key = uuid4().hex
39 | async with create_async_sadlock(conn, key) as lock:
40 | self.assertTrue(lock.locked)
41 | self.assertFalse(lock.locked)
42 |
43 | async def test_timeout_in_with_statement(self):
44 | for engine in get_engines():
45 | async with AsyncExitStack() as stack:
46 | conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]
47 | key = uuid4().hex
48 | lock0 = create_async_sadlock(conn0, key)
49 | self.assertFalse(lock0.locked)
50 | r = await lock0.acquire(False)
51 | self.assertTrue(r)
52 | self.assertTrue(lock0.locked)
53 | with self.assertRaises(TimeoutError):
54 | async with create_async_sadlock(conn1, key, contextual_timeout=1):
55 | pass
56 | self.assertTrue(lock0.locked)
57 | await lock0.release()
58 | self.assertFalse(lock0.locked)
59 |
60 | async def test_many_str_key(self):
61 | for engine in get_engines():
62 | async with engine.connect() as conn:
63 | assert conn is not None
64 | for _ in range(100):
65 | key = uuid4().hex + uuid4().hex
66 | async with create_async_sadlock(conn, key) as lock:
67 | self.assertTrue(lock.locked)
68 | self.assertFalse(lock.locked)
69 |
70 | async def test_many_int_key(self):
71 | for engine in get_engines():
72 | async with engine.connect() as conn:
73 | assert conn is not None
74 | for _ in range(100):
75 | key = randint(-0x8000_0000_0000_0000, 0x7FFF_FFFF_FFFF_FFFF)
76 | async with create_async_sadlock(conn, key) as lock:
77 | self.assertTrue(lock.locked)
78 | self.assertFalse(lock.locked)
79 |
80 | async def test_many_bytes_key(self):
81 | for engine in get_engines():
82 | for _ in range(100):
83 | async with engine.connect() as conn:
84 | if engine.name == "mysql":
85 | key = token_hex().encode()
86 | elif engine.name == "postgresql":
87 | key = token_bytes()
88 | else:
89 | raise NotImplementedError()
90 | async with create_async_sadlock(conn, key) as lock:
91 | self.assertTrue(lock.locked)
92 | self.assertFalse(lock.locked)
93 |
94 | async def test_invoke_locked_lock(self):
95 | for engine in get_engines():
96 | async with engine.connect() as conn:
97 | assert conn is not None
98 | key = uuid4().hex
99 | async with create_async_sadlock(conn, key) as lock:
100 | self.assertTrue(lock.locked)
101 | with self.assertRaisesRegex(ValueError, "invoked on a locked lock"):
102 | await lock.acquire()
103 | self.assertFalse(lock.locked)
104 |
105 | async def test_invoke_unlocked_lock(self):
106 | for engine in get_engines():
107 | async with engine.connect() as conn:
108 | assert conn is not None
109 | key = uuid4().hex
110 | lock = create_async_sadlock(conn, key)
111 | self.assertFalse(lock.locked)
112 | with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
113 | await lock.release()
114 | self.assertFalse(lock.locked)
115 |
116 | async def test_timeout_positive(self):
117 | for engine in get_engines():
118 | key = uuid4().hex
119 | for _ in range(CPU_COUNT + 1):
120 | async with engine.connect() as conn:
121 | assert conn is not None
122 | lock = create_async_sadlock(conn, key)
123 | try:
124 | self.assertFalse(lock.locked)
125 | r = await lock.acquire(timeout=randint(1, 1024))
126 | self.assertTrue(r)
127 | self.assertTrue(lock.locked)
128 | finally:
129 | await lock.release()
130 | self.assertFalse(lock.locked)
131 |
132 | async def test_timeout_zero(self):
133 | for engine in get_engines():
134 | async with engine.connect() as conn:
135 | assert conn is not None
136 | key = uuid4().hex
137 | lock = create_async_sadlock(conn, key)
138 | try:
139 | self.assertFalse(lock.locked)
140 | r = await lock.acquire(timeout=0)
141 | self.assertTrue(r)
142 | self.assertTrue(lock.locked)
143 | finally:
144 | await lock.release()
145 | self.assertFalse(lock.locked)
146 |
147 | async def test_timeout_negative(self):
148 | for engine in get_engines():
149 | for _ in range(CPU_COUNT + 1):
150 | async with engine.connect() as conn:
151 | assert conn is not None
152 | key = uuid4().hex
153 | lock = create_async_sadlock(conn, key)
154 | try:
155 | r = await lock.acquire(timeout=-1 * randint(1, 1024))
156 | self.assertTrue(r)
157 | finally:
158 | await lock.release()
159 | self.assertFalse(lock.locked)
160 |
161 | async def test_timeout_none(self):
162 | for engine in get_engines():
163 | for _ in range(CPU_COUNT + 1):
164 | async with engine.connect() as conn:
165 | assert conn is not None
166 | key = uuid4().hex
167 | lock = create_async_sadlock(conn, key)
168 | try:
169 | r = await lock.acquire(timeout=None)
170 | self.assertTrue(r)
171 | finally:
172 | await lock.release()
173 | self.assertFalse(lock.locked)
174 |
175 | async def test_enter_locked(self):
176 | for engine in get_engines():
177 | key = uuid4().hex
178 | async with AsyncExitStack() as stack:
179 | conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]
180 |
181 | lock0 = create_async_sadlock(conn0, key)
182 | self.assertFalse(lock0.locked)
183 | r = await lock0.acquire(False)
184 | self.assertTrue(r)
185 | self.assertTrue(lock0.locked)
186 |
187 | lock1 = create_async_sadlock(conn1, key)
188 | self.assertFalse(lock1.locked)
189 | r = await lock1.acquire(block=False)
190 | self.assertFalse(r)
191 | self.assertFalse(lock1.locked)
192 |
193 | self.assertTrue(lock0.locked)
194 | await lock0.release()
195 | self.assertFalse(lock0.locked)
196 |
197 | r = await lock1.acquire(False)
198 | self.assertTrue(r)
199 | self.assertTrue(lock1.locked)
200 | await lock1.release()
201 | self.assertFalse(lock1.locked)
202 |
203 | async def test_release_unlocked_error(self):
204 | for engine in get_engines():
205 | key = uuid4().hex
206 | async with AsyncExitStack() as stack:
207 | conn0, conn1 = [await stack.enter_async_context(engine.connect()) for _ in range(2)]
208 |
209 | lock0 = create_async_sadlock(conn0, key)
210 | r = await lock0.acquire(False)
211 | self.assertTrue(r)
212 | self.assertTrue(lock0.locked)
213 |
214 | lock1 = create_async_sadlock(conn1, key)
215 | with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
216 | await lock1.release()
217 |
--------------------------------------------------------------------------------
/tests/asyncio/test_concurrency.py:
--------------------------------------------------------------------------------
1 | # https://github.com/sqlalchemy/sqlalchemy/issues/5581
2 | #
3 | # Multiple Co-routines of SQL executions on a same Engine's Connection/Session will case a deadlock.
4 | # So we shall do that on different Engine objects!
5 |
6 |
7 | import asyncio
8 | from time import time
9 | from unittest import IsolatedAsyncioTestCase
10 | from uuid import uuid4
11 |
12 | from sqlalchemy.ext.asyncio import create_async_engine
13 |
14 | from sqlalchemy_dlock import create_async_sadlock
15 |
16 | from .engines import create_engines, dispose_engines, get_engines
17 |
18 |
class ConcurrencyTestCase(IsolatedAsyncioTestCase):
    """A lock held via one engine must block acquisition attempted via another engine."""

    def setUp(self):
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_timeout(self):
        # coro1 holds the lock for `delay` seconds; coro2 (on a separate
        # engine — see the module-top comment about per-connection deadlocks)
        # tries to acquire it with a shorter `timeout` and must fail.
        key = uuid4().hex
        for engine in get_engines():
            delay = 3
            timeout = 1
            event = asyncio.Event()
            engine1 = create_async_engine(engine.url)
            engine2 = create_async_engine(engine.url)
            try:

                async def coro1():
                    # Holder: take the lock, signal coro2, keep it for `delay` seconds.
                    async with engine1.connect() as conn:
                        async with create_async_sadlock(conn, key) as lck:
                            self.assertTrue(lck.locked)
                            event.set()
                            await asyncio.sleep(delay)
                        self.assertFalse(lck.locked)

                async def coro2():
                    # Contender: wait for the holder's signal, then time a failing acquire.
                    async with engine2.connect() as conn:
                        lck = create_async_sadlock(conn, key)
                        await event.wait()
                        t0 = time()
                        is_ok = await lck.acquire(timeout=timeout)
                        self.assertFalse(is_ok)
                        self.assertFalse(lck.locked)
                        self.assertGreaterEqual(time() - t0, timeout)

                # NOTE(review): asyncio.wait does not re-raise task exceptions,
                # so assertion failures inside the coroutines could be silently
                # dropped — consider asyncio.gather; confirm before changing.
                aws = (
                    asyncio.create_task(coro1()),
                    asyncio.create_task(coro2()),
                )
                await asyncio.wait(aws, timeout=delay * 2)
            finally:
                aws = (
                    asyncio.create_task(engine1.dispose()),
                    asyncio.create_task(engine2.dispose()),
                )
                await asyncio.wait(aws, timeout=delay * 2)
65 |
--------------------------------------------------------------------------------
/tests/asyncio/test_key_convert.py:
--------------------------------------------------------------------------------
1 | from multiprocessing import cpu_count
2 | from random import choice
3 | from unittest import IsolatedAsyncioTestCase
4 | from uuid import uuid4
5 | from zlib import crc32
6 |
7 | from sqlalchemy_dlock import create_async_sadlock
8 | from sqlalchemy_dlock.lock.mysql import MYSQL_LOCK_NAME_MAX_LENGTH
9 |
10 | from .engines import create_engines, dispose_engines, get_engines
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
15 | class KeyConvertTestCase(IsolatedAsyncioTestCase):
16 | def setUp(self):
17 | create_engines()
18 |
19 | async def asyncTearDown(self):
20 | await dispose_engines()
21 |
22 | async def test_convert(self):
23 | for engine in get_engines():
24 | key = uuid4().hex
25 |
26 | if engine.name == "mysql":
27 |
28 | def _convert(k): # type: ignore
29 | return f"key is {k!r}"
30 |
31 | elif engine.name == "postgresql":
32 |
33 | def _convert(k): # type: ignore
34 | return crc32(str(k).encode())
35 |
36 | else:
37 | raise NotImplementedError()
38 |
39 | async with engine.connect() as conn:
40 | async with create_async_sadlock(conn, key, convert=_convert) as lock:
41 | self.assertTrue(lock.locked)
42 | self.assertFalse(lock.locked)
43 |
44 | async def test_mysql_key_max_length(self):
45 | for engine in get_engines():
46 | if engine.name != "mysql":
47 | continue
48 | key = "".join(choice([chr(n) for n in range(0x20, 0x7F)]) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH))
49 | async with engine.connect() as conn:
50 | async with create_async_sadlock(conn, key) as lock:
51 | self.assertTrue(lock.locked)
52 | self.assertFalse(lock.locked)
53 |
54 | async def test_mysql_key_gt_max_length(self):
55 | for engine in get_engines():
56 | if engine.name != "mysql":
57 | continue
58 | key = "".join(choice([chr(n) for n in range(0x20, 0x7F)]) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH + 1))
59 | async with engine.connect() as conn:
60 | with self.assertRaises(ValueError):
61 | create_async_sadlock(conn, key)
62 |
63 | async def test_mysql_key_not_a_string(self):
64 | keys = None, 1, 0, -1, 0.1, True, False, (), [], set(), {}, object()
65 | for engine in get_engines():
66 | if engine.name != "mysql":
67 | continue
68 | async with engine.connect() as conn:
69 | for k in keys:
70 | with self.assertRaises(TypeError):
71 | create_async_sadlock(conn, k, convert=lambda x: x)
72 |
73 | async def test_postgresql_key_max(self):
74 | for engine in get_engines():
75 | if engine.name != "postgresql":
76 | continue
77 | key = 2**63 - 1
78 | async with engine.connect() as conn:
79 | async with create_async_sadlock(conn, key) as lock:
80 | self.assertTrue(lock.locked)
81 | self.assertFalse(lock.locked)
82 |
83 | async def test_postgresql_key_over_max(self):
84 | for engine in get_engines():
85 | if engine.name != "postgresql":
86 | continue
87 | key = 2**63
88 | async with engine.connect() as conn:
89 | with self.assertRaises(OverflowError):
90 | create_async_sadlock(conn, key)
91 |
92 | async def test_postgresql_key_min(self):
93 | for engine in get_engines():
94 | if engine.name != "postgresql":
95 | continue
96 | key = -(2**63)
97 | async with engine.connect() as conn:
98 | async with create_async_sadlock(conn, key) as lock:
99 | self.assertTrue(lock.locked)
100 | self.assertFalse(lock.locked)
101 |
102 | async def test_postgresql_key_over_min(self):
103 | for engine in get_engines():
104 | if engine.name != "postgresql":
105 | continue
106 | key = -(2**63) - 1
107 | async with engine.connect() as conn:
108 | with self.assertRaises(OverflowError):
109 | create_async_sadlock(conn, key)
110 |
111 | async def test_key_wrong_type(self):
112 | for engine in get_engines():
113 | async with engine.connect() as conn:
114 | for k in ((), {}, set(), [], object()):
115 | with self.assertRaises(TypeError):
116 | create_async_sadlock(conn, k)
117 |
--------------------------------------------------------------------------------
/tests/asyncio/test_pg.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 | from unittest import IsolatedAsyncioTestCase, skipIf
4 | from uuid import uuid4
5 |
6 | from sqlalchemy_dlock import create_async_sadlock
7 |
8 | from .engines import create_engines, dispose_engines, get_engines
9 |
10 |
11 | class PgTestCase(IsolatedAsyncioTestCase):
12 | def setUp(self):
13 | create_engines()
14 |
15 | async def asyncTearDown(self):
16 | await dispose_engines()
17 |
18 | async def test_pg_invalid_interval(self):
19 | for engine in get_engines():
20 | if engine.name != "postgresql":
21 | continue
22 | key = uuid4().hex
23 | async with engine.connect() as conn:
24 | lck = create_async_sadlock(conn, key)
25 | with self.assertRaises(ValueError):
26 | await lck.acquire(timeout=0, interval=-1)
27 |
28 | async def test_simple_xact(self):
29 | key = uuid4().hex
30 | for engine in get_engines():
31 | if engine.name != "postgresql":
32 | continue
33 | async with engine.connect() as conn:
34 | lck = create_async_sadlock(conn, key, xact=True)
35 | async with conn.begin():
36 | self.assertTrue(await lck.acquire())
37 |
38 | @skipIf(sys.version_info < (3, 11), "‘asyncio.Barrier’: New in version 3.11")
39 | async def test_xact_coro(self):
40 | key = uuid4().hex
41 | for engine in get_engines():
42 | if engine.name != "postgresql":
43 | continue
44 |
45 | bar = asyncio.Barrier(2)
46 |
47 | async def coro():
48 | async with engine.connect() as c_:
49 | l_ = create_async_sadlock(c_, key, xact=True)
50 | await asyncio.wait_for(bar.wait(), 10)
51 | async with c_.begin():
52 | self.assertFalse(await l_.acquire(block=False))
53 | await asyncio.sleep(3)
54 | self.assertTrue(await l_.acquire(block=False))
55 |
56 | task = asyncio.create_task(coro())
57 |
58 | async with engine.connect() as conn:
59 | lck = create_async_sadlock(conn, key, xact=True)
60 | async with conn.begin():
61 | self.assertTrue(await lck.acquire(block=False))
62 | await asyncio.wait_for(bar.wait(), 5)
63 | await asyncio.sleep(3)
64 |
65 | await asyncio.wait_for(task, 10)
66 |
--------------------------------------------------------------------------------
/tests/asyncio/test_session.py:
--------------------------------------------------------------------------------
1 | from unittest import IsolatedAsyncioTestCase
2 | from uuid import uuid1
3 |
4 | from sqlalchemy.ext.asyncio import AsyncSession
5 |
6 | from sqlalchemy_dlock import create_async_sadlock
7 |
8 | from .engines import create_engines, dispose_engines, get_engines
9 |
10 |
class SessionTestCase(IsolatedAsyncioTestCase):
    """Locks should also work when given an ORM ``AsyncSession`` instead of a connection."""

    # NOTE(review): appears unused within this class — confirm before removing.
    sessions = []  # type: ignore[var-annotated]

    def setUp(self):
        create_engines()

    async def asyncTearDown(self):
        await dispose_engines()

    async def test_once(self):
        # Acquire and release once per engine, driving the lock through an AsyncSession.
        key = uuid1().hex
        for engine in get_engines():
            session = AsyncSession(engine)
            async with session.begin():
                async with create_async_sadlock(session, key) as lock:
                    self.assertTrue(lock.locked)
                self.assertFalse(lock.locked)
28 |
--------------------------------------------------------------------------------
/tests/docker-compose.yml:
--------------------------------------------------------------------------------
name: sqlalchemy-dlock-tests

# Credentials shared by the database services and the test runner.
x-common-environment: &common-environment
  MYSQL_RANDOM_ROOT_PASSWORD: "1"
  MYSQL_DATABASE: test
  MYSQL_USER: test
  MYSQL_PASSWORD: test
  POSTGRES_PASSWORD: test

services:
  mysql:
    image: mysql
    ports:
      - "3306:3306"
    environment:
      <<: *common-environment

  postgres:
    image: postgres
    ports:
      - "5432:5432"
    environment:
      <<: *common-environment

  # Test runner: mounts the repository root and executes the test script
  # against the two database services above.
  python:
    build: .
    volumes:
      - type: bind
        source: ..
        target: /workspace
    working_dir: /workspace
    depends_on: [mysql, postgres]
    environment:
      <<: *common-environment
    env_file: .env
    command: [/bin/bash, /workspace/scripts/run-test.sh]
37 |
--------------------------------------------------------------------------------
/tests/engines.py:
--------------------------------------------------------------------------------
from os import getenv

from dotenv import load_dotenv
from sqlalchemy import create_engine

__all__ = ["ENGINES", "URLS"]

# Pick up TEST_URLS (and any credentials) from a .env file, if present.
load_dotenv()

# Space-separated database URLs; defaults match the docker-compose services.
URLS = (getenv("TEST_URLS") or "mysql://test:test@127.0.0.1/test postgresql://postgres:test@127.0.0.1/").split()

# One synchronous Engine per URL, shared by all synchronous test modules.
ENGINES = [create_engine(url) for url in URLS]
13 |
--------------------------------------------------------------------------------
/tests/requirements-compose.txt:
--------------------------------------------------------------------------------
1 | coverage
2 |
3 | python-dotenv
4 |
5 | mysqlclient
6 | aiomysql
7 |
8 | psycopg2
9 | asyncpg
10 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | coverage
2 |
3 | python-dotenv
4 | cryptography;sys_platform=="win32"
5 |
6 | mysqlclient
7 | aiomysql
8 |
9 | psycopg2-binary
10 | asyncpg
11 |
--------------------------------------------------------------------------------
/tests/test_basic.py:
--------------------------------------------------------------------------------
1 | from contextlib import ExitStack, closing
2 | from multiprocessing import cpu_count
3 | from random import randint
4 | from secrets import token_bytes, token_hex
5 | from unittest import TestCase
6 | from uuid import uuid4
7 |
8 | from sqlalchemy_dlock import create_sadlock
9 |
10 | from .engines import ENGINES
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
15 | class BasicTestCase(TestCase):  # exercises create_sadlock acquire/release on every configured engine
16 |     def tearDown(self):
17 |         for engine in ENGINES:
18 |             engine.dispose()  # drop pooled connections so a held lock cannot leak into the next test
19 |
20 |     def test_enter_exit(self):  # explicit acquire()/release() toggles the `locked` flag
21 |         for engine in ENGINES:
22 |             key = uuid4().hex
23 |             with engine.connect() as conn:
24 |                 lock = create_sadlock(conn, key)
25 |                 self.assertFalse(lock.locked)
26 |                 lock.acquire()
27 |                 self.assertTrue(lock.locked)
28 |                 lock.release()
29 |                 self.assertFalse(lock.locked)
30 |
31 |     def test_with_statement(self):  # `with` acquires on entry and releases on exit
32 |         for engine in ENGINES:
33 |             key = uuid4().hex
34 |             with engine.connect() as conn:
35 |                 with create_sadlock(conn, key) as lock:
36 |                     self.assertTrue(lock.locked)
37 |                 self.assertFalse(lock.locked)
38 |
39 |     def test_timeout_with_statement(self):  # contextual_timeout makes `with` raise TimeoutError on contention
40 |         for engine in ENGINES:
41 |             key = uuid4().hex
42 |             with ExitStack() as stack:
43 |                 conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
44 |                 lock0 = create_sadlock(conn0, key)
45 |                 self.assertTrue(lock0.acquire(False))
46 |                 with self.assertRaises(TimeoutError):
47 |                     with create_sadlock(conn1, key, contextual_timeout=1):
48 |                         pass
49 |                 lock0.release()
50 |                 self.assertFalse(lock0.locked)
51 |
52 |     def test_many_str_key(self):
53 |         for engine in ENGINES:
54 |             for _ in range(100):
55 |                 with engine.connect() as conn:
56 |                     key = uuid4().hex + uuid4().hex
57 |                     with create_sadlock(conn, key) as lock:
58 |                         self.assertTrue(lock.locked)
59 |                     self.assertFalse(lock.locked)
60 |
61 |     def test_many_int_key(self):
62 |         for engine in ENGINES:
63 |             for _ in range(100):
64 |                 with engine.connect() as conn:
65 |                     key = randint(-0x8000_0000_0000_0000, 0x7FFF_FFFF_FFFF_FFFF)  # full signed 64-bit range
66 |                     with create_sadlock(conn, key) as lock:
67 |                         self.assertTrue(lock.locked)
68 |                     self.assertFalse(lock.locked)
69 |
70 |     def test_many_bytes_key(self):  # bytes keys: hex text for MySQL, raw bytes for PostgreSQL
71 |         for engine in ENGINES:
72 |             for _ in range(100):
73 |                 with engine.connect() as conn:
74 |                     if engine.name == "mysql":
75 |                         key = token_hex().encode()
76 |                     elif engine.name == "postgresql":
77 |                         key = token_bytes()
78 |                     else:
79 |                         raise NotImplementedError()
80 |                     with create_sadlock(conn, key) as lock:
81 |                         self.assertTrue(lock.locked)
82 |                     self.assertFalse(lock.locked)
83 |
84 |     def test_closing(self):  # contextlib.closing releases via close() instead of __exit__
85 |         for engine in ENGINES:
86 |             key = uuid4().hex
87 |             with engine.connect() as conn:
88 |                 with closing(create_sadlock(conn, key)) as lock:
89 |                     self.assertFalse(lock.locked)
90 |                     self.assertTrue(lock.acquire())
91 |                     self.assertTrue(lock.locked)
92 |                 self.assertFalse(lock.locked)
93 |
94 |     def test_no_blocking(self):
95 |         for engine in ENGINES:
96 |             key = uuid4().hex
97 |             with engine.connect() as conn:
98 |                 with closing(create_sadlock(conn, key)) as lock:
99 |                     self.assertFalse(lock.locked)
100 |                     acquired = lock.acquire(False)  # non-blocking acquire on an uncontended key succeeds immediately
101 |                     self.assertTrue(acquired)
102 |                     self.assertTrue(lock.locked)
103 |                 self.assertFalse(lock.locked)
104 |
105 |     def test_invoke_locked_lock(self):  # re-acquiring an already-held lock raises ValueError
106 |         for engine in ENGINES:
107 |             key = uuid4().hex
108 |             with engine.connect() as conn:
109 |                 with create_sadlock(conn, key) as lock:
110 |                     self.assertTrue(lock.locked)
111 |                     self.assertRaisesRegex(ValueError, "invoked on a locked lock", lock.acquire)
112 |                 self.assertFalse(lock.locked)
113 |
114 |     def test_invoke_unlocked_lock(self):  # releasing a lock that is not held raises ValueError
115 |         for engine in ENGINES:
116 |             key = uuid4().hex
117 |             with engine.connect() as conn:
118 |                 with closing(create_sadlock(conn, key)) as lock:
119 |                     self.assertFalse(lock.locked)
120 |                     self.assertRaisesRegex(ValueError, "invoked on an unlocked lock", lock.release)
121 |                 self.assertFalse(lock.locked)
122 |
123 |     def test_timeout_positive(self):
124 |         for engine in ENGINES:
125 |             key = uuid4().hex
126 |             for _ in range(CPU_COUNT + 1):
127 |                 with engine.connect() as conn:
128 |                     with closing(create_sadlock(conn, key)) as lock:
129 |                         self.assertTrue(lock.acquire(timeout=randint(1, 1024)))
130 |                     self.assertFalse(lock.locked)
131 |
132 |     def test_timeout_zero(self):  # timeout=0 still succeeds when the key is uncontended
133 |         for engine in ENGINES:
134 |             key = uuid4().hex
135 |             with engine.connect() as conn:
136 |                 with closing(create_sadlock(conn, key)) as lock:
137 |                     self.assertTrue(lock.acquire(timeout=0))
138 |                 self.assertFalse(lock.locked)
139 |
140 |     def test_timeout_negative(self):  # negative timeout is accepted — presumably treated as "wait forever"; TODO confirm against the lock API
141 |         for engine in ENGINES:
142 |             key = uuid4().hex
143 |             for _ in range(CPU_COUNT + 1):
144 |                 with engine.connect() as conn:
145 |                     with closing(create_sadlock(conn, key)) as lock:
146 |                         self.assertTrue(lock.acquire(timeout=-1 * randint(1, 1024)))
147 |                     self.assertFalse(lock.locked)
148 |
149 |     def test_timeout_none(self):
150 |         for engine in ENGINES:
151 |             key = uuid4().hex
152 |             for i in range(CPU_COUNT + 1):
153 |                 with engine.connect() as conn:
154 |                     with closing(create_sadlock(conn, key)) as lock:
155 |                         self.assertTrue(lock.acquire(timeout=None))
156 |                     self.assertFalse(lock.locked)
157 |
158 |     def test_enter_locked(self):  # a second connection cannot take a held lock until it is released
159 |         for engine in ENGINES:
160 |             key = uuid4().hex
161 |             with ExitStack() as stack:
162 |                 conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
163 |                 lock0 = create_sadlock(conn0, key)
164 |                 self.assertTrue(lock0.acquire(False))
165 |                 lock1 = create_sadlock(conn1, key)
166 |                 self.assertFalse(lock1.acquire(False))
167 |                 lock0.release()
168 |                 self.assertFalse(lock0.locked)
169 |                 self.assertTrue(lock1.acquire(False))
170 |                 lock1.release()
171 |                 self.assertFalse(lock1.locked)
172 |
173 |     def test_release_unlocked_error(self):  # release() from a connection that never acquired raises ValueError
174 |         for engine in ENGINES:
175 |             key = uuid4().hex
176 |             with ExitStack() as stack:
177 |                 conn0, conn1 = [stack.enter_context(engine.connect()) for _ in range(2)]
178 |                 lock0 = create_sadlock(conn0, key)
179 |                 self.assertTrue(lock0.acquire(False))
180 |                 lock1 = create_sadlock(conn1, key)
181 |                 with self.assertRaisesRegex(ValueError, "invoked on an unlocked lock"):
182 |                     lock1.release()
183 |
--------------------------------------------------------------------------------
/tests/test_key_convert.py:
--------------------------------------------------------------------------------
1 | from multiprocessing import cpu_count
2 | from random import choice
3 | from unittest import TestCase
4 | from uuid import uuid4
5 | from zlib import crc32
6 |
7 | from sqlalchemy_dlock import create_sadlock
8 | from sqlalchemy_dlock.lock.mysql import MYSQL_LOCK_NAME_MAX_LENGTH
9 |
10 | from .engines import ENGINES
11 |
12 | CPU_COUNT = cpu_count()
13 |
14 |
15 | class KeyConvertTestCase(TestCase):  # key-conversion rules and per-dialect key limits
16 |     def tearDown(self):
17 |         for engine in ENGINES:
18 |             engine.dispose()  # drop pooled connections between tests
19 |
20 |     def test_convert(self):  # a user-supplied `convert` callable maps the key to the dialect's native type
21 |         for engine in ENGINES:
22 |             key = uuid4().hex
23 |
24 |             if engine.name == "mysql":
25 |
26 |                 def _convert(k):  # type: ignore
27 |                     return f"key is {k!r}"
28 |
29 |             elif engine.name == "postgresql":
30 |
31 |                 def _convert(k):  # type: ignore
32 |                     return crc32(str(k).encode())
33 |
34 |             else:
35 |                 raise NotImplementedError()
36 |
37 |             with engine.connect() as conn:
38 |                 with create_sadlock(conn, key, convert=_convert) as lock:
39 |                     self.assertTrue(lock.locked)
40 |                 self.assertFalse(lock.locked)
41 |
42 |     def test_mysql_key_max_length(self):  # a name of exactly MYSQL_LOCK_NAME_MAX_LENGTH chars is accepted
43 |         for engine in ENGINES:
44 |             if engine.name != "mysql":
45 |                 continue
46 |             key = "".join(choice([chr(n) for n in range(0x20, 0x7F)]) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH))  # random printable ASCII
47 |             with engine.connect() as conn:
48 |                 with create_sadlock(conn, key) as lock:
49 |                     self.assertTrue(lock.locked)
50 |                 self.assertFalse(lock.locked)
51 |
52 |     def test_mysql_key_gt_max_length(self):  # one char over the limit raises ValueError
53 |         for engine in ENGINES:
54 |             if engine.name != "mysql":
55 |                 continue
56 |             key = "".join(choice([chr(n) for n in range(0x20, 0x7F)]) for _ in range(MYSQL_LOCK_NAME_MAX_LENGTH + 1))
57 |             with engine.connect() as conn:
58 |                 with self.assertRaises(ValueError):
59 |                     create_sadlock(conn, key)
60 |
61 |     def test_mysql_key_not_a_string(self):  # with an identity `convert`, non-string keys are rejected with TypeError
62 |         keys = None, 1, 0, -1, 0.1, True, False, (), [], set(), {}, object()
63 |
64 |         for engine in ENGINES:
65 |             if engine.name != "mysql":
66 |                 continue
67 |
68 |             with engine.connect() as conn:
69 |                 for k in keys:
70 |                     with self.assertRaises(TypeError):
71 |                         create_sadlock(conn, k, convert=lambda x: x)
72 |
73 |     def test_postgresql_key_max(self):  # int8 upper bound (2**63 - 1) is accepted
74 |         for engine in ENGINES:
75 |             if engine.name != "postgresql":
76 |                 continue
77 |             key = 2**63 - 1
78 |             with engine.connect() as conn:
79 |                 with create_sadlock(conn, key) as lock:
80 |                     self.assertTrue(lock.locked)
81 |                 self.assertFalse(lock.locked)
82 |
83 |     def test_postgresql_key_over_max(self):  # one above int8 max raises OverflowError
84 |         for engine in ENGINES:
85 |             if engine.name != "postgresql":
86 |                 continue
87 |             key = 2**63
88 |             with engine.connect() as conn:
89 |                 with self.assertRaises(OverflowError):
90 |                     create_sadlock(conn, key)
91 |
92 |     def test_postgresql_key_min(self):  # int8 lower bound (-(2**63)) is accepted
93 |         for engine in ENGINES:
94 |             if engine.name != "postgresql":
95 |                 continue
96 |             key = -(2**63)
97 |             with engine.connect() as conn:
98 |                 with create_sadlock(conn, key) as lock:
99 |                     self.assertTrue(lock.locked)
100 |                 self.assertFalse(lock.locked)
101 |
102 |     def test_postgresql_key_over_min(self):  # one below int8 min raises OverflowError
103 |         for engine in ENGINES:
104 |             if engine.name != "postgresql":
105 |                 continue
106 |             key = -(2**63) - 1
107 |             with engine.connect() as conn:
108 |                 with self.assertRaises(OverflowError):
109 |                     create_sadlock(conn, key)
110 |
111 |     def test_key_wrong_type(self):  # containers and arbitrary objects are never valid keys
112 |         for engine in ENGINES:
113 |             with engine.connect() as conn:
114 |                 for k in ((), {}, set(), [], object()):
115 |                     with self.assertRaises(TypeError):
116 |                         create_sadlock(conn, k)
117 |
--------------------------------------------------------------------------------
/tests/test_multiprocess.py:
--------------------------------------------------------------------------------
1 | from contextlib import closing
2 | from multiprocessing import Barrier, Process
3 | from time import sleep, time
4 | from unittest import TestCase
5 | from uuid import uuid4
6 |
7 | from sqlalchemy import create_engine
8 |
9 | from sqlalchemy_dlock import create_sadlock
10 |
11 | from .engines import URLS
12 |
13 |
14 | class MpNonBlockingSuccessTestCase(TestCase):  # fn1 locks then fully releases; fn2 then acquires non-blocking, across processes
15 |     @staticmethod
16 |     def fn1(url, k, b):
17 |         engine = create_engine(url)
18 |         with engine.connect() as conn:
19 |             with create_sadlock(conn, k) as lock:
20 |                 assert lock.locked
21 |             assert not lock.locked
22 |         b.wait()  # signal fn2 only after the lock is released
23 |
24 |     @staticmethod
25 |     def fn2(url, k, b):
26 |         engine = create_engine(url)
27 |         with engine.connect() as conn:
28 |             with closing(create_sadlock(conn, k)) as lock:
29 |                 b.wait()  # wait until fn1 has released
30 |                 assert lock.acquire(False)
31 |
32 |     def test(self):
33 |         key = uuid4().hex
34 |         for url in URLS:
35 |             bar = Barrier(2)
36 |
37 |             p1 = Process(target=self.__class__.fn1, args=(url, key, bar))
38 |             p2 = Process(target=self.__class__.fn2, args=(url, key, bar))
39 |
40 |             p1.start()
41 |             p2.start()
42 |
43 |             p1.join()
44 |             p2.join()
45 |
46 |             self.assertEqual(p1.exitcode, 0)  # non-zero would mean an assert failed in the child
47 |             self.assertEqual(p2.exitcode, 0)
48 |
49 |
50 | class MpNonBlockingFailTestCase(TestCase):  # fn2's non-blocking acquire must fail while fn1 still holds the lock
51 |     @staticmethod
52 |     def fn1(url, k, b, delay):
53 |         engine = create_engine(url)
54 |         with engine.connect() as conn:
55 |             with create_sadlock(conn, k) as lock:
56 |                 assert lock.locked
57 |                 b.wait()  # fn2 attempts its acquire during the sleep window below
58 |                 sleep(delay)
59 |                 assert lock.locked
60 |             assert not lock.locked
61 |
62 |     @staticmethod
63 |     def fn2(url, k, b):
64 |         engine = create_engine(url)
65 |         with engine.connect() as conn:
66 |             with closing(create_sadlock(conn, k)) as lock:
67 |                 b.wait()
68 |                 assert not lock.acquire(False)  # still held by fn1
69 |
70 |     def test(self):
71 |         key = uuid4().hex
72 |         delay = 1.0
73 |         cls = self.__class__
74 |         for url in URLS:
75 |             bar = Barrier(2)
76 |
77 |             p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
78 |             p2 = Process(target=cls.fn2, args=(url, key, bar))
79 |
80 |             p1.start()
81 |             p2.start()
82 |
83 |             p1.join()
84 |             p2.join()
85 |
86 |             self.assertEqual(p1.exitcode, 0)
87 |             self.assertEqual(p2.exitcode, 0)
88 |
89 |
90 | class MpTimeoutSuccessTestCase(TestCase):  # fn2's timed acquire succeeds once fn1 releases within the timeout
91 |     @staticmethod
92 |     def fn1(url, k, b, delay):
93 |         engine = create_engine(url)
94 |         with engine.connect() as conn:
95 |             with create_sadlock(conn, k) as lock:
96 |                 assert lock.locked
97 |                 b.wait()
98 |                 sleep(delay)  # hold the lock for `delay` seconds, then release via __exit__
99 |                 assert lock.locked
100 |             assert not lock.locked
101 |
102 |     @staticmethod
103 |     def fn2(url, k, b, delay, timeout):
104 |         engine = create_engine(url)
105 |         with engine.connect() as conn:
106 |             with closing(create_sadlock(conn, k)) as lock:
107 |                 b.wait()
108 |                 ts = time()
109 |                 assert lock.acquire(timeout=timeout)
110 |                 assert time() - ts >= delay  # had to wait at least until fn1 released
111 |                 assert timeout >= time() - ts  # ... but no longer than the timeout
112 |                 assert lock.locked
113 |
114 |     def test(self):
115 |         key = uuid4().hex
116 |         delay = 1.0
117 |         timeout = 3.0
118 |         cls = self.__class__
119 |
120 |         for url in URLS:
121 |             bar = Barrier(2)
122 |
123 |             p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
124 |             p2 = Process(target=cls.fn2, args=(url, key, bar, delay, timeout))
125 |
126 |             p1.start()
127 |             p2.start()
128 |
129 |             p1.join()
130 |             p2.join()
131 |
132 |             self.assertEqual(p1.exitcode, 0)
133 |             self.assertEqual(p2.exitcode, 0)
134 |
135 |
136 | class MpTimeoutFailTestCase(TestCase):  # fn1 holds longer than fn2's timeout, so the timed acquire fails
137 |     @staticmethod
138 |     def fn1(url, k, b, delay):
139 |         engine = create_engine(url)
140 |         with engine.connect() as conn:
141 |             with create_sadlock(conn, k) as lock:
142 |                 assert lock.locked
143 |                 b.wait()
144 |                 sleep(delay)  # outlive fn2's timeout
145 |                 assert lock.locked
146 |             assert not lock.locked
147 |
148 |     @staticmethod
149 |     def fn2(url, k, b, timeout):
150 |         engine = create_engine(url)
151 |         with engine.connect() as conn:
152 |             with closing(create_sadlock(conn, k)) as lock:
153 |                 b.wait()
154 |                 ts = time()
155 |                 assert not lock.acquire(timeout=timeout)
156 |                 assert round(time() - ts) >= timeout  # rounded to tolerate scheduler jitter
157 |                 assert not lock.locked
158 |
159 |     def test(self):
160 |         cls = self.__class__
161 |         key = uuid4().hex
162 |         delay = 3.0
163 |         timeout = 1.0
164 |
165 |         for url in URLS:
166 |             bar = Barrier(2)
167 |
168 |             p1 = Process(target=cls.fn1, args=(url, key, bar, delay))
169 |             p2 = Process(target=cls.fn2, args=(url, key, bar, timeout))
170 |
171 |             p1.start()
172 |             p2.start()
173 |
174 |             p1.join()
175 |             p2.join()
176 |
177 |             self.assertEqual(p1.exitcode, 0)
178 |             self.assertEqual(p2.exitcode, 0)
179 |
180 |
181 | class MpReleaseOmittedTestCase(TestCase):  # fn1 exits without releasing; the key is expected to be lockable afterwards — NOTE(review): relies on the lock being freed when fn1's process/connection dies; confirm per dialect
182 |     @staticmethod
183 |     def fn1(url, k):
184 |         engine = create_engine(url)
185 |         lock = create_sadlock(engine.connect(), k)  # connection deliberately not closed, lock deliberately not released
186 |         assert lock.acquire(False)
187 |
188 |     @staticmethod
189 |     def fn2(url, k):
190 |         engine = create_engine(url)
191 |         with engine.connect() as conn:
192 |             with closing(create_sadlock(conn, k)) as lock:
193 |                 assert lock.acquire(False)
194 |
195 |     def test(self):
196 |         cls = self.__class__
197 |         key = uuid4().hex
198 |
199 |         for url in URLS:
200 |             p1 = Process(target=cls.fn1, args=(url, key))
201 |             p2 = Process(target=cls.fn2, args=(url, key))
202 |
203 |             p1.start()
204 |             p1.join()  # p1 fully finished (its connection gone) before p2 starts
205 |
206 |             p2.start()
207 |             p2.join()
208 |
209 |             self.assertEqual(p1.exitcode, 0)
210 |             self.assertEqual(p2.exitcode, 0)
211 |
--------------------------------------------------------------------------------
/tests/test_multithread.py:
--------------------------------------------------------------------------------
1 | from contextlib import closing
2 | from threading import Barrier, Thread
3 | from time import sleep, time
4 | from unittest import TestCase
5 | from uuid import uuid4
6 |
7 | from sqlalchemy_dlock import create_sadlock
8 |
9 | from .engines import ENGINES
10 |
11 |
12 | class MultiThreadTestCase(TestCase):  # same contention scenarios as the multiprocess tests, but across threads sharing one Engine
13 |     def tearDown(self):
14 |         for engine in ENGINES:
15 |             engine.dispose()
16 |
17 |     def test_non_blocking_success(self):
18 |         key = uuid4().hex
19 |         for engine in ENGINES:
20 |             bar = Barrier(2)
21 |
22 |             def fn1(b):
23 |                 with engine.connect() as conn:
24 |                     with create_sadlock(conn, key) as lock:
25 |                         self.assertTrue(lock.locked)
26 |                     self.assertFalse(lock.locked)
27 |                 b.wait()  # release first, then let fn2 proceed
28 |
29 |             def fn2(b):
30 |                 with engine.connect() as conn:
31 |                     with closing(create_sadlock(conn, key)) as lock:
32 |                         b.wait()
33 |                         self.assertTrue(lock.acquire(False))
34 |
35 |             trd1 = Thread(target=fn1, args=(bar,))
36 |             trd2 = Thread(target=fn2, args=(bar,))
37 |
38 |             trd1.start()
39 |             trd2.start()
40 |
41 |             trd1.join()  # threads are joined inside the loop, so the closures' late binding of `engine` is safe
42 |             trd2.join()
43 |
44 |     def test_non_blocking_fail(self):
45 |         key = uuid4().hex
46 |         delay = 1.0
47 |
48 |         for engine in ENGINES:
49 |             bar = Barrier(2)
50 |
51 |             def fn1(b):
52 |                 with engine.connect() as conn:
53 |                     with create_sadlock(conn, key) as lock:
54 |                         self.assertTrue(lock.locked)
55 |                         b.wait()  # fn2 tries (and must fail) while we sleep holding the lock
56 |                         sleep(delay)
57 |                         self.assertTrue(lock.locked)
58 |                     self.assertFalse(lock.locked)
59 |
60 |             def fn2(b):
61 |                 with engine.connect() as conn:
62 |                     with closing(create_sadlock(conn, key)) as lock:
63 |                         b.wait()
64 |                         self.assertFalse(lock.acquire(False))
65 |
66 |             trd1 = Thread(target=fn1, args=(bar,))
67 |             trd2 = Thread(target=fn2, args=(bar,))
68 |
69 |             trd1.start()
70 |             trd2.start()
71 |
72 |             trd1.join()
73 |             trd2.join()
74 |
75 |     def test_timeout_fail(self):  # holder sleeps longer than the waiter's timeout
76 |         key = uuid4().hex
77 |         delay = 3.0
78 |         timeout = 1.0
79 |         for engine in ENGINES:
80 |             bar = Barrier(2)
81 |
82 |             def fn1(b):
83 |                 with engine.connect() as conn:
84 |                     with create_sadlock(conn, key) as lock:
85 |                         self.assertTrue(lock.locked)
86 |                         b.wait()
87 |                         self.assertTrue(lock.locked)
88 |                         sleep(delay)
89 |                         self.assertTrue(lock.locked)
90 |                     self.assertFalse(lock.locked)
91 |
92 |             def fn2(b):
93 |                 with engine.connect() as conn:
94 |                     with closing(create_sadlock(conn, key)) as lock:
95 |                         b.wait()
96 |                         ts = time()
97 |                         self.assertFalse(lock.acquire(timeout=timeout))
98 |                         self.assertGreaterEqual(time() - ts, timeout)  # waited at least the full timeout
99 |                         self.assertFalse(lock.locked)
100 |
101 |             trd1 = Thread(target=fn1, args=(bar,))
102 |             trd2 = Thread(target=fn2, args=(bar,))
103 |
104 |             trd1.start()
105 |             trd2.start()
106 |
107 |             trd1.join()
108 |             trd2.join()
109 |
110 |     def test_timeout_success(self):  # holder releases after `delay`, well inside the waiter's timeout
111 |         key = uuid4().hex
112 |         delay = 1.0
113 |         timeout = 3.0
114 |
115 |         for engine in ENGINES:
116 |             bar = Barrier(2)
117 |
118 |             def fn1(b):
119 |                 with engine.connect() as conn:
120 |                     with create_sadlock(conn, key) as lock:
121 |                         self.assertTrue(lock.locked)
122 |                         b.wait()
123 |                         sleep(delay)
124 |                         self.assertTrue(lock.locked)
125 |                     self.assertFalse(lock.locked)
126 |
127 |             def fn2(b):
128 |                 with engine.connect() as conn:
129 |                     with closing(create_sadlock(conn, key)) as lock:
130 |                         b.wait()
131 |                         ts = time()
132 |                         self.assertTrue(lock.acquire(timeout=timeout))
133 |                         self.assertGreaterEqual(time() - ts, delay)
134 |                         self.assertGreaterEqual(timeout, time() - ts)
135 |                         self.assertTrue(lock.locked)
136 |
137 |             trd1 = Thread(target=fn1, args=(bar,))
138 |             trd2 = Thread(target=fn2, args=(bar,))
139 |
140 |             trd1.start()
141 |             trd2.start()
142 |
143 |             trd1.join()
144 |             trd2.join()
145 |
146 |     def test_connection_released(self):  # fn1 never releases explicitly; the test asserts fn2 can acquire after fn1's connection context closes
147 |         key = uuid4().hex
148 |
149 |         for engine in ENGINES:
150 |
151 |             def fn1():
152 |                 with engine.connect() as conn:
153 |                     lock = create_sadlock(conn, key)
154 |                     self.assertTrue(lock.acquire(False))
155 |
156 |             def fn2():
157 |                 with engine.connect() as conn:
158 |                     with closing(create_sadlock(conn, key)) as lock:
159 |                         self.assertTrue(lock.acquire(False))
160 |
161 |             trd1 = Thread(target=fn1)
162 |             trd2 = Thread(target=fn2)
163 |
164 |             trd1.start()
165 |             trd1.join()
166 |
167 |             trd2.start()
168 |             trd2.join()
169 |
--------------------------------------------------------------------------------
/tests/test_pg.py:
--------------------------------------------------------------------------------
1 | from threading import Barrier, Thread
2 | from time import sleep
3 | from unittest import TestCase
4 | from uuid import uuid4
5 |
6 | from sqlalchemy_dlock import create_sadlock
7 |
8 | from .engines import ENGINES
9 |
10 |
11 | class PgTestCase(TestCase):  # PostgreSQL-only behaviors: interval validation and transaction-scoped (xact) locks
12 |     def tearDown(self):
13 |         for engine in ENGINES:
14 |             engine.dispose()
15 |
16 |     def test_pg_invalid_interval(self):  # a negative polling interval with a timeout is rejected
17 |         for engine in ENGINES:
18 |             if engine.name != "postgresql":
19 |                 continue
20 |             key = uuid4().hex
21 |             with engine.connect() as conn:
22 |                 lck = create_sadlock(conn, key)
23 |                 with self.assertRaises(ValueError):
24 |                     lck.acquire(timeout=0, interval=-1)
25 |
26 |     def test_simple_xact(self):  # an xact lock can be taken inside an explicit transaction
27 |         key = uuid4().hex
28 |         for engine in ENGINES:
29 |             if engine.name != "postgresql":
30 |                 continue
31 |             with engine.connect() as conn:
32 |                 lck = create_sadlock(conn, key, xact=True)
33 |                 with conn.begin():
34 |                     self.assertTrue(lck.acquire())
35 |
36 |     def test_xact_thread(self):  # an xact lock is held while the owning transaction is open and freed when it ends
37 |         key = uuid4().hex
38 |         for engine in ENGINES:
39 |             if engine.name != "postgresql":
40 |                 continue
41 |
42 |             trd_exc = None  # exception raised inside the worker thread, re-raised in the main thread
43 |             bar = Barrier(2)
44 |
45 |             def fn_():
46 |                 nonlocal trd_exc
47 |                 try:
48 |                     with engine.connect() as c_:
49 |                         l_ = create_sadlock(c_, key, xact=True)
50 |                         bar.wait(30)
51 |                         with c_.begin():
52 |                             self.assertFalse(l_.acquire(block=False))  # main thread's transaction still holds it
53 |                             sleep(10)  # by now the main transaction has ended (it only sleeps 3)
54 |                             self.assertTrue(l_.acquire(block=False))
55 |                 except Exception as exc:
56 |                     trd_exc = exc
57 |                     raise exc
58 |
59 |             trd = Thread(target=fn_)
60 |             trd.start()
61 |
62 |             with engine.connect() as conn:
63 |                 lck = create_sadlock(conn, key, xact=True)
64 |                 with conn.begin():
65 |                     self.assertTrue(lck.acquire(block=False))
66 |                     bar.wait(30)
67 |                     sleep(3)
68 |
69 |             trd.join()
70 |
71 |             if trd_exc is not None:
72 |                 raise trd_exc  # type: ignore
73 |
--------------------------------------------------------------------------------
/tests/test_scoped_session.py:
--------------------------------------------------------------------------------
1 | from unittest import TestCase
2 | from uuid import uuid4
3 |
4 | from sqlalchemy.orm import scoped_session, sessionmaker
5 |
6 | from sqlalchemy_dlock import create_sadlock
7 |
8 | from .engines import ENGINES
9 |
10 |
11 | class ScopedSessionTestCase(TestCase):  # locks created from scoped_session-managed ORM sessions
12 |     def setUp(self):
13 |         self.Sessions = []  # scoped_session registries, one per engine
14 |         self.sessions = []  # one live session per registry
15 |         for engine in ENGINES:
16 |             factory = sessionmaker(bind=engine)
17 |             Session = scoped_session(factory)
18 |             self.Sessions.append(Session)
19 |             self.sessions.append(Session())
20 |
21 |     def tearDown(self):
22 |         for Session in self.Sessions:
23 |             Session.remove()  # dispose the scoped registry's session
24 |         for engine in ENGINES:
25 |             engine.dispose()
26 |
27 |     def test_once(self):
28 |         key = uuid4().hex
29 |         for session in self.sessions:
30 |             with create_sadlock(session, key) as lock:
31 |                 self.assertTrue(lock.locked)
32 |             self.assertFalse(lock.locked)
33 |
34 |     def test_twice(self):  # the same session can lock/unlock the same key repeatedly
35 |         key = uuid4().hex
36 |         for session in self.sessions:
37 |             for _ in range(2):
38 |                 with create_sadlock(session, key) as lock:
39 |                     self.assertTrue(lock.locked)
40 |                 self.assertFalse(lock.locked)
41 |
42 |     def test_separated_connection(self):  # the lock works across commit/rollback/close of the owning session
43 |         key = uuid4().hex
44 |         for session in self.sessions:
45 |             session.commit()
46 |             lock = create_sadlock(session, key)
47 |             session.rollback()
48 |             self.assertTrue(lock.acquire())
49 |             session.close()
50 |             lock.release()  # release still works after close — presumably the lock holds its own connection; TODO confirm
51 |             self.assertFalse(lock.locked)
52 |
--------------------------------------------------------------------------------
/tests/test_session.py:
--------------------------------------------------------------------------------
1 | from unittest import TestCase
2 | from uuid import uuid1, uuid4
3 |
4 | from sqlalchemy.orm import sessionmaker
5 |
6 | from sqlalchemy_dlock import create_sadlock
7 |
8 | from .engines import ENGINES
9 |
10 |
11 | class SessionTestCase(TestCase):
12 | Sessions = [] # type: ignore[var-annotated]
13 |
14 | @classmethod
15 | def setUpClass(cls):
16 | for engine in ENGINES:
17 | Session = sessionmaker(bind=engine)
18 | cls.Sessions.append(Session)
19 |
20 | def tearDown(self):
21 | for engine in ENGINES:
22 | engine.dispose()
23 |
24 | def test_once(self):
25 | key = uuid1().hex
26 | for Session in self.Sessions:
27 | with Session() as session:
28 | with create_sadlock(session, key) as lock:
29 | self.assertTrue(lock.locked)
30 | self.assertFalse(lock.locked)
31 |
32 | def test_cross_transaction(self):
33 | key = uuid1().hex
34 | for Session in self.Sessions:
35 | with Session() as session:
36 | session.commit()
37 | lock = create_sadlock(session, key)
38 | session.rollback()
39 | self.assertTrue(lock.acquire())
40 | session.close()
41 | lock.release()
42 | self.assertFalse(lock.locked)
43 |
--------------------------------------------------------------------------------