├── tests
│   ├── test_project
│   │   ├── __init__.py
│   │   └── settings.py
│   ├── config.py
│   ├── conftest.py
│   ├── test_django_integration.py
│   ├── helper.py
│   └── test_redis_lock.py
├── docs
│   ├── authors.rst
│   ├── readme.rst
│   ├── changelog.rst
│   ├── contributing.rst
│   ├── requirements.txt
│   ├── redis-lock diagram.png
│   ├── reference
│   │   ├── index.rst
│   │   └── redis_lock.rst
│   ├── redis-lock diagram (v3.0).png
│   ├── installation.rst
│   ├── spelling_wordlist.txt
│   ├── index.rst
│   ├── conf.py
│   └── usage.rst
├── ci
│   ├── requirements.txt
│   ├── templates
│   │   └── .github
│   │       └── workflows
│   │           └── github-actions.yml
│   └── bootstrap.py
├── SECURITY.md
├── pyproject.toml
├── .coveragerc
├── .readthedocs.yml
├── setup.cfg
├── .editorconfig
├── MANIFEST.in
├── examples
│   ├── README.rst
│   ├── test-tmux.py
│   ├── plain.py
│   ├── bench.py
│   └── bench.rst
├── .bumpversion.cfg
├── .pre-commit-config.yaml
├── AUTHORS.rst
├── .gitignore
├── pytest.ini
├── LICENSE
├── .cookiecutterrc
├── src
│   └── redis_lock
│       ├── django_cache.py
│       └── __init__.py
├── tox.ini
├── CONTRIBUTING.rst
├── setup.py
├── .github
│   └── workflows
│       └── github-actions.yml
├── CHANGELOG.rst
└── README.rst
/tests/test_project/__init__.py: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------------------------
/docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst 2 | --------------------------------------------------------------------------------
/docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | --------------------------------------------------------------------------------
/docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CHANGELOG.rst 2 | --------------------------------------------------------------------------------
/docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | --------------------------------------------------------------------------------
/docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx>=1.3 2 | sphinx-py3doc-enhanced-theme 3 | --------------------------------------------------------------------------------
/ci/requirements.txt: -------------------------------------------------------------------------------- 1 | virtualenv>=16.6.0 2 | pip>=19.1.1 3 | setuptools>=18.0.1 4 | six>=1.14.0 5 | tox 6 | --------------------------------------------------------------------------------
/docs/redis-lock diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionelmc/python-redis-lock/HEAD/docs/redis-lock diagram.png --------------------------------------------------------------------------------
/docs/reference/index.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | ..
toctree:: 5 | :glob: 6 | 7 | redis_lock* 8 | -------------------------------------------------------------------------------- /docs/redis-lock diagram (v3.0).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionelmc/python-redis-lock/HEAD/docs/redis-lock diagram (v3.0).png -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Installation 3 | ============ 4 | 5 | At the command line:: 6 | 7 | pip install python-redis-lock 8 | -------------------------------------------------------------------------------- /tests/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | TIMEOUT = int(os.getenv('REDIS_LOCK_TEST_TIMEOUT', 10)) 5 | HELPER = str(Path(__file__).parent / 'helper.py') 6 | -------------------------------------------------------------------------------- /docs/reference/redis_lock.rst: -------------------------------------------------------------------------------- 1 | redis_lock 2 | ========== 3 | 4 | .. testsetup:: 5 | 6 | from redis_lock import * 7 | 8 | .. automodule:: redis_lock 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/spelling_wordlist.txt: -------------------------------------------------------------------------------- 1 | builtin 2 | builtins 3 | classmethod 4 | staticmethod 5 | classmethods 6 | staticmethods 7 | args 8 | kwargs 9 | callstack 10 | Changelog 11 | Indices 12 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | ## Security contact information 2 | 3 | To report a security vulnerability, please use the 4 | [Tidelift security contact](https://tidelift.com/security). 5 | Tidelift will coordinate the fix and disclosure. 6 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=30.3.0", 4 | "wheel", 5 | ] 6 | 7 | [tool.black] 8 | line-length = 140 9 | target-version = ['py37'] 10 | skip-string-normalization = true 11 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [paths] 2 | source = src 3 | 4 | [run] 5 | branch = true 6 | source = 7 | src 8 | tests 9 | parallel = true 10 | 11 | [report] 12 | show_missing = true 13 | precision = 2 14 | omit = *migrations* 15 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 2 | version: 2 3 | sphinx: 4 | configuration: docs/conf.py 5 | formats: all 6 | python: 7 | install: 8 | - requirements: docs/requirements.txt 9 | - method: pip 10 | path: . 
11 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 140 3 | exclude = .tox,.eggs,ci/templates,build,dist 4 | 5 | [tool:isort] 6 | force_single_line = True 7 | line_length = 120 8 | known_first_party = redis_lock 9 | default_section = THIRDPARTY 10 | forced_separate = test_redis_lock,config 11 | skip = .tox,.eggs,ci/templates,build,dist 12 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Contents 3 | ======== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | readme 9 | installation 10 | usage 11 | reference/index 12 | contributing 13 | authors 14 | changelog 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # see https://editorconfig.org/ 2 | root = true 3 | 4 | [*] 5 | # Use Unix-style newlines for most files (except Windows files, see below). 6 | end_of_line = lf 7 | trim_trailing_whitespace = true 8 | indent_style = space 9 | insert_final_newline = true 10 | indent_size = 4 11 | charset = utf-8 12 | 13 | [*.{bat,cmd,ps1}] 14 | end_of_line = crlf 15 | 16 | [*.{yml,yaml}] 17 | indent_size = 2 18 | 19 | [*.tsv] 20 | indent_style = tab 21 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft docs 2 | graft src 3 | graft ci 4 | graft tests 5 | graft examples 6 | 7 | include .bumpversion.cfg 8 | include .cookiecutterrc 9 | include .coveragerc 10 | include .editorconfig 11 | include .github/workflows/github-actions.yml 12 | include .pre-commit-config.yaml 13 | include .readthedocs.yml 14 | include pytest.ini 15 | include tox.ini 16 | 17 | include AUTHORS.rst 18 | include CHANGELOG.rst 19 | include CONTRIBUTING.rst 20 | include LICENSE 21 | include README.rst 22 | 23 | global-exclude *.py[cod] __pycache__/* *.so *.dylib 24 | -------------------------------------------------------------------------------- /examples/README.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ======== 3 | 4 | 5 | Visual display of locks working 6 | ------------------------------- 7 | 8 | Requirements: tmux, tox, redis-server 9 | 10 | To run (make sure the redis server runs on the default port beforehand):: 11 | 12 | python ./examples/test-tmux.py LOCKNAME 13 | 14 | This will open a tmux session with a bunch of panes, all waiting on the same lock. 15 | 16 | After all the panes acquire the lock you can make the process/pane exit by pressing any key. 
17 | 18 | The result should be something like this: https://asciinema.org/a/DhfkKYMWg5IJLSL6LkRaDVwjc 19 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from process_tests import TestProcess 3 | from process_tests import wait_for_strings 4 | 5 | 6 | @pytest.fixture 7 | def redis_socket(tmp_path): 8 | return str(tmp_path.joinpath('redis.sock')) 9 | 10 | 11 | @pytest.fixture 12 | def redis_server(tmp_path, redis_socket): 13 | with TestProcess( 14 | 'redis-server', '--port', '0', '--save', '', '--appendonly', 'yes', '--dir', tmp_path, '--unixsocket', redis_socket 15 | ) as redis_server: 16 | wait_for_strings(redis_server.read, 2, 'ready to accept connections') 17 | yield redis_server 18 | -------------------------------------------------------------------------------- /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 4.0.0 3 | commit = True 4 | tag = True 5 | 6 | [bumpversion:file:setup.py] 7 | search = version='{current_version}' 8 | replace = version='{new_version}' 9 | 10 | [bumpversion:file (badge):README.rst] 11 | search = /v{current_version}.svg 12 | replace = /v{new_version}.svg 13 | 14 | [bumpversion:file (link):README.rst] 15 | search = /v{current_version}...master 16 | replace = /v{new_version}...master 17 | 18 | [bumpversion:file:docs/conf.py] 19 | search = version = release = '{current_version}' 20 | replace = version = release = '{new_version}' 21 | 22 | [bumpversion:file:src/redis_lock/__init__.py] 23 | search = __version__ = '{current_version}' 24 | replace = __version__ = '{new_version}' 25 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # To install the git pre-commit hook run: 2 | # pre-commit install 3 | # To update the pre-commit hooks run: 4 | # pre-commit install-hooks 5 | exclude: '^(\.tox|ci/templates|\.bumpversion\.cfg)(/|$)' 6 | repos: 7 | - repo: https://github.com/pre-commit/pre-commit-hooks 8 | rev: v4.3.0 9 | hooks: 10 | - id: trailing-whitespace 11 | - id: end-of-file-fixer 12 | - id: debug-statements 13 | - repo: https://github.com/timothycrosley/isort 14 | rev: 5.10.1 15 | hooks: 16 | - id: isort 17 | - repo: https://github.com/psf/black 18 | rev: 22.10.0 19 | hooks: 20 | - id: black 21 | - repo: https://github.com/pycqa/flake8 22 | rev: 3.9.2 23 | hooks: 24 | - id: flake8 25 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | 2 | Authors 3 | ======= 4 | 5 | * Ionel Cristian Mărieș - https://blog.ionelmc.ro 6 | * Rob Terhaar - https://github.com/robbyt 7 | * Corey Farwell - http://rwell.org 8 | * Andrey Kobyshev - https://github.com/yokotoka 9 | * Jardel Weyrich - https://twitter.com/jweyrich 10 | * Victor Torres - https://github.com/victor-torres 11 | * Andrew Pashkin - https://github.com/AndreiPashkin 12 | * Tero Vuotila - https://github.com/tvuotila 13 | * Joel Höner - https://github.com/athre0z 14 | * Julie MacDonell - https://github.com/juliemacdonell 15 | * Julien Heller - https://github.com/flux627 16 | * Przemysław Suliga - https://github.com/suligap 17 | * Artem Slobodkin - https://github.com/artslob 18 | * Salomon Smeke Cohen - 
https://github.com/SalomonSmeke 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | __pycache__ 3 | 4 | # C extensions 5 | *.so 6 | 7 | # Packages 8 | *.egg 9 | *.egg-info 10 | dist 11 | build 12 | eggs 13 | .eggs 14 | parts 15 | bin 16 | var 17 | sdist 18 | wheelhouse 19 | develop-eggs 20 | .installed.cfg 21 | lib 22 | lib64 23 | venv*/ 24 | pyvenv*/ 25 | pip-wheel-metadata/ 26 | 27 | # Installer logs 28 | pip-log.txt 29 | 30 | # Unit test / coverage reports 31 | .coverage 32 | .tox 33 | .coverage.* 34 | .pytest_cache/ 35 | nosetests.xml 36 | coverage.xml 37 | htmlcov 38 | 39 | # Translations 40 | *.mo 41 | 42 | # Buildout 43 | .mr.developer.cfg 44 | 45 | # IDE project files 46 | .project 47 | .pydevproject 48 | .idea 49 | .vscode 50 | *.iml 51 | *.komodoproject 52 | 53 | # Complexity 54 | output/*.html 55 | output/*/index.html 56 | 57 | # Sphinx 58 | docs/_build 59 | 60 | .DS_Store 61 | *~ 62 | .*.sw[po] 63 | .build 64 | .ve 65 | .env 66 | .cache 67 | .pytest 68 | .benchmarks 69 | .bootstrap 70 | .appveyor.token 71 | *.bak 72 | 73 | # Mypy Cache 74 | .mypy_cache/ 75 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | # If a pytest section is found in one of the possible config files 3 | # (pytest.ini, tox.ini or setup.cfg), then pytest will not look for any others, 4 | # so if you add a pytest config section elsewhere, 5 | # you will need to delete this section from setup.cfg. 6 | norecursedirs = 7 | .git 8 | .tox 9 | .env 10 | dist 11 | build 12 | migrations 13 | 14 | python_files = 15 | test_*.py 16 | *_test.py 17 | tests.py 18 | addopts = 19 | -ra 20 | --strict-markers 21 | --ignore=docs/conf.py 22 | --ignore=setup.py 23 | --ignore=ci 24 | --ignore=.eggs 25 | --doctest-glob=\*.rst 26 | --tb=short 27 | testpaths = 28 | tests 29 | 30 | # Idea from: https://til.simonwillison.net/pytest/treat-warnings-as-errors 31 | filterwarnings = 32 | error 33 | # You can add exclusions, some examples: 34 | # ignore:'redis_lock' defines default_app_config:PendingDeprecationWarning:: 35 | # ignore:The {{% if::: 36 | # ignore:Coverage disabled via --no-cov switch! 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2013-2022, Ionel Cristian Mărieș. All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the 6 | following conditions are met: 7 | 8 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following 9 | disclaimer. 10 | 11 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following 12 | disclaimer in the documentation and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 15 | INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 16 | DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 17 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 18 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 19 | WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 20 | THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 21 | -------------------------------------------------------------------------------- /tests/test_django_integration.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pytest 4 | 5 | try: 6 | import django 7 | except ImportError: 8 | django = None 9 | else: 10 | from django.core.cache import cache 11 | 12 | 13 | @pytest.fixture(scope='module') 14 | def redis_socket_static(tmpdir_factory): 15 | path = str(tmpdir_factory.getbasetemp() / 'redis.sock') 16 | os.environ['REDIS_SOCKET'] = path 17 | return path 18 | 19 | 20 | @pytest.fixture 21 | def redis_socket(redis_socket_static): 22 | return redis_socket_static 23 | 24 | 25 | @pytest.mark.skipif("not django") 26 | def test_django_works(redis_server): 27 | with cache.lock('whateva'): 28 | pass 29 | 30 | 31 | @pytest.mark.skipif("not django") 32 | def test_django_add_or_set_locked(redis_server): 33 | def creator_42(): 34 | return 42 35 | 36 | def none_creator(): 37 | return None 38 | 39 | def assert_false_creator(): 40 | assert False 41 | 42 | assert cache.locked_get_or_set("foobar-aosl", creator_42) == 42 43 | assert cache.locked_get_or_set("foobar-aosl", assert_false_creator) == 42 44 | 45 | try: 46 | cache.locked_get_or_set("foobar-aosl2", none_creator) 47 | except ValueError: 48 | pass 49 | else: 50 | assert False 51 | 52 | 53 | @pytest.mark.skipif("not django") 54 | def test_reset_all(redis_server): 55 | lock1 = cache.lock("foobar1") 56 | lock2 = cache.lock("foobar2") 57 | lock1.acquire(blocking=False) 58 | lock2.acquire(blocking=False) 59 | cache.reset_all() 60 | lock1 = cache.lock("foobar1") 61 | lock2 = cache.lock("foobar2") 62 | lock1.acquire(blocking=False) 63 | lock2.acquire(blocking=False) 64 | lock1.release() 65 | lock2.release() 66 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals 3 | 4 | import sphinx_py3doc_enhanced_theme 5 | 6 | extensions = [ 7 | 'sphinx.ext.autodoc', 8 | 'sphinx.ext.autosummary', 9 | 'sphinx.ext.coverage', 10 | 'sphinx.ext.doctest', 11 | 'sphinx.ext.extlinks', 12 | 'sphinx.ext.ifconfig', 13 | 'sphinx.ext.napoleon', 14 | 'sphinx.ext.todo', 15 | 'sphinx.ext.viewcode', 16 | ] 17 | source_suffix = '.rst' 18 | master_doc = 'index' 19 | project = 'redis-lock' 20 | year = '2013-2022' 21 | author = 'Ionel Cristian Mărieș' 22 | copyright = '{0}, {1}'.format(year, author) 23 | version = release = '4.0.0' 24 | 25 | linkcheck_ignore = [ 26 | # This redirects to hosted % image on AWS. 27 | 'https://coveralls\\.io/repos/ionelmc/python\\-redis\\-lock/badge\\.svg\\?branch=master\\&service=github', 28 | # This anchor is incorrectly marked as missing. GH bug? 
29 | 'https://github\\.com/jazzband/django\\-redis\\#configure\\-as\\-cache\\-backend', 30 | ] 31 | pygments_style = 'trac' 32 | templates_path = ['.'] 33 | extlinks = { 34 | 'issue': ('https://github.com/ionelmc/python-redis-lock/issues/%s', '#'), 35 | 'pr': ('https://github.com/ionelmc/python-redis-lock/pull/%s', 'PR #'), 36 | } 37 | html_theme = 'sphinx_py3doc_enhanced_theme' 38 | html_theme_path = [sphinx_py3doc_enhanced_theme.get_html_theme_path()] 39 | html_theme_options = { 40 | 'githuburl': 'https://github.com/ionelmc/python-redis-lock/', 41 | } 42 | 43 | html_use_smartypants = True 44 | html_last_updated_fmt = '%b %d, %Y' 45 | html_split_index = False 46 | html_sidebars = { 47 | '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'], 48 | } 49 | html_short_title = '%s-%s' % (project, version) 50 | 51 | napoleon_use_ivar = True 52 | napoleon_use_rtype = False 53 | napoleon_use_param = False 54 | -------------------------------------------------------------------------------- /examples/test-tmux.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import subprocess 3 | import sys 4 | 5 | subprocess.check_call("tox -e py38-dj3-nocov --notest".split()) 6 | 7 | left_commands = [ 8 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 9 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 10 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 11 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 12 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 13 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 14 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 15 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 16 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 17 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 18 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 19 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 20 | ".tox/py38-dj3-nocov/bin/python examples/plain.py %s" % sys.argv[1], 21 | ] 22 | right_commands = left_commands 23 | session = '' 24 | 25 | if right_commands: 26 | session += 'tmux selectp -t0;tmux splitw -hd -p50 \"%s\"; ' % right_commands[-1] 27 | for index, command in enumerate(right_commands[:-1]): 28 | session += 'tmux selectp -t1;tmux splitw -d -p%i \"%s\"; ' % (100 / (len(right_commands) - index), command) 29 | 30 | for index, command in enumerate(left_commands[1:]): 31 | session += 'tmux selectp -t0;tmux splitw -d -p%i \"%s\"; ' % (100 / (len(left_commands) - index), command) 32 | if left_commands: 33 | session += left_commands[0] 34 | 35 | args = [ 36 | 'tmux', 37 | 'new-session', 38 | session, 39 | ] 40 | print('Running ', args) 41 | subprocess.call(args) 42 | -------------------------------------------------------------------------------- /tests/test_project/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for test_project project. 3 | 4 | For more information on this file, see 5 | https://docs.djangoproject.com/en/dev/topics/settings/ 6 | 7 | For the full list of settings and their values, see 8 | https://docs.djangoproject.com/en/dev/ref/settings/ 9 | """ 10 | 11 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
12 | import os 13 | 14 | REDIS_SOCKET = os.environ['REDIS_SOCKET'] 15 | 16 | BASE_DIR = os.path.dirname(os.path.dirname(__file__)) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = '+ln!mmswhbemdn@*v8sbic_n+i&j4+ct8(n=y09s81c)7fyyf2' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | TEMPLATE_DEBUG = True 29 | 30 | ALLOWED_HOSTS = [] 31 | 32 | 33 | # Application definition 34 | 35 | INSTALLED_APPS = () 36 | 37 | MIDDLEWARE_CLASSES = () 38 | 39 | ROOT_URLCONF = 'test_app.urls' 40 | 41 | WSGI_APPLICATION = 'test_project.wsgi.application' 42 | 43 | 44 | # Database 45 | # https://docs.djangoproject.com/en/dev/ref/settings/#databases 46 | 47 | DATABASES = { 48 | 'default': { 49 | 'ENGINE': 'django.db.backends.sqlite3', 50 | 'NAME': ':memory:', 51 | } 52 | } 53 | 54 | # Internationalization 55 | # https://docs.djangoproject.com/en/dev/topics/i18n/ 56 | 57 | LANGUAGE_CODE = 'en-us' 58 | 59 | TIME_ZONE = 'UTC' 60 | 61 | USE_I18N = True 62 | 63 | USE_L10N = True 64 | 65 | USE_TZ = True 66 | 67 | 68 | # Static files (CSS, JavaScript, Images) 69 | # https://docs.djangoproject.com/en/dev/howto/static-files/ 70 | 71 | STATIC_URL = '/static/' 72 | 73 | 74 | CACHES = { 75 | "default": { 76 | "BACKEND": "redis_lock.django_cache.RedisCache", 77 | "LOCATION": f"unix://{REDIS_SOCKET}", 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /examples/plain.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import sched 4 | import sys 5 | import time 6 | 7 | import redis 8 | 9 | import redis_lock 10 | 11 | try: 12 | import termios 13 | import tty 14 | except ImportError: 15 | # Probably Windows. 16 | try: 17 | import msvcrt 18 | except ImportError: 19 | # FIXME what to do on other platforms? 20 | # Just give up here. 21 | raise ImportError('getch not available') 22 | else: 23 | getch = msvcrt.getch 24 | else: 25 | 26 | def getch(): 27 | """getch() -> key character 28 | 29 | Read a single keypress from stdin and return the resulting character. 30 | Nothing is echoed to the console. This call will block if a keypress 31 | is not already available, but will not wait for Enter to be pressed. 32 | 33 | If the pressed key was a modifier key, nothing will be detected; if 34 | it were a special function key, it may return the first character of 35 | of an escape sequence, leaving additional characters in the buffer. 36 | """ 37 | fd = sys.stdin.fileno() 38 | old_settings = termios.tcgetattr(fd) 39 | try: 40 | tty.setraw(fd) 41 | ch = sys.stdin.read(1) 42 | finally: 43 | termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) 44 | return ch 45 | 46 | 47 | logging.basicConfig(level="DEBUG", format="%(asctime)s | %(process)6s | %(message)s") 48 | 49 | c = redis.StrictRedis() 50 | pid = os.getpid() 51 | lock = redis_lock.Lock(c, sys.argv[1], expire=5) 52 | 53 | 54 | def run(): 55 | with lock: 56 | logging.debug("GOT LOCK. WAITING ...") 57 | time.sleep(0.05) 58 | logging.debug("DONE. 
Press any key to exit.") 59 | 60 | getch() 61 | 62 | 63 | if __name__ == "__main__": 64 | s = sched.scheduler(time.time, time.sleep) 65 | now = int(time.time()) / 10 66 | t = (now + 1) * 10 67 | logging.debug("Running in %s seconds ...", t - time.time()) 68 | s.enterabs(t, 0, run, ()) 69 | s.run() 70 | -------------------------------------------------------------------------------- /.cookiecutterrc: -------------------------------------------------------------------------------- 1 | # Generated by cookiepatcher, a small shim around cookiecutter (pip install cookiepatcher) 2 | 3 | default_context: 4 | allow_tests_inside_package: 'no' 5 | appveyor: 'no' 6 | c_extension_function: '-' 7 | c_extension_module: '-' 8 | c_extension_optional: 'no' 9 | c_extension_support: 'no' 10 | c_extension_test_pypi: 'no' 11 | c_extension_test_pypi_username: '-' 12 | codacy: 'no' 13 | codacy_projectid: '[Get ID from https://app.codacy.com/app/ionelmc/python-redis-lock/settings]' 14 | codeclimate: 'no' 15 | codecov: 'yes' 16 | command_line_interface: 'no' 17 | command_line_interface_bin_name: '-' 18 | coveralls: 'yes' 19 | distribution_name: python-redis-lock 20 | email: contact@ionelmc.ro 21 | full_name: Ionel Cristian Mărieș 22 | github_actions: 'yes' 23 | github_actions_osx: 'yes' 24 | github_actions_windows: 'yes' 25 | legacy_python: 'no' 26 | license: BSD 2-Clause License 27 | linter: flake8 28 | package_name: redis_lock 29 | pre_commit: 'yes' 30 | pre_commit_formatter: black 31 | project_name: redis-lock 32 | project_short_description: Lock context manager implemented via redis SETNX/BLPOP. 33 | pypi_badge: 'yes' 34 | pypi_disable_upload: 'no' 35 | release_date: '2020-11-20' 36 | repo_hosting: github.com 37 | repo_hosting_domain: github.com 38 | repo_main_branch: master 39 | repo_name: python-redis-lock 40 | repo_username: ionelmc 41 | requiresio: 'yes' 42 | scrutinizer: 'no' 43 | setup_py_uses_pytest_runner: 'no' 44 | setup_py_uses_setuptools_scm: 'no' 45 | sphinx_docs: 'yes' 46 | sphinx_docs_hosting: https://python-redis-lock.readthedocs.io/ 47 | sphinx_doctest: 'no' 48 | sphinx_theme: sphinx-py3doc-enhanced-theme 49 | test_matrix_configurator: 'no' 50 | test_matrix_separate_coverage: 'yes' 51 | travis: 'no' 52 | travis_osx: 'no' 53 | version: 3.7.0 54 | version_manager: bump2version 55 | website: http://blog.ionelmc.ro 56 | year_from: '2013' 57 | year_to: '2022' 58 | -------------------------------------------------------------------------------- /src/redis_lock/django_cache.py: -------------------------------------------------------------------------------- 1 | from django.core.cache.backends.base import DEFAULT_TIMEOUT 2 | from django_redis.cache import RedisCache as PlainRedisCache 3 | 4 | from redis_lock import Lock 5 | from redis_lock import reset_all 6 | 7 | 8 | class RedisCache(PlainRedisCache): 9 | @property 10 | def __client(self): 11 | try: 12 | return self.client.get_client() 13 | except Exception as exc: 14 | raise NotImplementedError( 15 | f"RedisCache doesn't have a raw client: {exc}. Use 'redis_cache.client.DefaultClient' as the CLIENT_CLASS !" 16 | ) 17 | 18 | def lock(self, key, expire=None, id=None, auto_renewal=False): 19 | return Lock(self.__client, key, expire=expire, id=id, auto_renewal=auto_renewal) 20 | 21 | def locked_get_or_set(self, key, value_creator, version=None, expire=None, id=None, lock_key=None, timeout=DEFAULT_TIMEOUT): 22 | """ 23 | Fetch a given key from the cache. 
If the key does not exist, the key is added and 24 | set to the value returned when calling `value_creator`. The creator function 25 | is invoked inside of a lock. 26 | """ 27 | if lock_key is None: 28 | lock_key = 'get_or_set:' + key 29 | 30 | val = self.get(key, version=version) 31 | if val is not None: 32 | return val 33 | 34 | with self.lock(lock_key, expire=expire, id=id): 35 | # Was the value set while we were trying to acquire the lock? 36 | val = self.get(key, version=version) 37 | if val is not None: 38 | return val 39 | 40 | # Nope, create value now. 41 | val = value_creator() 42 | 43 | if val is None: 44 | raise ValueError('`value_creator` must return a value') 45 | 46 | self.set(key, val, timeout=timeout, version=version) 47 | return val 48 | 49 | def reset_all(self): 50 | """ 51 | Forcibly deletes all locks if its remains (like a crash reason). Use this with care. 52 | """ 53 | reset_all(self.__client) 54 | -------------------------------------------------------------------------------- /ci/templates/.github/workflows/github-actions.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: [push, pull_request] 3 | jobs: 4 | test: 5 | name: {{ '${{ matrix.name }}' }} 6 | runs-on: {{ '${{ matrix.os }}' }} 7 | timeout-minutes: 30 8 | strategy: 9 | fail-fast: false 10 | matrix: 11 | include: 12 | - name: 'check' 13 | python: '3.9' 14 | toxpython: 'python3.9' 15 | tox_env: 'check' 16 | os: 'ubuntu-latest' 17 | - name: 'docs' 18 | python: '3.9' 19 | toxpython: 'python3.9' 20 | tox_env: 'docs' 21 | os: 'ubuntu-latest' 22 | {% for env in tox_environments %} 23 | {% set prefix = env.split('-')[0] -%} 24 | {% if prefix.startswith('pypy') %} 25 | {% set python %}pypy-{{ prefix[4] }}.{{ prefix[5] }}{% endset %} 26 | {% set cpython %}pp{{ prefix[4:5] }}{% endset %} 27 | {% set toxpython %}pypy{{ prefix[4] }}.{{ prefix[5] }}{% endset %} 28 | {% else %} 29 | {% set python %}{{ prefix[2] }}.{{ prefix[3:] }}{% endset %} 30 | {% set cpython %}cp{{ prefix[2:] }}{% endset %} 31 | {% set toxpython %}python{{ prefix[2] }}.{{ prefix[3:] }}{% endset %} 32 | {% endif %} 33 | {% for os, python_arch in [ 34 | ['ubuntu', 'x64'], 35 | ] %} 36 | - name: '{{ env }} ({{ os }})' 37 | python: '{{ python }}' 38 | toxpython: '{{ toxpython }}' 39 | python_arch: '{{ python_arch }}' 40 | tox_env: '{{ env }}{% if 'cover' in env %},codecov{% endif %}' 41 | os: '{{ os }}-latest' 42 | {% endfor %} 43 | {% endfor %} 44 | steps: 45 | - uses: actions/checkout@v2 46 | with: 47 | fetch-depth: 0 48 | - uses: actions/setup-python@v2 49 | with: 50 | python-version: {{ '${{ matrix.python }}' }} 51 | architecture: {{ '${{ matrix.python_arch }}' }} 52 | - name: install redis-server 53 | run: sudo apt-get install -y redis-server 54 | - name: install dependencies 55 | run: | 56 | python -mpip install --progress-bar=off -r ci/requirements.txt 57 | virtualenv --version 58 | pip --version 59 | tox --version 60 | pip list --format=freeze 61 | - name: test 62 | env: 63 | TOXPYTHON: '{{ '${{ matrix.toxpython }}' }}' 64 | run: > 65 | tox -e {{ '${{ matrix.tox_env }}' }} -v 66 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Usage 3 | ===== 4 | 5 | To use redis-lock in a project:: 6 | 7 | import redis_lock 8 | 9 | Blocking lock:: 10 | 11 | conn = StrictRedis() 12 | lock = redis_lock.Lock(conn, "name-of-the-lock") 13 | if lock.acquire(): 14 | 
print("Got the lock. Doing some work ...") 15 | time.sleep(5) 16 | 17 | Blocking lock with timeout:: 18 | 19 | conn = StrictRedis() 20 | lock = redis_lock.Lock(conn, "name-of-the-lock") 21 | if lock.acquire(timeout=3): 22 | print("Got the lock. Doing some work ...") 23 | time.sleep(5) 24 | else: 25 | print("Someone else has the lock.") 26 | 27 | Non-blocking lock:: 28 | 29 | conn = StrictRedis() 30 | lock = redis_lock.Lock(conn, "name-of-the-lock") 31 | if lock.acquire(blocking=False): 32 | print("Got the lock. Doing some work ...") 33 | time.sleep(5) 34 | else: 35 | print("Someone else has the lock.") 36 | 37 | Releasing previously acquired lock:: 38 | 39 | conn = StrictRedis() 40 | lock = redis_lock.Lock(conn, "name-of-the-lock") 41 | lock.acquire() 42 | print("Got the lock. Doing some work ...") 43 | time.sleep(5) 44 | lock.release() 45 | 46 | The above example could be rewritten using context manager:: 47 | 48 | conn = StrictRedis() 49 | with redis_lock.Lock(conn, "name-of-the-lock"): 50 | print("Got the lock. Doing some work ...") 51 | time.sleep(5) 52 | 53 | You can pass `blocking=False` parameter to the contex manager (default value 54 | is True, will raise a NotAcquired exception if lock won't be acquired):: 55 | 56 | conn = StrictRedis() 57 | with redis_lock.Lock(conn, "name-of-the-lock", blocking=False): 58 | print("Got the lock. Doing some work ...") 59 | time.sleep(5) 60 | 61 | In cases, where lock not necessarily in acquired state, and 62 | user need to ensure, that it has a matching ``id``, example:: 63 | 64 | lock1 = Lock(conn, "foo") 65 | lock1.acquire() 66 | lock2 = Lock(conn, "foo", id=lock1.id) 67 | lock2.release() 68 | 69 | To check if lock with same name is already locked 70 | (it can be this or another lock with identical names):: 71 | 72 | is_locked = Lock(conn, "lock-name").locked() 73 | 74 | You can control the log output by modifying various loggers:: 75 | 76 | logging.getLogger("redis_lock.thread").disabled = True 77 | logging.getLogger("redis_lock").disable(logging.DEBUG) 78 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [testenv:bootstrap] 2 | deps = 3 | jinja2 4 | tox 5 | skip_install = true 6 | commands = 7 | python ci/bootstrap.py --no-env 8 | passenv = 9 | * 10 | ; a generative tox configuration, see: https://tox.readthedocs.io/en/latest/config.html#generative-envlist 11 | 12 | [tox] 13 | envlist = 14 | clean, 15 | check, 16 | docs, 17 | {py37,py38,py39,py310,pypy37,pypy38,pypy39}-{cover,nocov}, 18 | report 19 | ignore_basepython_conflict = true 20 | 21 | [testenv] 22 | basepython = 23 | pypy37: {env:TOXPYTHON:pypy3.7} 24 | pypy38: {env:TOXPYTHON:pypy3.8} 25 | pypy39: {env:TOXPYTHON:pypy3.9} 26 | py37: {env:TOXPYTHON:python3.7} 27 | py38: {env:TOXPYTHON:python3.8} 28 | py39: {env:TOXPYTHON:python3.9} 29 | py310: {env:TOXPYTHON:python3.10} 30 | {bootstrap,clean,check,report,docs,codecov,coveralls}: {env:TOXPYTHON:python3} 31 | setenv = 32 | PYTHONPATH={toxinidir}/tests 33 | PYTHONUNBUFFERED=yes 34 | DJANGO_SETTINGS_MODULE=test_project.settings 35 | passenv = 36 | * 37 | usedevelop = 38 | cover: true 39 | nocov: false 40 | deps = 41 | pytest 42 | cover: pytest-cov 43 | Django~=3.2 44 | django-redis==5.2.0 45 | process-tests 46 | redis==4.2.0 47 | gevent==21.12.0 48 | eventlet==0.33.0 49 | commands = 50 | nocov: {posargs:pytest -vv --ignore=src} 51 | cover: {posargs:pytest --cov --cov-report=term-missing -vv} 52 | 53 | 
[testenv:check] 54 | deps = 55 | docutils 56 | check-manifest 57 | flake8 58 | readme-renderer 59 | pygments 60 | isort 61 | skip_install = true 62 | commands = 63 | python setup.py check --strict --metadata --restructuredtext 64 | check-manifest {toxinidir} 65 | flake8 66 | isort --verbose --check-only --diff --filter-files . 67 | 68 | [testenv:docs] 69 | usedevelop = true 70 | deps = 71 | -r{toxinidir}/docs/requirements.txt 72 | commands = 73 | sphinx-build {posargs:-E} -b html docs dist/docs 74 | sphinx-build -b linkcheck docs dist/docs 75 | 76 | [testenv:coveralls] 77 | deps = 78 | coveralls 79 | skip_install = true 80 | commands = 81 | coveralls [] 82 | 83 | [testenv:codecov] 84 | deps = 85 | codecov 86 | skip_install = true 87 | commands = 88 | codecov [] 89 | 90 | [testenv:report] 91 | deps = 92 | coverage 93 | skip_install = true 94 | commands = 95 | coverage report 96 | coverage html 97 | 98 | [testenv:clean] 99 | commands = coverage erase 100 | skip_install = true 101 | deps = 102 | coverage 103 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributing 3 | ============ 4 | 5 | Contributions are welcome, and they are greatly appreciated! Every 6 | little bit helps, and credit will always be given. 7 | 8 | Bug reports 9 | =========== 10 | 11 | When `reporting a bug `_ please include: 12 | 13 | * Your operating system name and version. 14 | * Any details about your local setup that might be helpful in troubleshooting. 15 | * Detailed steps to reproduce the bug. 16 | 17 | Documentation improvements 18 | ========================== 19 | 20 | redis-lock could always use more documentation, whether as part of the 21 | official redis-lock docs, in docstrings, or even on the web in blog posts, 22 | articles, and such. 23 | 24 | Feature requests and feedback 25 | ============================= 26 | 27 | The best way to send feedback is to file an issue at https://github.com/ionelmc/python-redis-lock/issues. 28 | 29 | If you are proposing a feature: 30 | 31 | * Explain in detail how it would work. 32 | * Keep the scope as narrow as possible, to make it easier to implement. 33 | * Remember that this is a volunteer-driven project, and that code contributions are welcome :) 34 | 35 | Development 36 | =========== 37 | 38 | To set up `python-redis-lock` for local development: 39 | 40 | 1. Fork `python-redis-lock `_ 41 | (look for the "Fork" button). 42 | 2. Clone your fork locally:: 43 | 44 | git clone git@github.com:YOURGITHUBNAME/python-redis-lock.git 45 | 46 | 3. Create a branch for local development:: 47 | 48 | git checkout -b name-of-your-bugfix-or-feature 49 | 50 | Now you can make your changes locally. 51 | 52 | 4. When you're done making changes run all the checks and docs builder with `tox `_ one command:: 53 | 54 | tox 55 | 56 | 5. Commit your changes and push your branch to GitHub:: 57 | 58 | git add . 59 | git commit -m "Your detailed description of your changes." 60 | git push origin name-of-your-bugfix-or-feature 61 | 62 | 6. Submit a pull request through the GitHub website. 63 | 64 | Pull Request Guidelines 65 | ----------------------- 66 | 67 | If you need some code review or feedback while you're developing the code just make the pull request. 68 | 69 | For merging, you should: 70 | 71 | 1. Include passing tests (run ``tox``). 72 | 2. Update documentation when there's new API, functionality etc. 73 | 3. 
Add a note to ``CHANGELOG.rst`` about the changes. 74 | 4. Add yourself to ``AUTHORS.rst``. 75 | 76 | 77 | 78 | Tips 79 | ---- 80 | 81 | To run a subset of tests:: 82 | 83 | tox -e envname -- pytest -k test_myfeature 84 | 85 | To run all the test environments in *parallel*:: 86 | 87 | tox -p auto 88 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- encoding: utf-8 -*- 3 | 4 | import io 5 | import re 6 | from glob import glob 7 | from os.path import basename 8 | from os.path import dirname 9 | from os.path import join 10 | from os.path import splitext 11 | 12 | from setuptools import find_packages 13 | from setuptools import setup 14 | 15 | 16 | def read(*names, **kwargs): 17 | with io.open(join(dirname(__file__), *names), encoding=kwargs.get('encoding', 'utf8')) as fh: 18 | return fh.read() 19 | 20 | 21 | setup( 22 | name='python-redis-lock', 23 | version='4.0.0', 24 | license='BSD-2-Clause', 25 | description='Lock context manager implemented via redis SETNX/BLPOP.', 26 | long_description='{}\n{}'.format( 27 | re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')), 28 | re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst')), 29 | ), 30 | author='Ionel Cristian Mărieș', 31 | author_email='contact@ionelmc.ro', 32 | url='https://github.com/ionelmc/python-redis-lock', 33 | packages=find_packages('src'), 34 | package_dir={'': 'src'}, 35 | py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')], 36 | include_package_data=True, 37 | zip_safe=False, 38 | classifiers=[ 39 | # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers 40 | 'Development Status :: 5 - Production/Stable', 41 | 'Intended Audience :: Developers', 42 | 'License :: OSI Approved :: BSD License', 43 | 'Operating System :: Unix', 44 | 'Operating System :: POSIX', 45 | 'Operating System :: Microsoft :: Windows', 46 | 'Programming Language :: Python', 47 | 'Programming Language :: Python :: 3', 48 | 'Programming Language :: Python :: 3 :: Only', 49 | 'Programming Language :: Python :: 3.7', 50 | 'Programming Language :: Python :: 3.8', 51 | 'Programming Language :: Python :: 3.9', 52 | 'Programming Language :: Python :: 3.10', 53 | 'Programming Language :: Python :: Implementation :: CPython', 54 | 'Programming Language :: Python :: Implementation :: PyPy', 55 | # uncomment if you test on these interpreters: 56 | # 'Programming Language :: Python :: Implementation :: IronPython', 57 | # 'Programming Language :: Python :: Implementation :: Jython', 58 | # 'Programming Language :: Python :: Implementation :: Stackless', 59 | 'Topic :: Utilities', 60 | ], 61 | project_urls={ 62 | 'Documentation': 'https://python-redis-lock.readthedocs.io/', 63 | 'Changelog': 'https://python-redis-lock.readthedocs.io/en/latest/changelog.html', 64 | 'Issue Tracker': 'https://github.com/ionelmc/python-redis-lock/issues', 65 | }, 66 | keywords=[ 67 | # eg: 'keyword1', 'keyword2', 'keyword3', 68 | ], 69 | python_requires='>=3.7', 70 | install_requires=[ 71 | 'redis>=2.10.0', 72 | ], 73 | extras_require={ 74 | 'django': [ 75 | 'django-redis>=3.8.0', 76 | ] 77 | }, 78 | ) 79 | -------------------------------------------------------------------------------- /tests/helper.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import 
print_function 3 | 4 | import logging 5 | import os 6 | import sys 7 | import threading 8 | import time 9 | 10 | from redis import StrictRedis 11 | 12 | from redis_lock import Lock 13 | 14 | from config import TIMEOUT 15 | 16 | if __name__ == '__main__': 17 | logging.basicConfig( 18 | level=logging.DEBUG, format='%(process)d %(asctime)s,%(msecs)05d %(name)s %(levelname)s %(message)s', datefmt="%x~%X" 19 | ) 20 | redis_socket = sys.argv[1] 21 | test_name = sys.argv[2] 22 | if ':' in test_name: 23 | test_name, effect = test_name.split(':') 24 | logging.info('Applying effect %r.', effect) 25 | if effect == 'gevent': 26 | from gevent import monkey 27 | 28 | monkey.patch_all() 29 | elif effect == 'eventlet': 30 | import eventlet 31 | 32 | eventlet.monkey_patch() 33 | else: 34 | raise RuntimeError('Invalid effect spec %r.' % effect) 35 | logging.info('threading.get_ident.__module__=%s', threading.get_ident.__module__) 36 | if test_name == 'test_simple': 37 | conn = StrictRedis(unix_socket_path=redis_socket) 38 | with Lock(conn, "foobar"): 39 | time.sleep(0.1) 40 | elif test_name == 'test_simple_auto_renewal': 41 | conn = StrictRedis(unix_socket_path=redis_socket) 42 | with Lock(conn, "foobar", expire=1, auto_renewal=True) as lock: 43 | time.sleep(2) 44 | elif test_name == 'test_no_block': 45 | conn = StrictRedis(unix_socket_path=redis_socket) 46 | lock = Lock(conn, "foobar") 47 | res = lock.acquire(blocking=False) 48 | logging.info("acquire=>%s", res) 49 | elif test_name == 'test_timeout': 50 | conn = StrictRedis(unix_socket_path=redis_socket) 51 | with Lock(conn, "foobar"): 52 | time.sleep(1) 53 | elif test_name == 'test_expire': 54 | conn = StrictRedis(unix_socket_path=redis_socket) 55 | with Lock(conn, "foobar", expire=TIMEOUT / 4): 56 | time.sleep(0.1) 57 | with Lock(conn, "foobar", expire=TIMEOUT / 4): 58 | time.sleep(0.1) 59 | elif test_name == 'test_no_overlap': 60 | from sched import scheduler 61 | 62 | sched = scheduler(time.time, time.sleep) 63 | start = time.time() + TIMEOUT / 2 64 | 65 | # the idea is to start all the lock at the same time - we use the scheduler to start everything in TIMEOUT/2 seconds, by 66 | # that time all the forks should be ready 67 | 68 | def cb_no_overlap(): 69 | with Lock(conn, "foobar"): 70 | time.sleep(0.001) 71 | 72 | sched.enterabs(start, 0, cb_no_overlap, ()) 73 | pids = [] 74 | 75 | for _ in range(125): 76 | pid = os.fork() 77 | if pid: 78 | pids.append(pid) 79 | else: 80 | try: 81 | conn = StrictRedis(unix_socket_path=redis_socket) 82 | sched.run() 83 | finally: 84 | os._exit(0) 85 | for pid in pids: 86 | os.waitpid(pid, 0) 87 | else: 88 | raise RuntimeError('Invalid test spec %r.' 
% test_name) 89 | logging.info('DIED.') 90 | -------------------------------------------------------------------------------- /examples/bench.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import signal 3 | import sys 4 | import time 5 | from concurrent.futures import ProcessPoolExecutor 6 | from sched import scheduler 7 | 8 | from redis import StrictRedis 9 | 10 | from redis_lock import Lock 11 | from redis_lock import logger 12 | 13 | logging.basicConfig(level="WARN", format="%(message)s") 14 | logger.setLevel("WARN") 15 | 16 | 17 | class Exit(Exception): 18 | pass 19 | 20 | 21 | def bail(n, f): 22 | raise Exit() 23 | 24 | 25 | signal.signal(signal.SIGALRM, bail) 26 | 27 | 28 | def test(arg): 29 | t, duration, type_ = arg 30 | conn = StrictRedis() 31 | conn.flushdb() 32 | ret = [] 33 | 34 | def run(): 35 | iterations = 0 36 | signal.setitimer(signal.ITIMER_REAL, int(sys.argv[1])) 37 | try: 38 | if type_ == 'redis_lock': 39 | lock = Lock(conn, "test-lock") 40 | elif type_ == 'native': 41 | lock = conn.lock("test-lock") 42 | else: 43 | raise RuntimeError 44 | while True: 45 | with lock: 46 | iterations += 1 47 | if duration: 48 | time.sleep(duration) 49 | except Exit as exc: 50 | logging.info("Got %r. Returning ...", exc) 51 | ret.append(iterations) 52 | 53 | sched = scheduler(time.time, time.sleep) 54 | logging.info("Running in %s seconds ...", t - time.time()) 55 | sched.enterabs(t, 0, run, ()) 56 | sched.run() 57 | return ret[0] 58 | 59 | 60 | logging.critical("============== ============= =========== ========= ========== ========== ========== ==========") 61 | logging.critical("Implementation Lock duration Concurrency Acquires: Total Avg Min Max") 62 | logging.critical("============== ============= =========== ========= ========== ========== ========== ==========") 63 | 64 | 65 | for concurrency in (1, 2, 3, 6, 12, 24, 48): 66 | for duration in ( 67 | 0, 68 | 0.01, 69 | 0.5, 70 | ): 71 | for type_ in ( 72 | 'redis_lock', 73 | 'native', 74 | ): 75 | with ProcessPoolExecutor(max_workers=concurrency) as pool: 76 | t = round(time.time()) + 1 77 | load = [(t, duration, type_) for _ in range(concurrency)] 78 | logging.info("Running %s", load) 79 | ret = [i for i in pool.map(test, load)] 80 | if concurrency > 1: 81 | logging.critical( 82 | "%14s %12.3fs %11s %20s %10.2f %10s %10s", 83 | type_, 84 | duration, 85 | concurrency, 86 | sum(ret), 87 | sum(ret) / len(ret), 88 | min(ret), 89 | max(ret), 90 | ) 91 | else: 92 | logging.critical( 93 | "%14s %12.3fs %11s %20s", 94 | type_, 95 | duration, 96 | concurrency, 97 | sum(ret), 98 | ) 99 | logging.critical("============== ============= =========== ========= ========== ========== ========== ==========") 100 | -------------------------------------------------------------------------------- /ci/bootstrap.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from __future__ import absolute_import 4 | from __future__ import print_function 5 | from __future__ import unicode_literals 6 | 7 | import os 8 | import subprocess 9 | import sys 10 | from os.path import abspath 11 | from os.path import dirname 12 | from os.path import exists 13 | from os.path import join 14 | from os.path import relpath 15 | 16 | base_path = dirname(dirname(abspath(__file__))) 17 | templates_path = join(base_path, "ci", "templates") 18 | 19 | 20 | def check_call(args): 21 | print("+", *args) 22 | subprocess.check_call(args) 23 | 
24 | 25 | def exec_in_env(): 26 | env_path = join(base_path, ".tox", "bootstrap") 27 | if sys.platform == "win32": 28 | bin_path = join(env_path, "Scripts") 29 | else: 30 | bin_path = join(env_path, "bin") 31 | if not exists(env_path): 32 | import subprocess 33 | 34 | print("Making bootstrap env in: {0} ...".format(env_path)) 35 | try: 36 | check_call([sys.executable, "-m", "venv", env_path]) 37 | except subprocess.CalledProcessError: 38 | try: 39 | check_call([sys.executable, "-m", "virtualenv", env_path]) 40 | except subprocess.CalledProcessError: 41 | check_call(["virtualenv", env_path]) 42 | print("Installing `jinja2` into bootstrap environment...") 43 | check_call([join(bin_path, "pip"), "install", "jinja2", "tox"]) 44 | python_executable = join(bin_path, "python") 45 | if not os.path.exists(python_executable): 46 | python_executable += '.exe' 47 | 48 | print("Re-executing with: {0}".format(python_executable)) 49 | print("+ exec", python_executable, __file__, "--no-env") 50 | os.execv(python_executable, [python_executable, __file__, "--no-env"]) 51 | 52 | 53 | def main(): 54 | import jinja2 55 | 56 | print("Project path: {0}".format(base_path)) 57 | 58 | jinja = jinja2.Environment( 59 | loader=jinja2.FileSystemLoader(templates_path), 60 | trim_blocks=True, 61 | lstrip_blocks=True, 62 | keep_trailing_newline=True, 63 | ) 64 | 65 | tox_environments = [ 66 | line.strip() 67 | # 'tox' need not be installed globally, but must be importable 68 | # by the Python that is running this script. 69 | # This uses sys.executable the same way that the call in 70 | # cookiecutter-pylibrary/hooks/post_gen_project.py 71 | # invokes this bootstrap.py itself. 72 | for line in subprocess.check_output([sys.executable, '-m', 'tox', '--listenvs'], universal_newlines=True).splitlines() 73 | ] 74 | tox_environments = [line for line in tox_environments if line.startswith('py')] 75 | 76 | for root, _, files in os.walk(templates_path): 77 | for name in files: 78 | relative = relpath(root, templates_path) 79 | with open(join(base_path, relative, name), "w") as fh: 80 | fh.write(jinja.get_template(join(relative, name)).render(tox_environments=tox_environments)) 81 | print("Wrote {}".format(name)) 82 | print("DONE.") 83 | 84 | 85 | if __name__ == "__main__": 86 | args = sys.argv[1:] 87 | if args == ["--no-env"]: 88 | main() 89 | elif not args: 90 | exec_in_env() 91 | else: 92 | print("Unexpected arguments {0}".format(args), file=sys.stderr) 93 | sys.exit(1) 94 | -------------------------------------------------------------------------------- /.github/workflows/github-actions.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: [push, pull_request] 3 | jobs: 4 | test: 5 | name: ${{ matrix.name }} 6 | runs-on: ${{ matrix.os }} 7 | timeout-minutes: 30 8 | strategy: 9 | fail-fast: false 10 | matrix: 11 | include: 12 | - name: 'check' 13 | python: '3.9' 14 | toxpython: 'python3.9' 15 | tox_env: 'check' 16 | os: 'ubuntu-latest' 17 | - name: 'docs' 18 | python: '3.9' 19 | toxpython: 'python3.9' 20 | tox_env: 'docs' 21 | os: 'ubuntu-latest' 22 | - name: 'py37-cover (ubuntu)' 23 | python: '3.7' 24 | toxpython: 'python3.7' 25 | python_arch: 'x64' 26 | tox_env: 'py37-cover,codecov' 27 | os: 'ubuntu-latest' 28 | - name: 'py37-nocov (ubuntu)' 29 | python: '3.7' 30 | toxpython: 'python3.7' 31 | python_arch: 'x64' 32 | tox_env: 'py37-nocov' 33 | os: 'ubuntu-latest' 34 | - name: 'py38-cover (ubuntu)' 35 | python: '3.8' 36 | toxpython: 'python3.8' 37 | python_arch: 'x64' 38 | 
tox_env: 'py38-cover,codecov' 39 | os: 'ubuntu-latest' 40 | - name: 'py38-nocov (ubuntu)' 41 | python: '3.8' 42 | toxpython: 'python3.8' 43 | python_arch: 'x64' 44 | tox_env: 'py38-nocov' 45 | os: 'ubuntu-latest' 46 | - name: 'py39-cover (ubuntu)' 47 | python: '3.9' 48 | toxpython: 'python3.9' 49 | python_arch: 'x64' 50 | tox_env: 'py39-cover,codecov' 51 | os: 'ubuntu-latest' 52 | - name: 'py39-nocov (ubuntu)' 53 | python: '3.9' 54 | toxpython: 'python3.9' 55 | python_arch: 'x64' 56 | tox_env: 'py39-nocov' 57 | os: 'ubuntu-latest' 58 | - name: 'py310-cover (ubuntu)' 59 | python: '3.10' 60 | toxpython: 'python3.10' 61 | python_arch: 'x64' 62 | tox_env: 'py310-cover,codecov' 63 | os: 'ubuntu-latest' 64 | - name: 'py310-nocov (ubuntu)' 65 | python: '3.10' 66 | toxpython: 'python3.10' 67 | python_arch: 'x64' 68 | tox_env: 'py310-nocov' 69 | os: 'ubuntu-latest' 70 | - name: 'pypy37-cover (ubuntu)' 71 | python: 'pypy-3.7' 72 | toxpython: 'pypy3.7' 73 | python_arch: 'x64' 74 | tox_env: 'pypy37-cover,codecov' 75 | os: 'ubuntu-latest' 76 | - name: 'pypy37-nocov (ubuntu)' 77 | python: 'pypy-3.7' 78 | toxpython: 'pypy3.7' 79 | python_arch: 'x64' 80 | tox_env: 'pypy37-nocov' 81 | os: 'ubuntu-latest' 82 | - name: 'pypy38-cover (ubuntu)' 83 | python: 'pypy-3.8' 84 | toxpython: 'pypy3.8' 85 | python_arch: 'x64' 86 | tox_env: 'pypy38-cover,codecov' 87 | os: 'ubuntu-latest' 88 | - name: 'pypy38-nocov (ubuntu)' 89 | python: 'pypy-3.8' 90 | toxpython: 'pypy3.8' 91 | python_arch: 'x64' 92 | tox_env: 'pypy38-nocov' 93 | os: 'ubuntu-latest' 94 | - name: 'pypy39-cover (ubuntu)' 95 | python: 'pypy-3.9' 96 | toxpython: 'pypy3.9' 97 | python_arch: 'x64' 98 | tox_env: 'pypy39-cover,codecov' 99 | os: 'ubuntu-latest' 100 | - name: 'pypy39-nocov (ubuntu)' 101 | python: 'pypy-3.9' 102 | toxpython: 'pypy3.9' 103 | python_arch: 'x64' 104 | tox_env: 'pypy39-nocov' 105 | os: 'ubuntu-latest' 106 | steps: 107 | - uses: actions/checkout@v2 108 | with: 109 | fetch-depth: 0 110 | - uses: actions/setup-python@v2 111 | with: 112 | python-version: ${{ matrix.python }} 113 | architecture: ${{ matrix.python_arch }} 114 | - name: install redis-server 115 | run: sudo apt-get install -y redis-server 116 | - name: install dependencies 117 | run: | 118 | python -mpip install --progress-bar=off -r ci/requirements.txt 119 | virtualenv --version 120 | pip --version 121 | tox --version 122 | pip list --format=freeze 123 | - name: test 124 | env: 125 | TOXPYTHON: '${{ matrix.toxpython }}' 126 | run: > 127 | tox -e ${{ matrix.tox_env }} -v 128 | -------------------------------------------------------------------------------- /examples/bench.rst: -------------------------------------------------------------------------------- 1 | Benchmarks 2 | ========== 3 | 4 | Easy way to run it locally, provides you have a redis server running on default port:: 5 | 6 | tox -e py38-dj3-cover -- python examples/bench.py 10 7 | 8 | Note that the database will lose all it's data. The benchmark will keep using a lock in a loop till 10 seconds elapse with various settings. 9 | The concurrency is the number of processes that will try to acquire the same log and the lock duration is an artificial time slept before 10 | releasing. 
11 | 12 | My local run with version 3.6.0 of redis-lock: 13 | 14 | ============== ============= =========== ========= ========== ========== ========== ========== 15 | Implementation Lock duration Concurrency Acquires: Total Avg Min Max 16 | ============== ============= =========== ========= ========== ========== ========== ========== 17 | redis_lock 0.000s 1 26296 18 | native 0.000s 1 35605 19 | redis_lock 0.010s 1 931 20 | native 0.010s 1 945 21 | redis_lock 0.500s 1 20 22 | native 0.500s 1 20 23 | redis_lock 0.000s 2 35477 17738.50 17661 17816 24 | native 0.000s 2 34861 17430.50 13930 20931 25 | redis_lock 0.010s 2 940 470.00 470 470 26 | native 0.010s 2 942 471.00 461 481 27 | redis_lock 0.500s 2 20 10.00 10 10 28 | native 0.500s 2 20 10.00 0 20 29 | redis_lock 0.000s 3 46123 15374.33 15291 15437 30 | native 0.000s 3 35285 11761.67 7759 14038 31 | redis_lock 0.010s 3 943 314.33 314 315 32 | native 0.010s 3 944 314.67 0 776 33 | redis_lock 0.500s 3 20 6.67 6 7 34 | native 0.500s 3 20 6.67 0 20 35 | redis_lock 0.000s 6 42249 7041.50 6863 7170 36 | native 0.000s 6 33852 5642.00 4488 6864 37 | redis_lock 0.010s 6 942 157.00 157 157 38 | native 0.010s 6 945 157.50 19 275 39 | redis_lock 0.500s 6 20 3.33 3 4 40 | native 0.500s 6 20 3.33 0 20 41 | redis_lock 0.000s 12 42506 3542.17 3206 3819 42 | native 0.000s 12 34203 2850.25 1748 4492 43 | redis_lock 0.010s 12 942 78.50 77 79 44 | native 0.010s 12 944 78.67 0 332 45 | redis_lock 0.500s 12 20 1.67 1 2 46 | native 0.500s 12 20 1.67 0 20 47 | redis_lock 0.000s 24 42192 1758.00 1603 1893 48 | native 0.000s 24 34925 1455.21 681 2402 49 | redis_lock 0.010s 24 944 39.33 39 40 50 | native 0.010s 24 945 39.38 0 256 51 | redis_lock 0.500s 24 20 0.83 0 1 52 | native 0.500s 24 20 0.83 0 20 53 | redis_lock 0.000s 48 44867 934.73 768 1172 54 | native 0.000s 48 34961 728.35 311 1399 55 | redis_lock 0.010s 48 943 19.65 19 20 56 | native 0.010s 48 942 19.62 0 254 57 | redis_lock 0.500s 48 20 0.42 0 1 58 | native 0.500s 48 20 0.42 0 20 59 | ============== ============= =========== ========= ========== ========== ========== ========== 60 | 61 | Key takeaways: 62 | 63 | * For a single client (no contention) redis-lock is a little bit slower. In the past it was faster but various fixes added a little bit of 64 | overhead in the lock releasing script. Note the ``Total`` column. 65 | * When two clients are involved things change a lot: 66 | 67 | * The native implementation will loose throughput because the acquiring routine basically does ``while True: sleep(0.1)``. 68 | Note the ``Total`` column. 69 | * The native implementation favours the first client (it will get most of the acquires because the waiting client simply sleeps a lot). 70 | Note the ``Min`` column. 71 | 72 | * When either concurrency (number of clients) or duration (amount of time slept while lock is acquired) are high for the native 73 | implementation things get very wild: 74 | 75 | * Some clients never get to acquire the lock. 76 | Note the ``Min`` column being ``0`` and the ``Max`` column being very high (indicating how many acquires a single client got). 77 | -------------------------------------------------------------------------------- /CHANGELOG.rst: -------------------------------------------------------------------------------- 1 | 2 | Changelog 3 | ========= 4 | 5 | 4.0.0 (2022-10-17) 6 | ------------------ 7 | 8 | * Dropped support for Python 2.7 and 3.6. 9 | * Switched from Travis to GitHub Actions. 10 | * Made logging messages more consistent. 
11 | * Replaced the ``redis_lock.refresh.thread.*`` loggers with a single ``redis_lock.refresh.thread`` logger. 12 | * Various testing cleanup (mainly removal of hardcoded tmp paths). 13 | 14 | 3.7.0 (2020-11-20) 15 | ------------------ 16 | 17 | * Made logger names more specific. You can now have granular filtering on these new logger names: 18 | 19 | * ``redis_lock.acquire`` (emits `DEBUG` messages) 20 | * ``redis_lock.acquire`` (emits `WARN` messages) 21 | * ``redis_lock.acquire`` (emits `INFO` messages) 22 | * ``redis_lock.refresh.thread.start`` (emits `DEBUG` messages) 23 | * ``redis_lock.refresh.thread.exit`` (emits `DEBUG` messages) 24 | * ``redis_lock.refresh.start`` (emits `DEBUG` messages) 25 | * ``redis_lock.refresh.shutdown`` (emits `DEBUG` messages) 26 | * ``redis_lock.refresh.exit`` (emits `DEBUG` messages) 27 | * ``redis_lock.release`` (emits `DEBUG` messages) 28 | 29 | Contributed by Salomon Smeke Cohen in :pr:`80`. 30 | * Fixed a few CI issues regarding doc checks. 31 | Contributed by Salomon Smeke Cohen in :pr:`81`. 32 | 33 | 3.6.0 (2020-07-23) 34 | ------------------ 35 | 36 | * Improved ``timeout``/``expire`` validation so that: 37 | 38 | - ``timeout`` and ``expire`` are converted to ``None`` if they are falsy. Previously only ``None`` disabled these options, other falsy 39 | values created buggy situations. 40 | - Using ``timeout`` greater than ``expire`` is now allowed, if ``auto_renewal`` is set to ``True``. Previously a ``TimeoutTooLarge`` error 41 | was raised. 42 | See :issue:`74`. 43 | - Negative ``timeout`` or ``expire`` are disallowed. Previously such values were allowed, and created buggy situations. 44 | See :issue:`73`. 45 | * Updated benchmark and examples. 46 | * Removed the custom script caching code. Now the ``register_script`` method from the redis client is used. 47 | This should fix possible issues with redis clusters, as the redis client has some specific handling for that. 48 | 49 | 3.5.0 (2020-01-13) 50 | ------------------ 51 | 52 | * Added a ``locked`` method. Contributed by Artem Slobodkin in :pr:`72`. 53 | 54 | 3.4.0 (2019-12-06) 55 | ------------------ 56 | 57 | * Fixed a regression that can cause deadlocks or slowdowns in certain configurations. 58 | See: :issue:`71`. 59 | 60 | 3.3.1 (2019-01-19) 61 | ------------------ 62 | 63 | * Fixed failures when running python-redis-lock 3.3 alongside 3.2. 64 | See: :issue:`64`. 65 | 66 | 3.3.0 (2019-01-17) 67 | ------------------ 68 | 69 | * Fixed deprecated use of ``warnings`` API. Contributed by Julie MacDonell in 70 | :pr:`54`. 71 | * Added ``auto_renewal`` option in ``RedisCache.lock`` (the Django cache backend wrapper). Contributed by c 72 | in :pr:`55`. 73 | * Changed log level for "%(script)s not cached" from WARNING to INFO. 74 | * Added support for using ``decode_responses=True``. Lock keys are pure ASCII now. 75 | 76 | 3.2.0 (2016-10-29) 77 | ------------------ 78 | 79 | * Changed the signal key cleanup operation to be done without any expires. This prevents keys from lingering around for some time. 80 | Contributed by Andrew Pashkin in :pr:`38`. 81 | * Allow locks with a given `id` to be acquired. Previously it assumed that if you specify the `id` then the lock was already 82 | acquired. See :issue:`44` and 83 | :issue:`39`. 84 | * Allow using other redis clients with ``strict=False``. Normally you're expected to pass in an instance 85 | of ``redis.StrictRedis``. 86 | * Added convenience method `locked_get_or_set` to Django cache backend.
87 | 88 | 3.1.0 (2016-04-16) 89 | ------------------ 90 | 91 | * Changed the auto renewal to automatically stop the renewal thread if lock gets garbage collected. Contributed by 92 | Andrew Pashkin in :pr:`33`. 93 | 94 | 3.0.0 (2016-01-16) 95 | ------------------ 96 | 97 | * Changed ``release`` so that it expires signal-keys immediately. Contributed by Andrew Pashkin in :pr:`28`. 98 | * Resetting locks (``reset`` or ``reset_all``) will release the lock. If there's someone waiting on the reset lock now it will 99 | acquire it. Contributed by Andrew Pashkin in :pr:`29`. 100 | * Added the ``extend`` method on ``Lock`` objects. Contributed by Andrew Pashkin in :pr:`24`. 101 | * Documentation improvements on ``release`` method. Contributed by Andrew Pashkin in :pr:`22`. 102 | * Fixed ``acquire(block=True)`` handling when ``expire`` option was used (it wasn't blocking indefinitely). Contributed by 103 | Tero Vuotila in :pr:`35`. 104 | * Changed ``release`` to check if lock was acquired with he same id. If not, ``NotAcquired`` will be raised. 105 | Previously there was just a check if it was acquired with the same instance (self._held). 106 | **BACKWARDS INCOMPATIBLE** 107 | * Removed the ``force`` option from ``release`` - it wasn't really necessary and it only encourages sloppy programming. See 108 | :issue:`25`. 109 | **BACKWARDS INCOMPATIBLE** 110 | * Dropped tests for Python 2.6. It may work but it is unsupported. 111 | 112 | 2.3.0 (2015-09-27) 113 | ------------------ 114 | 115 | * Added the ``timeout`` option. Contributed by Victor Torres in :pr:`20`. 116 | 117 | 2.2.0 (2015-08-19) 118 | ------------------ 119 | 120 | * Added the ``auto_renewal`` option. Contributed by Nick Groenen in :pr:`18`. 121 | 122 | 2.1.0 (2015-03-12) 123 | ------------------ 124 | 125 | * New specific exception classes: ``AlreadyAcquired`` and ``NotAcquired``. 126 | * Slightly improved efficiency when non-waiting acquires are used. 127 | 128 | 2.0.0 (2014-12-29) 129 | ------------------ 130 | 131 | * Rename ``Lock.token`` to ``Lock.id``. Now only allowed to be set via constructor. Contributed by Jardel Weyrich in :pr:`11`. 132 | 133 | 1.0.0 (2014-12-23) 134 | ------------------ 135 | 136 | * Fix Django integration. (reported by Jardel Weyrich) 137 | * Reorganize tests to use py.test. 138 | * Add test for Django integration. 139 | * Add ``reset_all`` functionality. Contributed by Yokotoka in :pr:`7`. 140 | * Add ``Lock.reset`` functionality. 141 | * Expose the ``Lock.token`` attribute. 142 | 143 | 0.1.2 (2013-11-05) 144 | ------------------ 145 | 146 | * `?` 147 | 148 | 0.1.1 (2013-10-26) 149 | ------------------ 150 | 151 | * `?` 152 | 153 | 0.1.0 (2013-10-26) 154 | ------------------ 155 | 156 | * `?` 157 | 158 | 0.0.1 (2013-10-25) 159 | ------------------ 160 | 161 | * First release on PyPI. 162 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Overview 3 | ======== 4 | 5 | .. start-badges 6 | 7 | .. list-table:: 8 | :stub-columns: 1 9 | 10 | * - docs 11 | - |docs| 12 | * - tests 13 | - | |github-actions| |requires| 14 | | |coveralls| |codecov| 15 | * - package 16 | - | |version| |wheel| |supported-versions| |supported-implementations| 17 | | |commits-since| 18 | .. |docs| image:: https://readthedocs.org/projects/python-redis-lock/badge/?style=flat 19 | :target: https://python-redis-lock.readthedocs.io/ 20 | :alt: Documentation Status 21 | 22 | .. 
|github-actions| image:: https://github.com/ionelmc/python-redis-lock/actions/workflows/github-actions.yml/badge.svg 23 | :alt: GitHub Actions Build Status 24 | :target: https://github.com/ionelmc/python-redis-lock/actions 25 | 26 | .. |requires| image:: https://requires.io/github/ionelmc/python-redis-lock/requirements.svg?branch=master 27 | :alt: Requirements Status 28 | :target: https://requires.io/github/ionelmc/python-redis-lock/requirements/?branch=master 29 | 30 | .. |coveralls| image:: https://coveralls.io/repos/ionelmc/python-redis-lock/badge.svg?branch=master&service=github 31 | :alt: Coverage Status 32 | :target: https://coveralls.io/r/ionelmc/python-redis-lock 33 | 34 | .. |codecov| image:: https://codecov.io/gh/ionelmc/python-redis-lock/branch/master/graphs/badge.svg?branch=master 35 | :alt: Coverage Status 36 | :target: https://codecov.io/github/ionelmc/python-redis-lock 37 | 38 | .. |version| image:: https://img.shields.io/pypi/v/python-redis-lock.svg 39 | :alt: PyPI Package latest release 40 | :target: https://pypi.org/project/python-redis-lock 41 | 42 | .. |wheel| image:: https://img.shields.io/pypi/wheel/python-redis-lock.svg 43 | :alt: PyPI Wheel 44 | :target: https://pypi.org/project/python-redis-lock 45 | 46 | .. |supported-versions| image:: https://img.shields.io/pypi/pyversions/python-redis-lock.svg 47 | :alt: Supported versions 48 | :target: https://pypi.org/project/python-redis-lock 49 | 50 | .. |supported-implementations| image:: https://img.shields.io/pypi/implementation/python-redis-lock.svg 51 | :alt: Supported implementations 52 | :target: https://pypi.org/project/python-redis-lock 53 | 54 | .. |commits-since| image:: https://img.shields.io/github/commits-since/ionelmc/python-redis-lock/v4.0.0.svg 55 | :alt: Commits since latest release 56 | :target: https://github.com/ionelmc/python-redis-lock/compare/v4.0.0...master 57 | 58 | 59 | 60 | .. end-badges 61 | 62 | Lock context manager implemented via redis SETNX/BLPOP. 63 | 64 | * Free software: BSD 2-Clause License 65 | 66 | Interface targeted to be exactly like `threading.Lock `_. 67 | 68 | Usage 69 | ===== 70 | 71 | Because we don't want to require users to share the lock instance across processes you will have to give them names. 72 | 73 | .. code-block:: python 74 | 75 | from redis import Redis 76 | conn = Redis() 77 | 78 | import redis_lock 79 | lock = redis_lock.Lock(conn, "name-of-the-lock") 80 | if lock.acquire(blocking=False): 81 | print("Got the lock.") 82 | lock.release() 83 | else: 84 | print("Someone else has the lock.") 85 | 86 | Locks as Context Managers 87 | ========================= 88 | 89 | .. code-block:: python 90 | 91 | conn = StrictRedis() 92 | with redis_lock.Lock(conn, "name-of-the-lock"): 93 | print("Got the lock. Doing some work ...") 94 | time.sleep(5) 95 | 96 | 97 | You can also associate an identifier along with the lock so that it can be retrieved later by the same process, or by a 98 | different one. This is useful in cases where the application needs to identify the lock owner (find out who currently 99 | owns the lock). 100 | 101 | .. 
code-block:: python 102 | 103 | import socket 104 | host_id = "owned-by-%s" % socket.gethostname() 105 | lock = redis_lock.Lock(conn, "name-of-the-lock", id=host_id) 106 | if lock.acquire(blocking=False): 107 | assert lock.locked() is True 108 | print("Got the lock.") 109 | lock.release() 110 | else: 111 | if lock.get_owner_id() == host_id: 112 | print("I already acquired this in another process.") 113 | else: 114 | print("The lock is held on another machine.") 115 | 116 | 117 | Avoid dogpile effect in django 118 | ------------------------------ 119 | 120 | The dogpile effect is also known as the thundering herd effect or cache stampede. Here's a pattern to avoid the problem 121 | without serving stale data. The work will be performed a single time and every client will wait for the fresh data. 122 | 123 | To use this you will need `django-redis `_, however, ``python-redis-lock`` 124 | provides you a cache backend that has a ``lock`` method for your convenience. Just install ``python-redis-lock`` like 125 | this: 126 | 127 | .. code-block:: bash 128 | 129 | pip install "python-redis-lock[django]" 130 | 131 | Now put something like this in your settings: 132 | 133 | .. code-block:: python 134 | 135 | CACHES = { 136 | 'default': { 137 | 'BACKEND': 'redis_lock.django_cache.RedisCache', 138 | 'LOCATION': 'redis://127.0.0.1:6379/1', 139 | 'OPTIONS': { 140 | 'CLIENT_CLASS': 'django_redis.client.DefaultClient' 141 | } 142 | } 143 | } 144 | 145 | .. note:: 146 | If using a `django-redis` < `3.8.x`, you'll probably need `redis_cache` 147 | which has been deprecated in favor of `django_redis`. The `redis_cache` 148 | module is removed in `django-redis` versions > `3.9.x`. See `django-redis notes `_. 149 | 150 | 151 | This backend just adds a convenient ``.lock(name, expire=None)`` function to django-redis's cache backend. 152 | 153 | You would write your functions like this: 154 | 155 | .. code-block:: python 156 | 157 | from django.core.cache import cache 158 | 159 | def function(): 160 | val = cache.get(key) 161 | if not val: 162 | with cache.lock(key): 163 | val = cache.get(key) 164 | if not val: 165 | # DO EXPENSIVE WORK 166 | val = ... 167 | cache.set(key, val) 168 | return val 169 | 170 | Troubleshooting 171 | --------------- 172 | 173 | In some cases, the lock remains in redis forever (like a server blackout / redis or application crash / an unhandled 174 | exception). In such cases, the lock is not removed by restarting the application. One solution is to turn on the 175 | `auto_renewal` parameter in combination with `expire` to set a time-out on the lock, but let `Lock()` automatically 176 | keep resetting the expire time while your application code is executing: 177 | 178 | .. code-block:: python 179 | 180 | # Get a lock with a 60-second lifetime but keep renewing it automatically 181 | # to ensure the lock is held for as long as the Python process is running. 182 | with redis_lock.Lock(conn, name='my-lock', expire=60, auto_renewal=True): 183 | # Do work.... 184 | 185 | Another solution is to use the ``reset_all()`` function when the application starts: 186 | 187 | .. code-block:: python 188 | 189 | # On application start/restart 190 | import redis_lock 191 | redis_lock.reset_all(conn) 192 | 193 | Alternatively, you can reset individual locks via the ``reset`` method. 194 | 195 | Use these carefully, and only if you understand what you are doing.
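For reference, resetting a single leftover lock with the ``reset`` method mentioned above looks like this (a minimal sketch; the lock name is only an example):

.. code-block:: python

    import redis_lock
    from redis import StrictRedis

    conn = StrictRedis()
    # Forcibly delete the lock key and signal any waiters, without acquiring the lock first.
    redis_lock.Lock(conn, "name-of-the-lock").reset()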
196 | 197 | 198 | Features 199 | ======== 200 | 201 | * based on the standard SETNX recipe 202 | * optional expiry 203 | * optional timeout 204 | * optional lock renewal (use a low expire but keep the lock active) 205 | * no spinloops at acquire 206 | 207 | Implementation 208 | ============== 209 | 210 | ``redis_lock`` will use 2 keys for each lock named ``<name>``: 211 | 212 | * ``lock:<name>`` - a string value for the actual lock 213 | * ``lock-signal:<name>`` - a list value for signaling the waiters when the lock is released 214 | 215 | This is how it works: 216 | 217 | .. image:: https://raw.githubusercontent.com/ionelmc/python-redis-lock/master/docs/redis-lock%20diagram%20(v3.0).png 218 | :alt: python-redis-lock flow diagram 219 | 220 | Documentation 221 | ============= 222 | 223 | https://python-redis-lock.readthedocs.io/en/latest/ 224 | 225 | Development 226 | =========== 227 | 228 | To run all the tests run:: 229 | 230 | tox 231 | 232 | Requirements 233 | ============ 234 | 235 | :OS: Any 236 | :Runtime: Python 3.7 or later, or PyPy 237 | :Services: Redis 2.6.12 or later. 238 | 239 | Similar projects 240 | ================ 241 | 242 | * `bbangert/retools `_ - acquire does spinloop 243 | * `distributing-locking-python-and-redis `_ - acquire does polling 244 | * `cezarsa/redis_lock `_ - acquire does not block 245 | * `andymccurdy/redis-py `_ - acquire does spinloop 246 | * `mpessas/python-redis-lock `_ - blocks fine but no expiration 247 | * `brainix/pottery `_ - acquire does spinloop 248 | -------------------------------------------------------------------------------- /src/redis_lock/__init__.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import weakref 3 | from base64 import b64encode 4 | from logging import getLogger 5 | from os import urandom 6 | from typing import Union 7 | 8 | from redis import StrictRedis 9 | 10 | __version__ = '4.0.0' 11 | 12 | logger_for_acquire = getLogger(f"{__name__}.acquire") 13 | logger_for_refresh_thread = getLogger(f"{__name__}.refresh.thread") 14 | logger_for_refresh_start = getLogger(f"{__name__}.refresh.start") 15 | logger_for_refresh_shutdown = getLogger(f"{__name__}.refresh.shutdown") 16 | logger_for_refresh_exit = getLogger(f"{__name__}.refresh.exit") 17 | logger_for_release = getLogger(f"{__name__}.release") 18 | 19 | # Check if the id matches. If not, return an error code. 20 | UNLOCK_SCRIPT = b""" 21 | if redis.call("get", KEYS[1]) ~= ARGV[1] then 22 | return 1 23 | else 24 | redis.call("del", KEYS[2]) 25 | redis.call("lpush", KEYS[2], 1) 26 | redis.call("pexpire", KEYS[2], ARGV[2]) 27 | redis.call("del", KEYS[1]) 28 | return 0 29 | end 30 | """ 31 | 32 | # Covers both cases: when the key doesn't exist and when it doesn't equal the lock's id 33 | EXTEND_SCRIPT = b""" 34 | if redis.call("get", KEYS[1]) ~= ARGV[1] then 35 | return 1 36 | elseif redis.call("ttl", KEYS[1]) < 0 then 37 | return 2 38 | else 39 | redis.call("expire", KEYS[1], ARGV[2]) 40 | return 0 41 | end 42 | """ 43 | 44 | RESET_SCRIPT = b""" 45 | redis.call('del', KEYS[2]) 46 | redis.call('lpush', KEYS[2], 1) 47 | redis.call('pexpire', KEYS[2], ARGV[2]) 48 | return redis.call('del', KEYS[1]) 49 | """ 50 | 51 | RESET_ALL_SCRIPT = b""" 52 | local locks = redis.call('keys', 'lock:*') 53 | local signal 54 | for _, lock in pairs(locks) do 55 | signal = 'lock-signal:' ..
string.sub(lock, 6) 56 | redis.call('del', signal) 57 | redis.call('lpush', signal, 1) 58 | redis.call('expire', signal, 1) 59 | redis.call('del', lock) 60 | end 61 | return #locks 62 | """ 63 | 64 | 65 | class AlreadyAcquired(RuntimeError): 66 | pass 67 | 68 | 69 | class NotAcquired(RuntimeError): 70 | pass 71 | 72 | 73 | class AlreadyStarted(RuntimeError): 74 | pass 75 | 76 | 77 | class TimeoutNotUsable(RuntimeError): 78 | pass 79 | 80 | 81 | class InvalidTimeout(RuntimeError): 82 | pass 83 | 84 | 85 | class TimeoutTooLarge(RuntimeError): 86 | pass 87 | 88 | 89 | class NotExpirable(RuntimeError): 90 | pass 91 | 92 | 93 | class Lock(object): 94 | """ 95 | A Lock context manager implemented via redis SETNX/BLPOP. 96 | """ 97 | 98 | unlock_script = None 99 | extend_script = None 100 | reset_script = None 101 | reset_all_script = None 102 | blocking = None 103 | 104 | _lock_renewal_interval: float 105 | _lock_renewal_thread: Union[threading.Thread, None] 106 | 107 | def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000, blocking=True): 108 | """ 109 | :param redis_client: 110 | An instance of :class:`~StrictRedis`. 111 | :param name: 112 | The name (redis key) the lock should have. 113 | :param expire: 114 | The lock expiry time in seconds. If left at the default (None) 115 | the lock will not expire. 116 | :param id: 117 | The ID (redis value) the lock should have. A random value is 118 | generated when left at the default. 119 | 120 | Note that if you specify this then the lock is marked as "held". Acquires 121 | won't be possible. 122 | :param auto_renewal: 123 | If set to ``True``, Lock will automatically renew the lock so that it 124 | doesn't expire for as long as the lock is held (acquire() called 125 | or running in a context manager). 126 | 127 | Implementation note: Renewal will happen using a daemon thread with 128 | an interval of ``expire*2/3``. If wishing to use a different renewal 129 | time, subclass Lock, call ``super().__init__()`` then set 130 | ``self._lock_renewal_interval`` to your desired interval. 131 | :param strict: 132 | If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``. 133 | :param signal_expire: 134 | Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``. 135 | :param blocking: 136 | Boolean value specifying whether lock should be blocking or not. 137 | Used in `__enter__` method. 138 | """ 139 | if strict and not isinstance(redis_client, StrictRedis): 140 | raise ValueError("redis_client must be instance of StrictRedis. Use strict=False if you know what you're doing.") 141 | if auto_renewal and expire is None: 142 | raise ValueError("Expire may not be None when auto_renewal is set") 143 | 144 | self._client = redis_client 145 | 146 | if expire: 147 | expire = int(expire) 148 | if expire < 0: 149 | raise ValueError("A negative expire is not acceptable.") 150 | else: 151 | expire = None 152 | self._expire = expire 153 | 154 | self._signal_expire = signal_expire 155 | if id is None: 156 | self._id = b64encode(urandom(18)).decode('ascii') 157 | elif isinstance(id, bytes): 158 | try: 159 | self._id = id.decode('ascii') 160 | except UnicodeDecodeError: 161 | self._id = b64encode(id).decode('ascii') 162 | elif isinstance(id, str): 163 | self._id = id 164 | else: 165 | raise TypeError(f"Incorrect type for `id`. 
Must be bytes/str not {type(id)}.") 166 | self._name = 'lock:' + name 167 | self._signal = 'lock-signal:' + name 168 | self._lock_renewal_interval = float(expire) * 2 / 3 if auto_renewal else None 169 | self._lock_renewal_thread = None 170 | 171 | self.blocking = blocking 172 | 173 | self.register_scripts(redis_client) 174 | 175 | @classmethod 176 | def register_scripts(cls, redis_client): 177 | global reset_all_script 178 | if reset_all_script is None: 179 | cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT) 180 | cls.extend_script = redis_client.register_script(EXTEND_SCRIPT) 181 | cls.reset_script = redis_client.register_script(RESET_SCRIPT) 182 | cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT) 183 | reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT) 184 | 185 | @property 186 | def _held(self): 187 | return self.id == self.get_owner_id() 188 | 189 | def reset(self): 190 | """ 191 | Forcibly deletes the lock. Use this with care. 192 | """ 193 | self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire)) 194 | 195 | @property 196 | def id(self): 197 | return self._id 198 | 199 | def get_owner_id(self): 200 | owner_id = self._client.get(self._name) 201 | if isinstance(owner_id, bytes): 202 | owner_id = owner_id.decode('ascii', 'replace') 203 | return owner_id 204 | 205 | def acquire(self, blocking=True, timeout=None): 206 | """ 207 | :param blocking: 208 | Boolean value specifying whether lock should be blocking or not. 209 | :param timeout: 210 | An integer value specifying the maximum number of seconds to block. 211 | """ 212 | logger_for_acquire.debug("Acquiring Lock(%r) ...", self._name) 213 | 214 | if self._held: 215 | raise AlreadyAcquired("Already acquired from this Lock instance.") 216 | 217 | if not blocking and timeout is not None: 218 | raise TimeoutNotUsable("Timeout cannot be used if blocking=False") 219 | 220 | if timeout: 221 | timeout = int(timeout) 222 | if timeout < 0: 223 | raise InvalidTimeout(f"Timeout ({timeout}) cannot be less than or equal to 0") 224 | 225 | if self._expire and not self._lock_renewal_interval and timeout > self._expire: 226 | raise TimeoutTooLarge(f"Timeout ({timeout}) cannot be greater than expire ({self._expire})") 227 | 228 | busy = True 229 | blpop_timeout = timeout or self._expire or 0 230 | timed_out = False 231 | while busy: 232 | busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire) 233 | if busy: 234 | if timed_out: 235 | return False 236 | elif blocking: 237 | timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout 238 | else: 239 | logger_for_acquire.warning("Failed to acquire Lock(%r).", self._name) 240 | return False 241 | 242 | logger_for_acquire.info("Acquired Lock(%r).", self._name) 243 | if self._lock_renewal_interval is not None: 244 | self._start_lock_renewer() 245 | return True 246 | 247 | def extend(self, expire=None): 248 | """ 249 | Extends expiration time of the lock. 250 | 251 | :param expire: 252 | New expiration time. If ``None`` - `expire` provided during 253 | lock initialization will be taken. 
254 | """ 255 | if expire: 256 | expire = int(expire) 257 | if expire < 0: 258 | raise ValueError("A negative expire is not acceptable.") 259 | elif self._expire is not None: 260 | expire = self._expire 261 | else: 262 | raise TypeError("To extend a lock 'expire' must be provided as an argument to extend() method or at initialization time.") 263 | 264 | error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire)) 265 | if error == 1: 266 | raise NotAcquired(f"Lock {self._name} is not acquired or it already expired.") 267 | elif error == 2: 268 | raise NotExpirable(f"Lock {self._name} has no assigned expiration time") 269 | elif error: 270 | raise RuntimeError(f"Unsupported error code {error} from EXTEND script") 271 | 272 | @staticmethod 273 | def _lock_renewer(name, lockref, interval, stop): 274 | """ 275 | Renew the lock key in redis every `interval` seconds for as long 276 | as `self._lock_renewal_thread.should_exit` is False. 277 | """ 278 | while not stop.wait(timeout=interval): 279 | logger_for_refresh_thread.debug("Refreshing Lock(%r).", name) 280 | lock: "Lock" = lockref() 281 | if lock is None: 282 | logger_for_refresh_thread.debug("Stopping loop because Lock(%r) was garbage collected.", name) 283 | break 284 | lock.extend(expire=lock._expire) 285 | del lock 286 | logger_for_refresh_thread.debug("Exiting renewal thread for Lock(%r).", name) 287 | 288 | def _start_lock_renewer(self): 289 | """ 290 | Starts the lock refresher thread. 291 | """ 292 | if self._lock_renewal_thread is not None: 293 | raise AlreadyStarted("Lock refresh thread already started") 294 | 295 | logger_for_refresh_start.debug( 296 | "Starting renewal thread for Lock(%r). Refresh interval: %s seconds.", self._name, self._lock_renewal_interval 297 | ) 298 | self._lock_renewal_stop = threading.Event() 299 | self._lock_renewal_thread = threading.Thread( 300 | group=None, 301 | target=self._lock_renewer, 302 | kwargs={ 303 | 'name': self._name, 304 | 'lockref': weakref.ref(self), 305 | 'interval': self._lock_renewal_interval, 306 | 'stop': self._lock_renewal_stop, 307 | }, 308 | ) 309 | self._lock_renewal_thread.daemon = True 310 | self._lock_renewal_thread.start() 311 | 312 | def _stop_lock_renewer(self): 313 | """ 314 | Stop the lock renewer. 315 | 316 | This signals the renewal thread and waits for its exit. 317 | """ 318 | if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive(): 319 | return 320 | logger_for_refresh_shutdown.debug("Signaling renewal thread for Lock(%r) to exit.", self._name) 321 | self._lock_renewal_stop.set() 322 | self._lock_renewal_thread.join() 323 | self._lock_renewal_thread = None 324 | logger_for_refresh_exit.debug("Renewal thread for Lock(%r) exited.", self._name) 325 | 326 | def __enter__(self): 327 | acquired = self.acquire(blocking=self.blocking) 328 | if not acquired: 329 | if self.blocking: 330 | raise AssertionError(f"Lock({self._name}) wasn't acquired, but blocking=True was used!") 331 | raise NotAcquired(f"Lock({self._name}) is not acquired or it already expired.") 332 | return self 333 | 334 | def __exit__(self, exc_type=None, exc_value=None, traceback=None): 335 | self.release() 336 | 337 | def release(self): 338 | """Releases the lock, that was acquired with the same object. 339 | 340 | .. 
note:: 341 | 342 | If you want to release a lock that you acquired in a different place you have two choices: 343 | 344 | * Use ``Lock("name", id=id_from_other_place).release()`` 345 | * Use ``Lock("name").reset()`` 346 | """ 347 | if self._lock_renewal_thread is not None: 348 | self._stop_lock_renewer() 349 | logger_for_release.debug("Releasing Lock(%r).", self._name) 350 | error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire)) 351 | if error == 1: 352 | raise NotAcquired(f"Lock({self._name}) is not acquired or it already expired.") 353 | elif error: 354 | raise RuntimeError(f"Unsupported error code {error} from EXTEND script.") 355 | 356 | def locked(self): 357 | """ 358 | Return true if the lock is acquired. 359 | 360 | Checks that lock with same name already exists. This method returns true, even if 361 | lock have another id. 362 | """ 363 | return self._client.exists(self._name) == 1 364 | 365 | 366 | reset_all_script = None 367 | 368 | 369 | def reset_all(redis_client): 370 | """ 371 | Forcibly deletes all locks if its remains (like a crash reason). Use this with care. 372 | 373 | :param redis_client: 374 | An instance of :class:`~StrictRedis`. 375 | """ 376 | Lock.register_scripts(redis_client) 377 | 378 | reset_all_script(client=redis_client) # noqa 379 | -------------------------------------------------------------------------------- /tests/test_redis_lock.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | from __future__ import print_function 3 | 4 | import gc 5 | import logging 6 | import multiprocessing 7 | import platform 8 | import sys 9 | import threading 10 | import time 11 | from collections import defaultdict 12 | from functools import partial 13 | 14 | import pytest 15 | from process_tests import TestProcess 16 | from process_tests import dump_on_error 17 | from process_tests import wait_for_strings 18 | from redis import StrictRedis 19 | 20 | from redis_lock import AlreadyAcquired 21 | from redis_lock import InvalidTimeout 22 | from redis_lock import Lock 23 | from redis_lock import NotAcquired 24 | from redis_lock import NotExpirable 25 | from redis_lock import TimeoutNotUsable 26 | from redis_lock import TimeoutTooLarge 27 | from redis_lock import reset_all 28 | 29 | from config import HELPER 30 | from config import TIMEOUT 31 | 32 | pytest_plugins = ('pytester',) 33 | 34 | skipifpypy = partial(pytest.mark.skipif(platform.python_implementation() == 'PyPy')) 35 | 36 | 37 | def maybe_decode(data): 38 | if isinstance(data, bytes): 39 | return data.decode('ascii') 40 | else: 41 | return data 42 | 43 | 44 | @pytest.fixture(params=[True, False], ids=['decode_responses=True', 'decode_responses=False']) 45 | def make_conn_plain(request, redis_server, redis_socket): 46 | def conn_factory(**options): 47 | options.setdefault('encoding_errors', 'replace') 48 | conn = StrictRedis(unix_socket_path=redis_socket, **options) 49 | request.addfinalizer(conn.flushdb) 50 | return conn 51 | 52 | return conn_factory 53 | 54 | 55 | @pytest.fixture(params=[True, False], ids=['decode_responses=True', 'decode_responses=False']) 56 | def make_conn(request, make_conn_plain): 57 | return partial(make_conn_plain, decode_responses=request.param) 58 | 59 | 60 | @pytest.fixture 61 | def conn(request, make_conn): 62 | return make_conn() 63 | 64 | 65 | @pytest.fixture( 66 | params=[ 67 | 'normal', 68 | 'gevent', 69 | pytest.param('eventlet', 
marks=skipifpypy(reason="apparently broken on PyPy")), 70 | ] 71 | ) 72 | def effect(request): 73 | def wrap_name_with_effect(name): 74 | if request.param == 'normal': 75 | return name 76 | else: 77 | return '{}:{}'.format(name, request.param) 78 | 79 | wrap_name_with_effect.expected_impl = { 80 | 'normal': '_thread', 81 | 'gevent': 'gevent.thread', 82 | 'eventlet': 'eventlet.green.thread', 83 | }[request.param] 84 | return wrap_name_with_effect 85 | 86 | 87 | @pytest.fixture 88 | def make_process(request): 89 | """Process factory, that makes processes, that terminate themselves 90 | after a test run. 91 | """ 92 | 93 | def make_process_factory(*args, **kwargs): 94 | process = multiprocessing.Process(*args, **kwargs) 95 | request.addfinalizer(process.terminate) 96 | 97 | return process 98 | 99 | return make_process_factory 100 | 101 | 102 | def test_upgrade(conn, make_conn_plain): 103 | legacy_conn = make_conn_plain(decode_responses=False) 104 | lock = Lock(conn, "foobar") 105 | legacy_conn.set(lock._name, b'\xd6{\xc93\xe9\xbd,\xdb\xb6\xa8<\x8ax\xd1<\xb9', nx=True, ex=lock._expire) 106 | assert not lock.acquire(blocking=False) 107 | 108 | 109 | def test_simple(redis_server, redis_socket, effect): 110 | with TestProcess(sys.executable, HELPER, redis_socket, effect('test_simple')) as proc: 111 | with dump_on_error(proc.read): 112 | name = 'lock:foobar' 113 | wait_for_strings( 114 | proc.read, 115 | TIMEOUT, 116 | 'Acquiring Lock(%r) ...' % name, 117 | 'Acquired Lock(%r).' % name, 118 | 'Releasing Lock(%r).' % name, 119 | 'DIED.', 120 | ) 121 | 122 | 123 | def test_simple_auto_renewal(redis_server, redis_socket, effect, LineMatcher): 124 | with TestProcess(sys.executable, HELPER, redis_socket, effect('test_simple_auto_renewal')) as proc: 125 | with dump_on_error(proc.read): 126 | name = 'lock:foobar' 127 | wait_for_strings( 128 | proc.read, 129 | TIMEOUT, 130 | 'DIED.', 131 | ) 132 | LineMatcher(proc.read().splitlines()).fnmatch_lines( 133 | [ 134 | '* threading.get_ident.__module__=%s' % effect.expected_impl, 135 | '* Acquiring Lock(%r) ...' % name, 136 | '* Acquired Lock(%r).' % name, 137 | '* Starting renewal thread for Lock(%r). Refresh interval: 0.6666666666666666 seconds.' % name, 138 | '* Refreshing Lock(%r).' % name, 139 | '* Refreshing Lock(%r).' % name, 140 | '* Signaling renewal thread for Lock(%r) to exit.' % name, 141 | '* Exiting renewal thread for Lock(%r).' % name, 142 | '* Renewal thread for Lock(%r) exited.' % name, 143 | '* Releasing Lock(%r).' % name, 144 | ] 145 | ) 146 | 147 | 148 | def test_no_block(conn, redis_socket): 149 | with Lock(conn, "foobar"): 150 | with TestProcess(sys.executable, HELPER, redis_socket, 'test_no_block') as proc: 151 | with dump_on_error(proc.read): 152 | name = 'lock:foobar' 153 | wait_for_strings( 154 | proc.read, 155 | TIMEOUT, 156 | 'Acquiring Lock(%r) ...' % name, 157 | 'Failed to acquire Lock(%r).' 
% name, 158 | 'acquire=>False', 159 | 'DIED.', 160 | ) 161 | 162 | 163 | def test_timeout(conn): 164 | with Lock(conn, "foobar"): 165 | lock = Lock(conn, "foobar") 166 | assert lock.acquire(timeout=1) is False 167 | 168 | 169 | @pytest.mark.parametrize("timeout", [0, "0", "123"]) 170 | def test_timeout_int_conversion(conn, timeout): 171 | lock = Lock(conn, "foobar") 172 | lock.acquire(blocking=True, timeout=timeout) 173 | lock.release() 174 | 175 | 176 | def test_timeout_expire(conn): 177 | lock1 = Lock(conn, "foobar", expire=1) 178 | lock1.acquire() 179 | lock2 = Lock(conn, "foobar") 180 | assert lock2.acquire(timeout=2) 181 | 182 | 183 | def test_timeout_expire_with_renewal(conn): 184 | with Lock(conn, "foobar", expire=1, auto_renewal=True): 185 | lock = Lock(conn, "foobar") 186 | assert lock.acquire(timeout=2) is False 187 | 188 | 189 | def test_timeout_acquired(conn, redis_socket): 190 | with TestProcess(sys.executable, HELPER, redis_socket, 'test_timeout') as proc: 191 | with dump_on_error(proc.read): 192 | name = 'lock:foobar' 193 | wait_for_strings( 194 | proc.read, 195 | TIMEOUT, 196 | 'Acquiring Lock(%r) ...' % name, 197 | 'Acquired Lock(%r).' % name, 198 | ) 199 | lock = Lock(conn, "foobar") 200 | assert lock.acquire(timeout=2) 201 | 202 | 203 | def test_not_usable_timeout(conn): 204 | lock = Lock(conn, "foobar") 205 | with pytest.raises(TimeoutNotUsable): 206 | lock.acquire(blocking=False, timeout=1) 207 | 208 | 209 | def test_expire_less_than_timeout(conn): 210 | lock = Lock(conn, "foobar", expire=1) 211 | pytest.raises(TimeoutTooLarge, lock.acquire, blocking=True, timeout=2) 212 | 213 | lock = Lock(conn, "foobar", expire=1, auto_renewal=True) 214 | lock.acquire(blocking=True, timeout=2) 215 | lock.release() 216 | 217 | 218 | def test_invalid_timeout(conn): 219 | lock = Lock(conn, "foobar") 220 | pytest.raises(InvalidTimeout, lock.acquire, blocking=True, timeout=-123) 221 | 222 | lock = Lock(conn, "foobar") 223 | pytest.raises(InvalidTimeout, lock.acquire, blocking=True, timeout=-1) 224 | pytest.raises(ValueError, lock.acquire, blocking=True, timeout="foobar") 225 | 226 | 227 | def test_expire_int_conversion(): 228 | conn = object() 229 | 230 | lock = Lock(conn, name='foobar', strict=False, expire=1) 231 | assert lock._expire == 1 232 | 233 | lock = Lock(conn, name='foobar', strict=False, expire=0) 234 | assert lock._expire is None 235 | 236 | lock = Lock(conn, name='foobar', strict=False, expire="1") 237 | assert lock._expire == 1 238 | 239 | lock = Lock(conn, name='foobar', strict=False, expire="123") 240 | assert lock._expire == 123 241 | 242 | 243 | def test_expire(conn, redis_socket): 244 | lock = Lock(conn, "foobar", expire=TIMEOUT / 4) 245 | lock.acquire() 246 | with TestProcess(sys.executable, HELPER, redis_socket, 'test_expire') as proc: 247 | with dump_on_error(proc.read): 248 | name = 'lock:foobar' 249 | wait_for_strings( 250 | proc.read, 251 | TIMEOUT, 252 | 'Acquiring Lock(%r) ...' % name, 253 | 'Acquired Lock(%r).' % name, 254 | 'Releasing Lock(%r).' 
% name, 255 | 'DIED.', 256 | ) 257 | lock = Lock(conn, "foobar") 258 | try: 259 | assert lock.acquire(blocking=False) is True 260 | finally: 261 | lock.release() 262 | 263 | 264 | def test_expire_without_timeout(conn): 265 | first_lock = Lock(conn, 'expire', expire=2) 266 | second_lock = Lock(conn, 'expire', expire=1) 267 | first_lock.acquire() 268 | assert second_lock.acquire(blocking=False) is False 269 | assert second_lock.acquire() is True 270 | second_lock.release() 271 | 272 | 273 | def test_extend(conn): 274 | name = 'foobar' 275 | key_name = 'lock:' + name 276 | with Lock(conn, name, expire=100) as lock: 277 | assert conn.ttl(key_name) <= 100 278 | 279 | lock.extend(expire=1000) 280 | assert conn.ttl(key_name) > 100 281 | 282 | 283 | def test_extend_lock_default_expire(conn): 284 | name = 'foobar' 285 | key_name = 'lock:' + name 286 | with Lock(conn, name, expire=1000) as lock: 287 | time.sleep(3) 288 | assert conn.ttl(key_name) <= 997 289 | lock.extend() 290 | assert 997 < conn.ttl(key_name) <= 1000 291 | 292 | 293 | def test_extend_lock_without_expire_fail(conn): 294 | name = 'foobar' 295 | with Lock(conn, name) as lock: 296 | with pytest.raises(NotExpirable): 297 | lock.extend(expire=1000) 298 | 299 | with pytest.raises(TypeError): 300 | lock.extend() 301 | 302 | 303 | def test_extend_another_instance(conn): 304 | """It is possible to extend a lock using another instance of Lock with the 305 | same name. 306 | """ 307 | name = 'foobar' 308 | key_name = 'lock:' + name 309 | lock = Lock(conn, name, expire=100) 310 | lock.acquire() 311 | assert 0 <= conn.ttl(key_name) <= 100 312 | 313 | another_lock = Lock(conn, name, id=lock.id) 314 | another_lock.extend(1000) 315 | 316 | assert conn.ttl(key_name) > 100 317 | 318 | 319 | def test_extend_another_instance_different_id_fail(conn): 320 | """It is impossible to extend a lock using another instance of Lock with 321 | the same name, but different id. 322 | """ 323 | name = 'foobar' 324 | key_name = 'lock:' + name 325 | lock = Lock(conn, name, expire=100) 326 | lock.acquire() 327 | assert 0 <= conn.ttl(key_name) <= 100 328 | 329 | another_lock = Lock(conn, name) 330 | with pytest.raises(NotAcquired): 331 | another_lock.extend(1000) 332 | 333 | assert conn.ttl(key_name) <= 100 334 | assert lock.id != another_lock.id 335 | 336 | 337 | def test_double_acquire(conn): 338 | lock = Lock(conn, "foobar") 339 | with lock: 340 | pytest.raises(RuntimeError, lock.acquire) 341 | pytest.raises(AlreadyAcquired, lock.acquire) 342 | 343 | 344 | def test_enter_already_acquired_with_not_blocking(conn): 345 | lock = Lock(conn, "foobar") 346 | acquired = lock.acquire() 347 | assert acquired 348 | 349 | with pytest.raises(NotAcquired): 350 | with Lock(conn, "foobar", blocking=False): 351 | pass 352 | 353 | 354 | def test_plain(conn): 355 | with Lock(conn, "foobar"): 356 | time.sleep(0.01) 357 | 358 | 359 | def test_no_overlap(redis_server, redis_socket): 360 | """ 361 | This test tries to simulate contention: lots of clients trying to acquire at the same time. 362 | 363 | If there would be a bug that would allow two clients to hold the lock at the same time it 364 | would most likely regress this test. 365 | 366 | The code here mostly tries to parse out the pid of the process and the time when it got and 367 | released the lock. If there's is overlap (eg: pid1.start < pid2.start < pid1.end) then we 368 | got a very bad regression on our hands ... 
369 | 370 | The subprocess being run (check helper.py) will fork bunch of processes and will try to 371 | syncronize them (using the builting sched) to try to acquire the lock at the same time. 372 | """ 373 | with TestProcess(sys.executable, HELPER, redis_socket, 'test_no_overlap') as proc: 374 | with dump_on_error(proc.read): 375 | name = 'lock:foobar' 376 | wait_for_strings(proc.read, 10 * TIMEOUT, 'Acquiring Lock(%r) ...' % name) 377 | wait_for_strings(proc.read, 10 * TIMEOUT, 'Acquired Lock(%r).' % name) 378 | wait_for_strings(proc.read, 10 * TIMEOUT, 'Releasing Lock(%r).' % name) 379 | wait_for_strings(proc.read, 10 * TIMEOUT, 'DIED.') 380 | 381 | class Event(object): 382 | pid = start = end = '?' 383 | 384 | def __str__(self): 385 | return "Event(%s; %r => %r)" % (self.pid, self.start, self.end) 386 | 387 | events = defaultdict(Event) 388 | for line in proc.read().splitlines(): 389 | try: 390 | pid, time, junk = line.split(' ', 2) 391 | pid = int(pid) 392 | except ValueError: 393 | continue 394 | if 'Got lock for' in junk: 395 | events[pid].pid = pid 396 | events[pid].start = time 397 | if 'Releasing' in junk: 398 | events[pid].pid = pid 399 | events[pid].end = time 400 | assert len(events) == 125 401 | 402 | # not very smart but we don't have millions of events so it's 403 | # ok - compare all the events with all the other events: 404 | for event in events.values(): 405 | for other in events.values(): 406 | if other is not event: 407 | try: 408 | if other.start < event.start < other.end or other.start < event.end < other.end: 409 | pytest.fail('%s overlaps %s' % (event, other)) 410 | except Exception: 411 | print("[%s/%s]" % (event, other)) 412 | raise 413 | 414 | 415 | def _no_overlap2_workerfn(redis_socket, event, count_lock, count): 416 | logging.basicConfig(level=logging.DEBUG) 417 | with StrictRedis(unix_socket_path=redis_socket) as conn: 418 | redis_lock = Lock(conn, 'lock') 419 | 420 | with count_lock: 421 | count.value += 1 422 | 423 | event.wait() 424 | 425 | if redis_lock.acquire(blocking=True): 426 | with count_lock: 427 | count.value += 1 428 | 429 | 430 | @skipifpypy(reason="way too slow to run on PyPy") 431 | def test_no_overlap2(make_process, redis_server, redis_socket): 432 | """The second version of contention test, that uses multiprocessing.""" 433 | event = multiprocessing.Event() 434 | count_lock = multiprocessing.Lock() 435 | count = multiprocessing.Value('H', 0) 436 | 437 | for _ in range(125): 438 | make_process(target=_no_overlap2_workerfn, args=(redis_socket, event, count_lock, count)).start() 439 | 440 | # Wait until all workers will come to point when they are ready to acquire 441 | # the redis lock. 442 | while count.value < 125: 443 | time.sleep(0.5) 444 | 445 | # Then "count" will be used as counter of workers, which acquired 446 | # redis-lock with success. 
447 | count.value = 0 448 | 449 | event.set() 450 | 451 | time.sleep(1) 452 | 453 | assert count.value == 1 454 | 455 | 456 | def test_reset(conn): 457 | lock = Lock(conn, "foobar") 458 | lock.reset() 459 | new_lock = Lock(conn, "foobar") 460 | new_lock.acquire(blocking=False) 461 | new_lock.release() 462 | pytest.raises(NotAcquired, lock.release) 463 | 464 | 465 | def test_reset_all(conn): 466 | lock1 = Lock(conn, "foobar1") 467 | lock2 = Lock(conn, "foobar2") 468 | lock1.acquire(blocking=False) 469 | lock2.acquire(blocking=False) 470 | reset_all(conn) 471 | lock1 = Lock(conn, "foobar1") 472 | lock2 = Lock(conn, "foobar2") 473 | lock1.acquire(blocking=False) 474 | lock2.acquire(blocking=False) 475 | lock1.release() 476 | lock2.release() 477 | 478 | 479 | def test_owner_id(conn): 480 | unique_identifier = "foobar-identifier" 481 | lock = Lock(conn, "foobar-tok", expire=TIMEOUT / 4, id=unique_identifier) 482 | lock_id = lock.id 483 | assert lock_id == unique_identifier 484 | 485 | 486 | def test_get_owner_id(conn): 487 | lock = Lock(conn, "foobar-tok") 488 | lock.acquire() 489 | assert lock.get_owner_id() == lock.id 490 | lock.release() 491 | 492 | 493 | def test_token(conn): 494 | lock = Lock(conn, "foobar-tok") 495 | tok = lock.id 496 | assert conn.get(lock._name) is None 497 | lock.acquire(blocking=False) 498 | assert maybe_decode(conn.get(lock._name)) == tok 499 | 500 | 501 | def test_bogus_release(conn): 502 | lock = Lock(conn, "foobar-tok") 503 | pytest.raises(NotAcquired, lock.release) 504 | lock.acquire() 505 | lock2 = Lock(conn, "foobar-tok", id=lock.id) 506 | lock2.release() 507 | 508 | 509 | def test_release_from_nonblocking_leaving_garbage(conn): 510 | for _ in range(10): 511 | lock = Lock(conn, 'release_from_nonblocking') 512 | lock.acquire(blocking=False) 513 | lock.release() 514 | assert conn.llen('lock-signal:release_from_nonblocking') == 1 515 | 516 | 517 | def test_no_auto_renewal(conn): 518 | lock = Lock(conn, 'lock_renewal', expire=3, auto_renewal=False) 519 | assert lock._lock_renewal_interval is None 520 | lock.acquire() 521 | assert lock._lock_renewal_thread is None, "No lock refresh thread should have been spawned" 522 | 523 | 524 | def test_auto_renewal_bad_values(conn): 525 | with pytest.raises(ValueError): 526 | Lock(conn, 'lock_renewal', expire=None, auto_renewal=True) 527 | 528 | 529 | def test_auto_renewal(conn): 530 | lock = Lock(conn, 'lock_renewal', expire=3, auto_renewal=True) 531 | lock.acquire() 532 | 533 | assert isinstance(lock._lock_renewal_thread, threading.Thread) 534 | assert not lock._lock_renewal_stop.is_set() 535 | assert isinstance(lock._lock_renewal_interval, float) 536 | assert lock._lock_renewal_interval == 2 537 | 538 | time.sleep(3) 539 | assert maybe_decode(conn.get(lock._name)) == lock.id, "Key expired but it should have been getting renewed" 540 | 541 | lock.release() 542 | assert lock._lock_renewal_thread is None 543 | 544 | 545 | @pytest.mark.parametrize('signal_expire', [1000, 1500]) 546 | @pytest.mark.parametrize('method', ['release', 'reset_all']) 547 | def test_signal_expiration(conn, signal_expire, method): 548 | """Signal keys expire within two seconds after releasing the lock.""" 549 | lock = Lock(conn, 'signal_expiration', signal_expire=signal_expire) 550 | lock.acquire() 551 | if method == 'release': 552 | lock.release() 553 | elif method == 'reset_all': 554 | reset_all(conn) 555 | time.sleep(0.5) 556 | assert conn.exists('lock-signal:signal_expiration') 557 | time.sleep((signal_expire - 500) / 1000.0) 558 | assert 
conn.llen('lock-signal:signal_expiration') == 0 559 | 560 | 561 | def test_reset_signalizes(make_conn, make_process): 562 | """Call to reset() causes LPUSH to signal key, so blocked waiters 563 | become unblocked.""" 564 | 565 | def workerfn(unblocked): 566 | conn = make_conn() 567 | lock = Lock(conn, 'lock') 568 | if lock.acquire(): 569 | unblocked.value = 1 570 | 571 | unblocked = multiprocessing.Value('B', 0) 572 | conn = make_conn() 573 | lock = Lock(conn, 'lock') 574 | lock.acquire() 575 | 576 | worker = make_process(target=workerfn, args=(unblocked,)) 577 | worker.start() 578 | worker.join(0.5) 579 | lock.reset() 580 | worker.join(0.5) 581 | 582 | assert unblocked.value == 1 583 | 584 | 585 | def test_reset_all_signalizes(make_conn, make_process): 586 | """Call to reset_all() causes LPUSH to all signal keys, so blocked waiters 587 | become unblocked.""" 588 | 589 | def workerfn(unblocked): 590 | conn = make_conn() 591 | lock1 = Lock(conn, 'lock1') 592 | lock2 = Lock(conn, 'lock2') 593 | if lock1.acquire() and lock2.acquire(): 594 | unblocked.value = 1 595 | 596 | unblocked = multiprocessing.Value('B', 0) 597 | conn = make_conn() 598 | lock1 = Lock(conn, 'lock1') 599 | lock2 = Lock(conn, 'lock2') 600 | lock1.acquire() 601 | lock2.acquire() 602 | 603 | worker = make_process(target=workerfn, args=(unblocked,)) 604 | worker.start() 605 | worker.join(0.5) 606 | reset_all(conn) 607 | worker.join(0.5) 608 | 609 | assert unblocked.value == 1 610 | 611 | 612 | def test_auto_renewal_stops_after_gc(conn): 613 | """Auto renewal stops after lock is garbage collected.""" 614 | lock = Lock(conn, 'spam', auto_renewal=True, expire=1) 615 | name = lock._name 616 | lock.acquire(blocking=True) 617 | lock_renewal_thread = lock._lock_renewal_thread 618 | del lock 619 | gc.collect() 620 | 621 | slept = 0 622 | interval = 0.1 623 | while slept <= 5: 624 | slept += interval 625 | lock_renewal_thread.join(interval) 626 | if not lock_renewal_thread.is_alive(): 627 | break 628 | 629 | time.sleep(1.5) 630 | 631 | assert not lock_renewal_thread.is_alive() 632 | assert conn.get(name) is None 633 | 634 | 635 | def test_given_id(conn): 636 | """It is possible to extend a lock using another instance of Lock with the 637 | same name. 
638 | """ 639 | name = 'foobar' 640 | key_name = 'lock:' + name 641 | orig = Lock(conn, name, expire=100, id=b"a") 642 | orig.acquire() 643 | pytest.raises(TypeError, Lock, conn, name, id=object()) 644 | lock = Lock(conn, name, id=b"a") 645 | pytest.raises(AlreadyAcquired, lock.acquire) 646 | lock.extend(100) 647 | lock.release() # this works, note that this ain't the object that acquired the lock 648 | pytest.raises(NotAcquired, orig.release) # and this fails because lock was released above 649 | 650 | assert conn.ttl(key_name) == -2 651 | 652 | 653 | def test_strict_check(): 654 | pytest.raises(ValueError, Lock, object(), name='foobar') 655 | Lock(object(), name='foobar', strict=False) 656 | 657 | 658 | def test_borken_expires(): 659 | conn = object() 660 | pytest.raises(ValueError, Lock, redis_client=conn, name='foobar', expire=-1) 661 | pytest.raises(ValueError, Lock, redis_client=conn, name='foobar', expire=-123) 662 | pytest.raises(ValueError, Lock, redis_client=conn, name='foobar', expire="-1") 663 | lock = Lock(redis_client=conn, name='foobar', strict=False) 664 | pytest.raises(ValueError, lock.extend, expire=-1) 665 | pytest.raises(ValueError, lock.extend, expire=-123) 666 | pytest.raises(ValueError, lock.extend, expire="-1") 667 | 668 | 669 | def test_locked_method(conn): 670 | lock_name = 'lock_name' 671 | 672 | lock = Lock(conn, lock_name, id='first') 673 | another_lock = Lock(conn, lock_name, id='another') 674 | 675 | assert lock.locked() is False 676 | assert another_lock.locked() is False 677 | 678 | assert lock.acquire() is True 679 | 680 | # another lock has same name and different id, 681 | # but method returns true 682 | assert lock.locked() is True 683 | assert another_lock.locked() is True 684 | --------------------------------------------------------------------------------