├── .editorconfig ├── .github └── ISSUE_TEMPLATE.md ├── .gitignore ├── .readthedocs.yml ├── .travis.yml ├── AUTHORS.rst ├── CONTRIBUTING.rst ├── HISTORY.rst ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.rst ├── docs ├── Makefile ├── _static │ └── empty_dir_marker_file ├── authors.rst ├── conf.py ├── contributing.rst ├── history.rst ├── index.rst ├── installation.rst ├── make.bat ├── python_dynamodb_lock.rst ├── readme.rst └── usage.rst ├── python_dynamodb_lock ├── __init__.py └── python_dynamodb_lock.py ├── requirements.txt ├── requirements_dev.txt ├── setup.cfg ├── setup.py ├── tests ├── __init__.py └── test_python_dynamodb_lock.py ├── tests_integration └── test_app.py └── tox.ini /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | charset = utf-8 11 | end_of_line = lf 12 | 13 | [*.bat] 14 | indent_style = tab 15 | end_of_line = crlf 16 | 17 | [LICENSE] 18 | insert_final_newline = false 19 | 20 | [Makefile] 21 | indent_style = tab 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | * Python DynamoDB Lock version: 2 | * Python version: 3 | * Operating System: 4 | 5 | ### Description 6 | 7 | Describe what you were trying to get done. 8 | Tell us what happened, what went wrong, and what you expected to happen. 9 | 10 | ### What I Did 11 | 12 | ``` 13 | Paste the command(s) you ran and the output. 14 | If there was a crash, please include the traceback here. 15 | ``` 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | 58 | # Flask stuff: 59 | instance/ 60 | .webassets-cache 61 | 62 | # Scrapy stuff: 63 | .scrapy 64 | 65 | # Sphinx documentation 66 | docs/_build/ 67 | 68 | # PyBuilder 69 | target/ 70 | 71 | # Jupyter Notebook 72 | .ipynb_checkpoints 73 | 74 | # pyenv 75 | .python-version 76 | 77 | # celery beat schedule file 78 | celerybeat-schedule 79 | 80 | # SageMath parsed files 81 | *.sage.py 82 | 83 | # dotenv 84 | .env 85 | 86 | # virtualenv 87 | .venv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | .spyproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | 98 | # mkdocs documentation 99 | /site 100 | 101 | # mypy 102 | .mypy_cache/ 103 | 104 | # IDE(s) 105 | .idea/ 106 | *.iml 107 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | build: 2 | image: latest 3 | 4 | python: 5 | version: 3.6 6 | setup_py_install: true 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | matrix: 3 | include: 4 | - python: 3.6 5 | - python: 3.7 6 | dist: xenial 7 | sudo: true 8 | install: 9 | - pip install -r requirements.txt 10 | - pip install -U tox-travis 11 | script: tox 12 | deploy: 13 | provider: pypi 14 | distributions: sdist bdist_wheel 15 | user: mohankishore 16 | password: 17 | secure: TMO3Szq5Z/hu+CM3q6VKbvJOkWWAznBgjYv5w05qwTkvLMNsWcSBlwumlJ4erA6Pzd408aKVmGN/mie1xDAutrhoWTf3i49NAY++BhkEfYuMZ5s3+LgUXr6l4+Gk4G9t92bc7nMgZIyk1irFr3z3b9NtyTtjPMcZv+pMs/wjjzLDfeWr1rkZ0MiDx/0jUu1AWmkBH7MDWjlFipAT/ST3SjVqrmUyGeDNzQ2bKBLtSp64Cmt6QVi3tEEXA7zryOlVbwGnXgvntIoNLZAaJ9AyQ1RgGromnlOVlPJ9Q7mlgbWja8nQwMCEoDdtcF+ax0dz681PGw/kOBVl8W4REYnzc2J2Ftfk7FCQZo5JlQcZCw3aZqLIXlbpTHM1AK24UCsi5vq+xevPahvwM7mpWgCmGKGw3KadcMr9A5GtwN/xGiMBVB6GdinKZFp7rh8UccTkAYnHnVPlpnhSPD/8g9UgwhxZrWJUMyjCCmvdAOXn54q+j+2fdS+eVDXltcAZ7GVIthDMmZabc/uTMrJ/BHaib+dXDLNUjhZzqACgqZyQqej9CiHUEW3/p66X6eerxSkgc0TurePhUVvhSkmi1Fo30ev6+9K8wdBapw+OCfJnxFC+D1KbZhoF/5WX1NCMU4yKLzkxD4XEftEa1W9VyQHS1UTJU3vIsf6C6efvMfMJqC8= 18 | on: 19 | tags: true 20 | repo: mohankishore/python_dynamodb_lock 21 | python: 3.6 22 | -------------------------------------------------------------------------------- /AUTHORS.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Credits 3 | ======= 4 | 5 | Development Lead 6 | ---------------- 7 | 8 | * Mohan Kishore 9 | 10 | Contributors 11 | ------------ 12 | 13 | None yet. Why not be the first? 14 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Contributing 5 | ============ 6 | 7 | Contributions are welcome, and they are greatly appreciated! Every little bit 8 | helps, and credit will always be given. 
9 | 10 | You can contribute in many ways: 11 | 12 | Types of Contributions 13 | ---------------------- 14 | 15 | Report Bugs 16 | ~~~~~~~~~~~ 17 | 18 | Report bugs at https://github.com/mohankishore/python_dynamodb_lock/issues. 19 | 20 | If you are reporting a bug, please include: 21 | 22 | * Your operating system name and version. 23 | * Any details about your local setup that might be helpful in troubleshooting. 24 | * Detailed steps to reproduce the bug. 25 | 26 | Fix Bugs 27 | ~~~~~~~~ 28 | 29 | Look through the GitHub issues for bugs. Anything tagged with "bug" and "help 30 | wanted" is open to whoever wants to implement it. 31 | 32 | Implement Features 33 | ~~~~~~~~~~~~~~~~~~ 34 | 35 | Look through the GitHub issues for features. Anything tagged with "enhancement" 36 | and "help wanted" is open to whoever wants to implement it. 37 | 38 | Write Documentation 39 | ~~~~~~~~~~~~~~~~~~~ 40 | 41 | Python DynamoDB Lock could always use more documentation, whether as part of the 42 | official Python DynamoDB Lock docs, in docstrings, or even on the web in blog posts, 43 | articles, and such. 44 | 45 | Submit Feedback 46 | ~~~~~~~~~~~~~~~ 47 | 48 | The best way to send feedback is to file an issue at https://github.com/mohankishore/python_dynamodb_lock/issues. 49 | 50 | If you are proposing a feature: 51 | 52 | * Explain in detail how it would work. 53 | * Keep the scope as narrow as possible, to make it easier to implement. 54 | * Remember that this is a volunteer-driven project, and that contributions 55 | are welcome :) 56 | 57 | Get Started! 58 | ------------ 59 | 60 | Ready to contribute? Here's how to set up `python_dynamodb_lock` for local development. 61 | 62 | 1. Fork the `python_dynamodb_lock` repo on GitHub. 63 | 2. Clone your fork locally:: 64 | 65 | $ git clone git@github.com:your_name_here/python_dynamodb_lock.git 66 | 67 | 3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development:: 68 | 69 | $ mkvirtualenv python_dynamodb_lock 70 | $ cd python_dynamodb_lock/ 71 | $ python setup.py develop 72 | 73 | 4. Create a branch for local development:: 74 | 75 | $ git checkout -b name-of-your-bugfix-or-feature 76 | 77 | Now you can make your changes locally. 78 | 79 | 5. When you're done making changes, check that your changes pass flake8 and the 80 | tests, including testing other Python versions with tox:: 81 | 82 | $ flake8 python_dynamodb_lock tests 83 | $ python setup.py test or py.test 84 | $ tox 85 | 86 | To get flake8 and tox, just pip install them into your virtualenv. 87 | 88 | 6. Commit your changes and push your branch to GitHub:: 89 | 90 | $ git add . 91 | $ git commit -m "Your detailed description of your changes." 92 | $ git push origin name-of-your-bugfix-or-feature 93 | 94 | 7. Submit a pull request through the GitHub website. 95 | 96 | Pull Request Guidelines 97 | ----------------------- 98 | 99 | Before you submit a pull request, check that it meets these guidelines: 100 | 101 | 1. The pull request should include tests. 102 | 2. If the pull request adds functionality, the docs should be updated. Put 103 | your new functionality into a function with a docstring, and add the 104 | feature to the list in README.rst. 105 | 3. The pull request should work for Python 2.7, 3.4, 3.5 and 3.6, and for PyPy. Check 106 | https://travis-ci.org/mohankishore/python_dynamodb_lock/pull_requests 107 | and make sure that the tests pass for all supported Python versions. 
108 | 109 | Tips 110 | ---- 111 | 112 | To run a subset of tests:: 113 | 114 | 115 | $ python -m unittest tests.test_python_dynamodb_lock 116 | 117 | Deploying 118 | --------- 119 | 120 | A reminder for the maintainers on how to deploy. 121 | Make sure all your changes are committed (including an entry in HISTORY.rst). 122 | Then run:: 123 | 124 | $ bumpversion patch # possible: major / minor / patch 125 | $ git push 126 | $ git push --tags 127 | 128 | Travis will then deploy to PyPI if tests pass. 129 | -------------------------------------------------------------------------------- /HISTORY.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | History 3 | ======= 4 | 5 | 0.9.0 (2018-10-28) 6 | ------------------ 7 | 8 | * First release on PyPI. 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache Software License 2.0 2 | 3 | Copyright (c) 2018, Mohan Kishore 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | 17 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include AUTHORS.rst 2 | include CONTRIBUTING.rst 3 | include HISTORY.rst 4 | include LICENSE 5 | include README.rst 6 | 7 | recursive-include tests * 8 | recursive-exclude * __pycache__ 9 | recursive-exclude * *.py[co] 10 | 11 | recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif 12 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: clean clean-test clean-pyc clean-build docs help 2 | .DEFAULT_GOAL := help 3 | 4 | define BROWSER_PYSCRIPT 5 | import os, webbrowser, sys 6 | 7 | try: 8 | from urllib import pathname2url 9 | except: 10 | from urllib.request import pathname2url 11 | 12 | webbrowser.open("file://" + pathname2url(os.path.abspath(sys.argv[1]))) 13 | endef 14 | export BROWSER_PYSCRIPT 15 | 16 | define PRINT_HELP_PYSCRIPT 17 | import re, sys 18 | 19 | for line in sys.stdin: 20 | match = re.match(r'^([a-zA-Z_-]+):.*?## (.*)$$', line) 21 | if match: 22 | target, help = match.groups() 23 | print("%-20s %s" % (target, help)) 24 | endef 25 | export PRINT_HELP_PYSCRIPT 26 | 27 | BROWSER := python -c "$$BROWSER_PYSCRIPT" 28 | 29 | help: 30 | @python -c "$$PRINT_HELP_PYSCRIPT" < $(MAKEFILE_LIST) 31 | 32 | clean: clean-build clean-pyc clean-test ## remove all build, test, coverage and Python artifacts 33 | 34 | clean-build: ## remove build artifacts 35 | rm -fr build/ 36 | rm -fr dist/ 37 | rm -fr .eggs/ 38 | find . -name '*.egg-info' -exec rm -fr {} + 39 | find . -name '*.egg' -exec rm -f {} + 40 | 41 | clean-pyc: ## remove Python file artifacts 42 | find . -name '*.pyc' -exec rm -f {} + 43 | find . -name '*.pyo' -exec rm -f {} + 44 | find . 
-name '*~' -exec rm -f {} + 45 | find . -name '__pycache__' -exec rm -fr {} + 46 | 47 | clean-test: ## remove test and coverage artifacts 48 | rm -fr .tox/ 49 | rm -f .coverage 50 | rm -fr htmlcov/ 51 | rm -fr .pytest_cache 52 | 53 | lint: ## check style with flake8 54 | flake8 python_dynamodb_lock tests 55 | 56 | test: ## run tests quickly with the default Python 57 | python setup.py test 58 | 59 | test-all: ## run tests on every Python version with tox 60 | tox 61 | 62 | coverage: ## check code coverage quickly with the default Python 63 | coverage run --source python_dynamodb_lock setup.py test 64 | coverage report -m 65 | coverage html 66 | $(BROWSER) htmlcov/index.html 67 | 68 | docs: ## generate Sphinx HTML documentation, including API docs 69 | # rm -f docs/python_dynamodb_lock.rst 70 | # rm -f docs/modules.rst 71 | sphinx-apidoc -o docs/ -M python_dynamodb_lock 72 | $(MAKE) -C docs clean 73 | $(MAKE) -C docs html 74 | $(BROWSER) docs/_build/html/index.html 75 | 76 | servedocs: docs ## compile the docs watching for changes 77 | watchmedo shell-command -p '*.rst' -c '$(MAKE) -C docs html' -R -D . 78 | 79 | release: dist ## package and upload a release 80 | twine upload dist/* 81 | 82 | dist: clean ## builds source and wheel package 83 | python setup.py sdist 84 | python setup.py bdist_wheel 85 | ls -l dist 86 | 87 | install: clean ## install the package to the active Python's site-packages 88 | python setup.py install 89 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Python DynamoDB Lock 3 | ==================== 4 | 5 | 6 | .. image:: https://img.shields.io/pypi/v/python_dynamodb_lock.svg 7 | :target: https://pypi.python.org/pypi/python_dynamodb_lock 8 | 9 | .. image:: https://img.shields.io/travis/mohankishore/python_dynamodb_lock.svg 10 | :target: https://travis-ci.org/mohankishore/python_dynamodb_lock 11 | 12 | .. image:: https://readthedocs.org/projects/python-dynamodb-lock/badge/?version=latest 13 | :target: https://python-dynamodb-lock.readthedocs.io/en/latest/?badge=latest 14 | :alt: Documentation Status 15 | 16 | 17 | 18 | 19 | This is a general purpose distributed locking library built on top of DynamoDB. It is heavily 20 | "inspired" by the java-based `AmazonDynamoDBLockClient `_ 21 | library, and supports both coarse-grained and fine-grained locking. 22 | 23 | * Free software: Apache Software License 2.0 24 | * Documentation: https://python-dynamodb-lock.readthedocs.io 25 | * Source Code: https://github.com/mohankishore/python_dynamodb_lock 26 | 27 | 28 | Features 29 | -------- 30 | 31 | * Acquire named locks - with configurable retry semantics 32 | * Periodic heartbeat/update for the locks to keep them alive 33 | * Auto-release the locks if there is no heartbeat for a configurable lease-duration 34 | * Notify an app-callback function if the lock is stolen, or gets too close to lease expiry 35 | * Store arbitrary application data along with the locks 36 | * Uses monotonically increasing clock to avoid issues due to clock skew and/or DST etc. 37 | * Auto-delete the database entries after a configurable expiry-period 38 | 39 | 40 | Consistency Notes 41 | ----------------- 42 | 43 | Note that while the lock itself can offer fairly strong consistency guarantees, it does NOT 44 | participate in any kind of distributed transaction. 
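In code, the pattern discussed in the next couple of paragraphs looks roughly like the following sketch (illustrative only - it assumes boto3 credentials/region are already configured, ``update_customer`` is a hypothetical application function, and the callback simply marks where a stolen-lock notification would be handled)::

    import boto3
    from python_dynamodb_lock.python_dynamodb_lock import *

    def app_callback(code, lock):
        # e.g. code == DynamoDBLockError.LOCK_STOLEN or DynamoDBLockError.LOCK_IN_DANGER;
        # abort or roll back the work being done under this lock as quickly as possible
        ...

    dynamodb_resource = boto3.resource('dynamodb')
    lock_client = DynamoDBLockClient(dynamodb_resource)

    lock = lock_client.acquire_lock('customer-xyz', app_callback=app_callback)
    try:
        update_customer('xyz')  # hypothetical app logic protected by the lock
    finally:
        lock.release()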
45 | 46 | For example, you may wish to acquire a lock for some customer-id "xyz", and then make some changes 47 | to the corresponding database entry for this customer-id, and then release the lock - thereby 48 | guaranteeing that only one process changes any given customer-id at a time. 49 | 50 | While the happy path looks okay, consider a case where the application changes take a long time, 51 | and some errors/gc-pauses prevent the heartbeat from updating the lock. Then, some other client 52 | can assume the lock to be abandoned, and start processing the same customer in parallel. The original 53 | lock-client will recognize that its lock has been "stolen" and will let the app know through a callback 54 | event, but the app may have already committed its changes to the database. This can only be solved by 55 | having the application changes and the lock-release be part of a single distributed transaction - which, 56 | as indicated earlier, is NOT supported. 57 | 58 | That said, in most cases, where the heartbeat is not expected to get delayed beyond the lock's lease 59 | duration, the implementation should work just fine. 60 | 61 | Refer to an excellent post by Martin Kleppmann on this subject: 62 | https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html 63 | 64 | Credits 65 | ------- 66 | 67 | * AmazonDynamoDBLockClient: https://github.com/awslabs/dynamodb-lock-client 68 | * Cookiecutter: https://github.com/audreyr/cookiecutter 69 | * Cookiecutter Python: https://github.com/audreyr/cookiecutter-pypackage 70 | 71 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = python -msphinx 7 | SPHINXPROJ = python_dynamodb_lock 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/_static/empty_dir_marker_file: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohankishore/python_dynamodb_lock/be57d31492893ceb773e46892eaeb04858e287f3/docs/_static/empty_dir_marker_file -------------------------------------------------------------------------------- /docs/authors.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../AUTHORS.rst 2 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # python_dynamodb_lock documentation build configuration file, created by 5 | # sphinx-quickstart on Fri Jun 9 13:47:02 2017. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 
9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | # If extensions (or modules to document with autodoc) are in another 17 | # directory, add these directories to sys.path here. If the directory is 18 | # relative to the documentation root, use os.path.abspath to make it 19 | # absolute, like shown here. 20 | # 21 | import os 22 | import sys 23 | sys.path.insert(0, os.path.abspath('..')) 24 | 25 | import python_dynamodb_lock 26 | 27 | # -- General configuration --------------------------------------------- 28 | 29 | # If your documentation needs a minimal Sphinx version, state it here. 30 | # 31 | # needs_sphinx = '1.0' 32 | 33 | # Add any Sphinx extension module names here, as strings. They can be 34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 35 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode'] 36 | 37 | # autodoc config 38 | autoclass_content = 'both' 39 | add_module_names = False 40 | autodoc_member_order = 'bysource' 41 | 42 | # Add any paths that contain templates here, relative to this directory. 43 | templates_path = ['_templates'] 44 | 45 | # The suffix(es) of source filenames. 46 | # You can specify multiple suffix as a list of string: 47 | # 48 | # source_suffix = ['.rst', '.md'] 49 | source_suffix = '.rst' 50 | 51 | # The master toctree document. 52 | master_doc = 'index' 53 | 54 | # General information about the project. 55 | project = u'Python DynamoDB Lock' 56 | copyright = u"2018, Mohan Kishore" 57 | author = u"Mohan Kishore" 58 | 59 | # The version info for the project you're documenting, acts as replacement 60 | # for |version| and |release|, also used in various other places throughout 61 | # the built documents. 62 | # 63 | # The short X.Y version. 64 | version = python_dynamodb_lock.__version__ 65 | # The full version, including alpha/beta/rc tags. 66 | release = python_dynamodb_lock.__version__ 67 | 68 | # The language for content autogenerated by Sphinx. Refer to documentation 69 | # for a list of supported languages. 70 | # 71 | # This is also used if you do content translation via gettext catalogs. 72 | # Usually you set "language" from the command line for these cases. 73 | language = None 74 | 75 | # List of patterns, relative to source directory, that match files and 76 | # directories to ignore when looking for source files. 77 | # This patterns also effect to html_static_path and html_extra_path 78 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 79 | 80 | # The name of the Pygments (syntax highlighting) style to use. 81 | pygments_style = 'sphinx' 82 | 83 | # If true, `todo` and `todoList` produce output, else they produce nothing. 84 | todo_include_todos = False 85 | 86 | 87 | # -- Options for HTML output ------------------------------------------- 88 | 89 | # The theme to use for HTML and HTML Help pages. See the documentation for 90 | # a list of builtin themes. 91 | # 92 | html_theme = 'sphinx_rtd_theme' 93 | 94 | # Theme options are theme-specific and customize the look and feel of a 95 | # theme further. For a list of options available for each theme, see the 96 | # documentation. 97 | # 98 | # html_theme_options = {} 99 | 100 | # Add any paths that contain custom static files (such as style sheets) here, 101 | # relative to this directory. 
They are copied after the builtin static files, 102 | # so a file named "default.css" will overwrite the builtin "default.css". 103 | html_static_path = ['_static'] 104 | 105 | 106 | # -- Options for HTMLHelp output --------------------------------------- 107 | 108 | # Output file base name for HTML help builder. 109 | htmlhelp_basename = 'python_dynamodb_lockdoc' 110 | 111 | 112 | # -- Options for LaTeX output ------------------------------------------ 113 | 114 | latex_elements = { 115 | # The paper size ('letterpaper' or 'a4paper'). 116 | # 117 | # 'papersize': 'letterpaper', 118 | 119 | # The font size ('10pt', '11pt' or '12pt'). 120 | # 121 | # 'pointsize': '10pt', 122 | 123 | # Additional stuff for the LaTeX preamble. 124 | # 125 | # 'preamble': '', 126 | 127 | # Latex figure (float) alignment 128 | # 129 | # 'figure_align': 'htbp', 130 | } 131 | 132 | # Grouping the document tree into LaTeX files. List of tuples 133 | # (source start file, target name, title, author, documentclass 134 | # [howto, manual, or own class]). 135 | latex_documents = [ 136 | (master_doc, 'python_dynamodb_lock.tex', 137 | u'Python DynamoDB Lock Documentation', 138 | u'Mohan Kishore', 'manual'), 139 | ] 140 | 141 | 142 | # -- Options for manual page output ------------------------------------ 143 | 144 | # One entry per manual page. List of tuples 145 | # (source start file, name, description, authors, manual section). 146 | man_pages = [ 147 | (master_doc, 'python_dynamodb_lock', 148 | u'Python DynamoDB Lock Documentation', 149 | [author], 1) 150 | ] 151 | 152 | 153 | # -- Options for Texinfo output ---------------------------------------- 154 | 155 | # Grouping the document tree into Texinfo files. List of tuples 156 | # (source start file, target name, title, author, 157 | # dir menu entry, description, category) 158 | texinfo_documents = [ 159 | (master_doc, 'python_dynamodb_lock', 160 | u'Python DynamoDB Lock Documentation', 161 | author, 162 | 'python_dynamodb_lock', 163 | 'One line description of project.', 164 | 'Miscellaneous'), 165 | ] 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/history.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../HISTORY.rst 2 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to Python DynamoDB Lock's documentation! 2 | ====================================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | readme 9 | installation 10 | usage 11 | python_dynamodb_lock 12 | contributing 13 | authors 14 | history 15 | 16 | Indices and tables 17 | ================== 18 | * :ref:`genindex` 19 | * :ref:`modindex` 20 | * :ref:`search` 21 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | 8 | Stable release 9 | -------------- 10 | 11 | To install Python DynamoDB Lock, run this command in your terminal: 12 | 13 | .. 
code-block:: console 14 | 15 | $ pip install python_dynamodb_lock 16 | 17 | This is the preferred method to install Python DynamoDB Lock, as it will always install the most recent stable release. 18 | 19 | If you don't have `pip`_ installed, this `Python installation guide`_ can guide 20 | you through the process. 21 | 22 | .. _pip: https://pip.pypa.io 23 | .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ 24 | 25 | 26 | From sources 27 | ------------ 28 | 29 | The sources for Python DynamoDB Lock can be downloaded from the `Github repo`_. 30 | 31 | You can either clone the public repository: 32 | 33 | .. code-block:: console 34 | 35 | $ git clone git://github.com/mohankishore/python_dynamodb_lock 36 | 37 | Or download the `tarball`_: 38 | 39 | .. code-block:: console 40 | 41 | $ curl -OL https://github.com/mohankishore/python_dynamodb_lock/tarball/master 42 | 43 | Once you have a copy of the source, you can install it with: 44 | 45 | .. code-block:: console 46 | 47 | $ python setup.py install 48 | 49 | 50 | .. _Github repo: https://github.com/mohankishore/python_dynamodb_lock 51 | .. _tarball: https://github.com/mohankishore/python_dynamodb_lock/tarball/master 52 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=python -msphinx 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | set SPHINXPROJ=python_dynamodb_lock 13 | 14 | if "%1" == "" goto help 15 | 16 | %SPHINXBUILD% >NUL 2>NUL 17 | if errorlevel 9009 ( 18 | echo. 19 | echo.The Sphinx module was not found. Make sure you have Sphinx installed, 20 | echo.then set the SPHINXBUILD environment variable to point to the full 21 | echo.path of the 'sphinx-build' executable. Alternatively you may add the 22 | echo.Sphinx directory to PATH. 23 | echo. 24 | echo.If you don't have Sphinx installed, grab it from 25 | echo.http://sphinx-doc.org/ 26 | exit /b 1 27 | ) 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/python_dynamodb_lock.rst: -------------------------------------------------------------------------------- 1 | python\_dynamodb\_lock package 2 | ============================== 3 | 4 | .. automodule:: python_dynamodb_lock 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | 9 | 10 | python\_dynamodb\_lock module 11 | ----------------------------- 12 | 13 | .. automodule:: python_dynamodb_lock.python_dynamodb_lock 14 | :members: 15 | :undoc-members: 16 | :show-inheritance: 17 | 18 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../README.rst 2 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ===== 3 | 4 | To use Python DynamoDB Lock in a project:: 5 | 6 | from python_dynamodb_lock.python_dynamodb_lock import * 7 | 8 | 9 | Basic Usage 10 | ----------- 11 | 12 | You would typically create (and shut down) the DynamoDBLockClient at the application startup 13 | and shutdown:: 14 | 15 | # get a reference to the DynamoDB resource 16 | dynamodb_resource = boto3.resource('dynamodb') 17 | 18 | # create the lock-client 19 | lock_client = DynamoDBLockClient(dynamodb_resource) 20 | 21 | ... 22 | 23 | # close the lock_client 24 | lock_client.close() 25 | 26 | 27 | Then, you would wrap the lock acquisition and release around the code-block that needs to be 28 | protected by a mutex:: 29 | 30 | # acquire the lock 31 | lock = lock_client.acquire_lock('my_key') 32 | 33 | # ... app logic that requires the lock ... 34 | 35 | # release the lock after you are done 36 | lock.release() 37 | 38 | 39 | Both the lock_client constructor and the acquire_lock method support numerous arguments to help 40 | control/customize the behavior. Please look at the :doc:`API documentation <./python_dynamodb_lock>` 41 | for more details. 42 | 43 | 44 | Context Management 45 | ------------------ 46 | The DynamoDBLock class implements the context-management interface and you can auto-release the 47 | lock by doing something like this:: 48 | 49 | with lock_client.acquire_lock('my_key'): 50 | # ... app logic that requires the lock ... 51 | 52 | 53 | Table Creation 54 | -------------- 55 | The DynamoDBLockClient provides a helper class-method to create the table in DynamoDB:: 56 | 57 | # get a reference to the DynamoDB client 58 | ddb_client = boto3.client('dynamodb') 59 | 60 | # create the table 61 | DynamoDBLockClient.create_dynamodb_table(ddb_client) 62 | 63 | The above code snippet will create a table with the default name, partition/sort-key column-names, 64 | and read/write throughput, but the method supports optional parameters to configure all of these. 65 | 66 | That said, you can always create the table offline (e.g. using the AWS console) and use whatever 67 | table and column names you wish. Please do remember to set up the TTL attribute to enable auto-deleting 68 | of old/abandoned locks. 69 | 70 | 71 | Error-Handling 72 | -------------- 73 | 74 | There are a lot of things that can go wrong when dealing with distributed systems - the library 75 | tries to strike the right balance between hiding these errors, and allowing the calling application to handle 76 | specific kinds of errors as needed. Let's go through the different use-cases one at a time. 77 | 78 | 79 | Lock Acquisition 80 | ~~~~~~~~~~~~~~~~ 81 | 82 | This is a synchronous use-case where the caller is waiting till it receives a lock. In this case, 83 | most of the errors are wrapped inside a DynamoDBLockError and raised up to the caller. The key error 84 | scenarios are the following: 85 | 86 | * **Some other client holds the lock** 87 | * This is not treated as a real error scenario. This client would just wait for a configurable 88 | retry_period, and then try to acquire the lock again.
89 | * **Race-condition amongst multiple lock-clients waiting to acquire lock** 90 | * Whenever the "old" lock is released (or expires), there may be multiple "new" clients trying 91 | to grab the lock - in which case, one of those would succeed, and the rest of them would get 92 | a DynamoDB's ConditionalUpdateException. This is also not treated as a real error scenario, and 93 | the client would just wait for the retry_period and then try again. 94 | * **This client goes over the configurable retry_timeout period** 95 | * After repeated retry attempts, this client might eventually go over the retry_timeout period 96 | (as provided by the caller) - then, a DynamoDBLockError with code == ACQUIRE_TIMEOUT will be thrown. 97 | * **Any other error/exception** 98 | * Any other error would be wrapped inside a DynamoDBLockError with code == UNKNOWN_ERROR and raised 99 | to the caller. 100 | 101 | 102 | Lock Release 103 | ~~~~~~~~~~~~ 104 | 105 | While this is also a synchronous use-case, in most cases, by the time this method is called, the caller 106 | would have already committed his application-data changes, and would not have real rollback options. 107 | Therefore, this method defaults to the best_effort mode, where it will try to release the lock properly, 108 | but will log and swallow any exceptions encountered in the process. But, for the callers that are interested 109 | in being notified of the errors, they can pass in best_effort=False and have all the errors wrapped inside 110 | a DynamoDBLockError and raised up to them. The specific error scenarios could be one of the below: 111 | 112 | * **This client does not own the lock** 113 | * This can happen if the caller tries to use this client to release a lock owned by some other client. 114 | The client will raise a DynamoDBLockError with code == LOCK_NOT_OWNED. 115 | * **The lock was stolen by some other client** 116 | * This should typically not happen unless someone messes with the back-end DynamoDB table directly. The 117 | client will raise a DynamoDBLockError with code == LOCK_STOLEN. 118 | * **Any other error/exception** 119 | * Any other error would be wrapped inside a DynamoDBLockError with code == UNKNOWN_ERROR and raised 120 | to the caller. 121 | 122 | 123 | Lock Heartbeat 124 | ~~~~~~~~~~~~~~ 125 | 126 | This is an asynchronous use-case, where the caller is not directly available to handle any errors. To handle 127 | any error scenarios encountered while sending a heartbeat for a given lock, the client allows the caller to 128 | pass in an app_callback function at the time of acquiring the lock. 129 | 130 | * **The lock was stolen by some other client** 131 | * This should typically not happen unless someone messes with the back-end DynamoDB table directly. The 132 | client will call the app_callback with code == LOCK_STOLEN. The callback is expected to terminate the 133 | related application processing and rollback any changes made under this lock's protection. 134 | * **The lock has entered the danger zone** 135 | * If the send_heartbeat call for a given lock fails multiple times, the lock could go over the configurable 136 | safe_period. The client will call the app_callback with code == LOCK_IN_DANGER. The callback is expected 137 | to complete/terminate the related application processing, and call the lock.release() as soon as possible. 138 | 139 | Note: it is worth noting that the client spins up two separate threads - one to send out the heartbeats, and 140 | another one to check the lock-statuses. 
If, for whatever reason, the send_heartbeat calls start hanging or 141 | taking too long, the other thread can still notify the app about the locks getting into the 142 | danger-zone. The actual app_callbacks are executed on a dedicated ThreadPoolExecutor. 143 | 144 | 145 | Client Close 146 | ~~~~~~~~~~~~ 147 | 148 | By default, the lock_client.close() will NOT release all the locks - as releasing the locks prematurely while the 149 | application is still making changes assuming that it has the lock can be dangerous. As soon as a lock is released 150 | by this client, some other client may pick it up, and the associated app may start processing the underlying 151 | business entity in parallel. 152 | 153 | It is highly recommended that the application manage its shutdown-lifecycle such that all the worker threads 154 | operating under these locks are first terminated (committed or rolled-back), the corresponding locks released 155 | (one at a time - by each worker thread), and then the lock_client.close() method is called. Alternatively, consider 156 | letting the process die without releasing all the locks - they will be auto-released when their lease runs out 157 | after a while. 158 | 159 | That said, if the caller does wish to release all locks when closing the lock_client, it can pass in the release_locks=True 160 | argument when invoking the close() method. Please note that all the locks are released in the best_effort mode - 161 | i.e. all the errors will be logged and swallowed. 162 | 163 | 164 | Process Termination 165 | ~~~~~~~~~~~~~~~~~~~ 166 | 167 | A sudden process termination would leave the locks frozen with the values as of their last heartbeat. These locks 168 | will go through one of the following scenarios: 169 | 170 | * **Eventual expiry - as per the TTL attribute** 171 | * Each lock has a TTL attribute (named 'expiry_time' by default) - which stores the timestamp (as epoch) after 172 | which it is eligible for auto-deletion by DynamoDB. This deletion does not have a fixed SLA - but will likely 173 | happen over the next 24 hours after the lock expires. 174 | * **Some other client tries to acquire the lock** 175 | * The client will treat the lock as an active lock - and will wait for a period equal to its lease_duration from 176 | the point it first sees the lock. This does need the acquire_lock call to be made with a retry_timeout larger 177 | than the lease_duration of the lock - otherwise, the acquire_lock call will time out before the lease expires. 178 | 179 | 180 | Throughput Provisioning 181 | ----------------------- 182 | 183 | Whenever using DynamoDB, you have to think about how much read and write throughput you need to provision for your 184 | table. The DynamoDBLockClient makes the following calls to DynamoDB: 185 | 186 | * **acquire_lock** 187 | * ``get_item``: at least once per lock, and more often if there is lock contention and the lock_client needs to 188 | retry multiple times before acquiring the lock. 189 | * ``put_item``: typically once per lock - whenever the lock becomes available. 190 | * ``update_item``: should be fairly rare - only needed when this client needs to take over an abandoned lock. 191 | * So, the write throughput should be directly proportional to the application's need to acquire locks, but the 192 | read throughput is a little harder to predict - it can be more sensitive to the lock contention at runtime.
193 | * **release_lock** 194 | * ``delete_item``: once per lock 195 | * So, assuming that every lock that is acquired will be released, this is also directly proportional to the 196 | application's lock acquisition TPS. 197 | * **send_heartbeat** 198 | * ``update_item``: the lock client supports a deterministic model where the caller can pass in a TPS value, and 199 | the client will honor the same when making the heartbeat calls. Alternatively, the client also supports an 200 | "adaptive" mode (the default), where it will take all the active locks at the beginning of each heartbeat_period 201 | and spread their individual heartbeat calls evenly across the whole period. 202 | 203 | 204 | Differences from Java implementation 205 | ------------------------------------ 206 | 207 | As indicated before, this library derives most of its design from the 208 | `AmazonDynamoDBLockClient <https://github.com/awslabs/dynamodb-lock-client>`_ (Java) module. This section goes over a few details 209 | where this library goes a slightly different way: 210 | 211 | * **Added support for DynamoDB TTL attribute** 212 | * Since Feb 2017, DynamoDB supports having the tables designate one of the attributes as a TTL attribute - 213 | containing an epoch timestamp value. Once the current time goes past that value, that row becomes eligible 214 | for automated deletion by DynamoDB. These deletes do not incur any additional costs and help keep the table 215 | clean of old/stale entries. 216 | * **Dropped support for lock retention after release** 217 | * The Java library supports an additional lock-attribute called "deleteOnRelease" - which allows the caller to 218 | control whether the lock, on its release, should be deleted or just marked as released. This Python module 219 | drops that flexibility, and always deletes the lock on release. The idea is to not treat the lock 220 | table as a general purpose data-store, and instead treat it as a persistent representation of the "currently active 221 | locks". 222 | * **Dropped support for BLOB data field** 223 | * The Java library supports a byte[] field called 'data' in addition to supporting arbitrary named fields to 224 | be stored along with any lock. This Python module drops that additional data field - with the understanding 225 | that any additional data that the app wishes to store can be passed in as part of the additional_attributes 226 | map/dict that is already supported. 227 | * **Separate lock classes to represent local vs remote locks** 228 | * The Java library uses the same LockItem class to represent both the locks created/acquired by this client as 229 | well as the locks loaded from the database (currently held by other clients). This results in confusing 230 | overloading of fields, e.g. the "lookupTime" is overloaded to store the "lastUpdatedTime" for the locks owned 231 | by this client, and the "lastLookupTime" for the locks owned by other clients. 232 | * **Added support for explicit and adaptive heartbeat TPS** 233 | * The Java library would fire off the heartbeat updates for all the active locks one-after-another - as fast as 234 | it can, and then wait till the end of the heartbeat_period, and then do the same thing over again. This can result 235 | in significant write TPS if the application has a lot (say ~100) of active locks. This Python module allows the 236 | caller to specify an explicit TPS value, or use an adaptive mode - where the heartbeats are evenly spread 237 | over the whole heartbeat_period.
238 | * **Different callback model** 239 | * The Java library creates a different thread for each lock that wishes to support "session-monitors". This 240 | Python module uses a single thread (separate from the one used to send heartbeats) to periodically check that 241 | the locks are being "heartbeat"-ed and, if needed, uses a ThreadPoolExecutor to invoke the app_callbacks. 242 | * **Uses retry_period/retry_timeout arguments instead of refreshPeriod/additionalTimeToWait** 243 | * Though the logic is pretty much the same, the names are a little clearer about the intent - the "retry_period" 244 | controls how long the client waits before retrying a previously failed lock acquisition, and "retry_timeout" 245 | controls how long the client keeps retrying before giving up and raising an error. 246 | * **Simplified sort-key handling** 247 | * The Java library goes to great lengths to support the caller's ability to use a simple hash-partitioned table 248 | as well as a hash-and-range partitioned table. This Python module drops the support for hash-partitioned 249 | tables, and instead chooses to use a default sort-key of '-' to simplify the implementation. 250 | * **Lock release best_effort mode** 251 | * The Java library defaults to best_effort == False, whereas this Python module defaults to True - i.e. trying 252 | to release a lock without choosing an explicit "best_effort" setting could result in exceptions being 253 | thrown in Java, but would be silently logged and swallowed in Python. 254 | * **Releasing all locks on client close** 255 | * The Java library will always try to release all locks when closing the lock_client. This Python module will 256 | default to NOT releasing the locks on lock_client closure - but does support an optional argument called 257 | "release_locks" that will allow the caller to request lock releases. The idea behind this is that it is not 258 | a safe operation to release the locks without considering the application threads that could continue to 259 | process under the assumption that they hold a lock on the underlying business entity. Making the caller 260 | request the lock-release explicitly is meant to encourage them to wind up the application processing 261 | and release the locks first, before trying to close the lock_client. 262 | * **Dropped/Missing support for AWS RequestMetricCollector** 263 | * The Java library has pervasive support for collecting the AWS request metrics. This Python module does not 264 | (yet) support this capability. 265 | 266 | -------------------------------------------------------------------------------- /python_dynamodb_lock/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """The package contains a single module - with the same name i.e. python_dynamodb_lock""" 4 | 5 | __author__ = """Mohan Kishore""" 6 | __email__ = 'mohankishore@yahoo.com' 7 | __version__ = '0.9.1' 8 | __copyright__ = 'Copyright (C) 2018 Mohan Kishore' 9 | -------------------------------------------------------------------------------- /python_dynamodb_lock/python_dynamodb_lock.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | This is a general purpose distributed locking library built on top of DynamoDB. It is heavily 5 | "inspired" by the java-based AmazonDynamoDBLockClient library, and supports both coarse-grained 6 | and fine-grained locking.
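
Illustrative usage sketch - assuming a boto3 DynamoDB resource is already configured, and using the
context-manager support described in the usage docs::

    import boto3
    from python_dynamodb_lock.python_dynamodb_lock import DynamoDBLockClient

    lock_client = DynamoDBLockClient(boto3.resource('dynamodb'))

    # the lock is auto-released when the "with" block exits
    with lock_client.acquire_lock('my_key'):
        pass  # ... app logic that requires the lock ...

    lock_client.close()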
7 | """ 8 | 9 | from botocore.exceptions import ClientError 10 | from concurrent.futures import ThreadPoolExecutor 11 | import datetime 12 | from decimal import Decimal 13 | import logging 14 | import socket 15 | import time 16 | import threading 17 | from urllib.parse import quote 18 | import uuid 19 | 20 | # module level logger 21 | logger = logging.getLogger(__name__) 22 | 23 | 24 | class DynamoDBLockClient: 25 | """ 26 | Provides distributed locks using DynamoDB's support for conditional reads/writes. 27 | """ 28 | 29 | # default values for class properties 30 | _DEFAULT_TABLE_NAME = 'DynamoDBLockTable' 31 | _DEFAULT_PARTITION_KEY_NAME = 'lock_key' 32 | _DEFAULT_SORT_KEY_NAME = 'sort_key' 33 | _DEFAULT_TTL_ATTRIBUTE_NAME = 'expiry_time' 34 | _DEFAULT_HEARTBEAT_PERIOD = datetime.timedelta(seconds=5) 35 | _DEFAULT_SAFE_PERIOD = datetime.timedelta(seconds=20) 36 | _DEFAULT_LEASE_DURATION = datetime.timedelta(seconds=30) 37 | _DEFAULT_EXPIRY_PERIOD = datetime.timedelta(hours=1) 38 | _DEFAULT_HEARTBEAT_TPS = -1 39 | _DEFAULT_APP_CALLBACK_THREADPOOL_SIZE = 5 40 | # for optional create-table method 41 | _DEFAULT_READ_CAPACITY = 5 42 | _DEFAULT_WRITE_CAPACITY = 5 43 | 44 | # to help make the sort-key optional 45 | _DEFAULT_SORT_KEY_VALUE = '-' 46 | 47 | # DynamoDB "hard-coded" column names 48 | _COL_OWNER_NAME = 'owner_name' 49 | _COL_LEASE_DURATION = 'lease_duration' 50 | _COL_RECORD_VERSION_NUMBER = 'record_version_number' 51 | 52 | 53 | def __init__(self, 54 | dynamodb_resource, 55 | table_name=_DEFAULT_TABLE_NAME, 56 | partition_key_name=_DEFAULT_PARTITION_KEY_NAME, 57 | sort_key_name=_DEFAULT_SORT_KEY_NAME, 58 | ttl_attribute_name=_DEFAULT_TTL_ATTRIBUTE_NAME, 59 | owner_name=None, 60 | heartbeat_period=_DEFAULT_HEARTBEAT_PERIOD, 61 | safe_period=_DEFAULT_SAFE_PERIOD, 62 | lease_duration=_DEFAULT_LEASE_DURATION, 63 | expiry_period=_DEFAULT_EXPIRY_PERIOD, 64 | heartbeat_tps=_DEFAULT_HEARTBEAT_TPS, 65 | app_callback_executor=None 66 | ): 67 | """ 68 | :param boto3.ServiceResource dynamodb_resource: mandatory argument 69 | :param str table_name: defaults to 'DynamoDBLockTable' 70 | :param str partition_key_name: defaults to 'lock_key' 71 | :param str sort_key_name: defaults to 'sort_key' 72 | :param str ttl_attribute_name: defaults to 'expiry_time' 73 | :param str owner_name: defaults to hostname + _uuid 74 | :param datetime.timedelta heartbeat_period: How often to update DynamoDB to note that the 75 | instance is still running. It is recommended to make this at least 4 times smaller 76 | than the leaseDuration. Defaults to 5 seconds. 77 | :param datetime.timedelta safe_period: How long is it okay to go without a heartbeat before 78 | considering a lock to be in "danger". Defaults to 20 seconds. 79 | :param datetime.timedelta lease_duration: The length of time that the lease for the lock 80 | will be granted for. i.e. if there is no heartbeat for this period of time, then 81 | the lock will be considered as expired. Defaults to 30 seconds. 82 | :param datetime.timedelta expiry_period: The fallback expiry timestamp to allow DynamoDB 83 | to cleanup old locks after a server crash. This value should be significantly larger 84 | than the _lease_duration to ensure that clock-skew etc. are not an issue. Defaults 85 | to 1 hour. 86 | :param int heartbeat_tps: The number of heartbeats to execute per second (per node) - this 87 | will have direct correlation to DynamoDB provisioned throughput for writes. 
If set 88 | to -1, the client will distribute the heartbeat calls evenly over the _heartbeat_period 89 | - which uses lower throughput for smaller number of locks. However, if you want a more 90 | deterministic heartbeat-call-rate, then specify an explicit TPS value. Defaults to -1. 91 | :param ThreadPoolExecutor app_callback_executor: The executor to be used for invoking the 92 | app_callbacks in case of un-expected errors. Defaults to a ThreadPoolExecutor with a 93 | maximum of 5 threads. 94 | """ 95 | self._uuid = uuid.uuid4().hex 96 | self._dynamodb_resource = dynamodb_resource 97 | self._table_name = table_name 98 | self._partition_key_name = partition_key_name 99 | self._sort_key_name = sort_key_name 100 | self._ttl_attribute_name = ttl_attribute_name 101 | self._owner_name = owner_name or (socket.getfqdn() + self._uuid) 102 | self._heartbeat_period = heartbeat_period 103 | self._safe_period = safe_period 104 | self._lease_duration = lease_duration 105 | self._expiry_period = expiry_period 106 | self._heartbeat_tps = heartbeat_tps 107 | self._app_callback_executor = app_callback_executor or ThreadPoolExecutor( 108 | max_workers=self._DEFAULT_APP_CALLBACK_THREADPOOL_SIZE, 109 | thread_name_prefix='DynamoDBLockClient-AC-' + self._uuid + "-" 110 | ) 111 | # additional properties 112 | self._locks = {} 113 | self._shutting_down = False 114 | self._dynamodb_table = dynamodb_resource.Table(table_name) 115 | # and, initialization 116 | self._start_heartbeat_sender_thread() 117 | self._start_heartbeat_checker_thread() 118 | logger.info('Created: %s', str(self)) 119 | 120 | 121 | def _start_heartbeat_sender_thread(self): 122 | """ 123 | Creates and starts a daemon thread - that sends out periodic heartbeats for the active locks 124 | """ 125 | self._heartbeat_sender_thread = threading.Thread( 126 | name='DynamoDBLockClient-HS-' + self._uuid, 127 | target=self._send_heartbeat_loop 128 | ) 129 | self._heartbeat_sender_thread.daemon = True 130 | self._heartbeat_sender_thread.start() 131 | logger.info('Started the heartbeat-sender thread: %s', str(self._heartbeat_sender_thread)) 132 | 133 | 134 | def _send_heartbeat_loop(self): 135 | """ 136 | Keeps renewing the leases for the locks owned by this client - till the client is closed. 137 | 138 | The method has a while loop that wakes up on a periodic basis (as defined by the _heartbeat_period) 139 | and invokes the _send_heartbeat() method on each lock. It spreads the heartbeat-calls evenly over 140 | the heartbeat window - to minimize the DynamoDB write throughput requirements. 
141 | """ 142 | while not self._shutting_down: 143 | logger.info('Starting a send_heartbeat loop') 144 | start_time = time.monotonic() 145 | locks = self._locks.copy() 146 | 147 | avg_loop_time = 1.0 / self._heartbeat_tps 148 | if self._heartbeat_tps == -1: 149 | # use an "adaptive" algorithm if the TPS is set to -1 150 | avg_loop_time = self._heartbeat_period.total_seconds() / len(locks) if locks else -1.0 151 | 152 | count = 0 153 | for uid, lock in locks.items(): 154 | count += 1 155 | self._send_heartbeat(lock) 156 | # After each lock, sleep a little (if needed) to honor the _heartbeat_tps 157 | curr_loop_end_time = time.monotonic() 158 | next_loop_start_time = start_time + count * avg_loop_time 159 | if curr_loop_end_time < next_loop_start_time: 160 | time.sleep( next_loop_start_time - curr_loop_end_time ) 161 | 162 | # After all the locks have been "heartbeat"-ed, sleep before the next run (if needed) 163 | logger.info('Finished the send_heartbeat loop') 164 | end_time = time.monotonic() 165 | next_start_time = start_time + self._heartbeat_period.total_seconds() 166 | if end_time < next_start_time and not self._shutting_down: 167 | time.sleep( next_start_time - end_time ) 168 | elif end_time > next_start_time + avg_loop_time: 169 | logger.warning('Sending heartbeats for all the locks took longer than the _heartbeat_period') 170 | 171 | 172 | def _send_heartbeat(self, lock): 173 | """ 174 | Renews the lease for the given lock. 175 | 176 | It actually just switches the record_version_number on the existing lock - which tells 177 | all other clients waiting for this lock that the current owner is still alive, and they 178 | effectively reset their timers (to wait for _lease_duration from the time they see this 179 | new record_version_number). 180 | 181 | As this method is called on a background thread, it uses the app_callback to let the 182 | (lock requestor) app know when there are significant events in the lock lifecycle. 183 | 184 | 1) LOCK_STOLEN 185 | When the heartbeat process finds that someone else has taken over the lock, 186 | or it has been released/deleted without the lock-client's knowledge. In this case, the 187 | app_callback should just try to abort its processing and roll back any changes it had 188 | made with the assumption that it owned the lock. This is not a normal occurrance and 189 | should only happen if someone manually changes/deletes the data in DynamoDB. 
190 | 191 | :param DynamoDBLock lock: the lock instance that needs its lease to be renewed 192 | """ 193 | logger.info('Sending a DynamoDBLock heartbeat: %s', lock.unique_identifier) 194 | with lock.thread_lock: 195 | try: 196 | # the ddb-lock might have been released while waiting for the thread-lock 197 | if lock.unique_identifier not in self._locks: return 198 | 199 | # skip if the lock is not in the LOCKED state 200 | if lock.status != DynamoDBLock.LOCKED: 201 | logger.info('Skipping the heartbeat as the lock is not locked any more: %s', lock.status) 202 | return 203 | 204 | old_record_version_number = lock.record_version_number 205 | new_record_version_number = str(uuid.uuid4()) 206 | new_expiry_time = int(time.time() + self._expiry_period.total_seconds()) 207 | 208 | # first, try to update the database 209 | self._dynamodb_table.update_item( 210 | Key={ 211 | self._partition_key_name: lock.partition_key, 212 | self._sort_key_name: lock.sort_key 213 | }, 214 | UpdateExpression='SET #rvn = :new_rvn, #et = :new_et', 215 | ConditionExpression='attribute_exists(#pk) AND attribute_exists(#sk) AND #rvn = :old_rvn', 216 | ExpressionAttributeNames={ 217 | '#pk': self._partition_key_name, 218 | '#sk': self._sort_key_name, 219 | '#rvn': self._COL_RECORD_VERSION_NUMBER, 220 | '#et': self._ttl_attribute_name, 221 | }, 222 | ExpressionAttributeValues={ 223 | ':old_rvn': old_record_version_number, 224 | ':new_rvn': new_record_version_number, 225 | ':new_et': new_expiry_time, 226 | } 227 | ) 228 | 229 | # if successful, update the in-memory lock representations 230 | lock.record_version_number = new_record_version_number 231 | lock.expiry_time = new_expiry_time 232 | lock.last_updated_time = time.monotonic() 233 | lock.status = DynamoDBLock.LOCKED 234 | logger.debug('Successfully sent the heartbeat: %s', lock.unique_identifier) 235 | except ClientError as e: 236 | if e.response['Error']['Code'] == 'ConditionalCheckFailedException': 237 | # someone else stole our lock! 238 | logger.warning('LockStolenError while sending heartbeat: %s', lock.unique_identifier) 239 | # let's mark the in-memory lock representation as invalid 240 | lock.status = DynamoDBLock.INVALID 241 | # let's drop it from our in-memory collection as well 242 | del self._locks[lock.unique_identifier] 243 | # callback - the app should abort its processing; no need to release 244 | self._call_app_callback(lock, DynamoDBLockError.LOCK_STOLEN) 245 | else: 246 | logger.warning('ClientError while sending heartbeat: %s', lock.unique_identifier, exc_info=True) 247 | except Exception: 248 | logger.warning('Unexpected error while sending heartbeat: %s', lock.unique_identifier, exc_info=True) 249 | 250 | 251 | def _start_heartbeat_checker_thread(self): 252 | """ 253 | Creates and starts a daemon thread - that checks that the locks are heartbeat-ing as expected 254 | """ 255 | self._heartbeat_checker_thread = threading.Thread( 256 | name='DynamoDBLockClient-HC-' + self._uuid, 257 | target=self._check_heartbeat_loop 258 | ) 259 | self._heartbeat_checker_thread.daemon = True 260 | self._heartbeat_checker_thread.start() 261 | logger.info('Started the heartbeat-checker thread: %s', str(self._heartbeat_checker_thread)) 262 | 263 | 264 | def _check_heartbeat_loop(self): 265 | """ 266 | Keeps checking the locks to ensure that they are being updated as expected. 267 | 268 | The method has a while loop that wakes up on a periodic basis (as defined by the _heartbeat_period) 269 | and invokes the _check_heartbeat() method on each lock. 
270 | """ 271 | while not self._shutting_down: 272 | logger.info('Starting a check_heartbeat loop') 273 | start_time = time.monotonic() 274 | locks = self._locks.copy() 275 | 276 | for uid, lock in locks.items(): 277 | self._check_heartbeat(lock) 278 | 279 | # After all the locks have been checked, sleep before the next run (if needed) 280 | logger.info('Finished the check_heartbeat loop') 281 | end_time = time.monotonic() 282 | next_start_time = start_time + self._heartbeat_period.total_seconds() 283 | if end_time < next_start_time and not self._shutting_down: 284 | time.sleep( next_start_time - end_time ) 285 | else: 286 | logger.warning('Checking heartbeats for all the locks took longer than the _heartbeat_period') 287 | 288 | 289 | def _check_heartbeat(self, lock): 290 | """ 291 | Checks that the given lock's lease expiry is within the safe-period. 292 | 293 | As this method is called on a background thread, it uses the app_callback to let the 294 | (lock requestor) app know when there are significant events in the lock lifecycle. 295 | 296 | 1) LOCK_IN_DANGER 297 | When the heartbeat for a given lock has failed multiple times, and it is 298 | now in danger of going past its lease-duration without a successful heartbeat - at which 299 | point, any other client waiting to acquire the lock will consider it abandoned and take 300 | over. In this case, the app_callback should try to expedite the processing, either 301 | commit or roll back its changes quickly, and release the lock. 302 | 303 | :param DynamoDBLock lock: the lock instance whose heartbeat needs to be checked 304 | """ 305 | logger.info('Checking a DynamoDBLock heartbeat: %s', lock.unique_identifier) 306 | 307 | with lock.thread_lock: 308 | try: 309 | # the ddb-lock might have been released while waiting for the thread-lock 310 | if lock.unique_identifier not in self._locks: return 311 | 312 | # skip if the lock is not in the LOCKED state 313 | if lock.status != DynamoDBLock.LOCKED: 314 | logger.info('Skipping the check as the lock is not locked any more: %s', lock.status) 315 | return 316 | 317 | # if the lock is in danger, invoke the app-callback 318 | safe_period_end_time = lock.last_updated_time + self._safe_period.total_seconds() 319 | if time.monotonic() < safe_period_end_time: 320 | logger.info('Lock is safe: %s', lock.unique_identifier) 321 | # let's leave the lock.status as-is i.e.
LOCKED 322 | else: 323 | logger.warning('Lock is in danger: %s', lock.unique_identifier) 324 | # let's flag the in-memory instance as being in danger 325 | lock.status = DynamoDBLock.IN_DANGER 326 | # callback - the app should abort its processing, and release the lock 327 | self._call_app_callback(lock, DynamoDBLockError.LOCK_IN_DANGER) 328 | 329 | logger.debug('Successfully checked the heartbeat: %s', lock.unique_identifier) 330 | except Exception: 331 | logger.warning('Unexpected error while checking heartbeat: %s', lock.unique_identifier, exc_info=True) 332 | 333 | 334 | def _call_app_callback(self, lock, code): 335 | """ 336 | Utility function to route the app_callback through the thread-pool-executor 337 | 338 | :param DynamoDBLock lock: the lock for which the event is being fired 339 | :param str code: the notification event-type 340 | """ 341 | self._app_callback_executor.submit(lock.app_callback, code, lock) 342 | 343 | 344 | def acquire_lock(self, 345 | partition_key, 346 | sort_key=_DEFAULT_SORT_KEY_VALUE, 347 | retry_period=None, 348 | retry_timeout=None, 349 | additional_attributes=None, 350 | app_callback=None, 351 | ): 352 | """ 353 | Acquires a distributed DynamoDBLock for the given key(s). 354 | 355 | If the lock is currently held by a different client, then this client will keep retrying on 356 | a periodic basis. In that case, a few different things can happen: 357 | 358 | 1) The other client releases the lock - basically deleting it from the database 359 | Which would allow this client to try to insert its own record instead. 360 | 2) The other client dies, and the lock stops getting updated by the heartbeat thread. 361 | While waiting for a lock, this client keeps track of the local-time whenever it sees the lock's 362 | record-version-number change. From that point-in-time, it needs to wait for a period of time 363 | equal to the lock's lease duration before concluding that the lock has been abandoned and try 364 | to overwrite the database entry with its own lock. 365 | 3) This client goes over the max-retry-timeout-period 366 | While waiting for the other client to release the lock (or for the lock's lease to expire), this 367 | client may go over the retry_timeout period (as provided by the caller) - in which case, a 368 | DynamoDBLockError with code == ACQUIRE_TIMEOUT will be raised. 369 | 4) Race-condition amongst multiple lock-clients waiting to acquire lock 370 | Whenever the "old" lock is released (or expires), there may be multiple "new" clients trying 371 | to grab the lock - in which case, one of those would succeed, and the rest of them would get 372 | a "conditional-update-exception". This is just logged and swallowed internally - and the 373 | client moves on to another sleep-retry cycle. 374 | 5) Any other error/exception 375 | Would be wrapped inside a DynamoDBLockError and raised to the caller. 376 | 377 | :param str partition_key: The primary lock identifier 378 | :param str sort_key: Forms a "composite identifier" along with the partition_key. Defaults to '-' 379 | :param datetime.timedelta retry_period: If the lock is not immediately available, how long 380 | should we wait between retries? Defaults to heartbeat_period. 381 | :param datetime.timedelta retry_timeout: If the lock is not available for an extended period, 382 | how long should we keep trying before giving up and timing out? This value should be set 383 | higher than the lease_duration to ensure that other clients can pick up locks abandoned 384 | by one client.
Defaults to lease_duration + heartbeat_period. 385 | :param dict additional_attributes: Arbitrary application metadata to be stored with the lock 386 | :param Callable app_callback: Callback function that can be used to notify the app of lock entering 387 | the danger period, or an unexpected release 388 | :rtype: DynamoDBLock 389 | :return: A distributed lock instance 390 | """ 391 | logger.info('Trying to acquire lock for: %s, %s', partition_key, sort_key) 392 | 393 | # plug in default values as needed 394 | if not retry_period: retry_period = self._heartbeat_period 395 | if not retry_timeout: retry_timeout = self._lease_duration + self._heartbeat_period 396 | 397 | # create the "new" lock that needs to be acquired 398 | new_lock = DynamoDBLock( 399 | partition_key=partition_key, 400 | sort_key=sort_key, 401 | owner_name=self._owner_name, 402 | lease_duration=self._lease_duration.total_seconds(), 403 | record_version_number=str( uuid.uuid4() ), 404 | expiry_time=int(time.time() + self._expiry_period.total_seconds()), 405 | additional_attributes=additional_attributes, 406 | app_callback=app_callback, 407 | lock_client=self, 408 | ) 409 | 410 | start_time = time.monotonic() 411 | retry_timeout_time = start_time + retry_timeout.total_seconds() 412 | retry_count = 0 413 | last_record_version_number = None 414 | last_version_fetch_time = -1.0 415 | while True: 416 | if self._shutting_down: 417 | raise DynamoDBLockError(DynamoDBLockError.CLIENT_SHUTDOWN, 'Client already shut down') 418 | 419 | try: 420 | # need to bump up the expiry time - to account for the sleep between tries 421 | new_lock.last_updated_time = time.monotonic() 422 | new_lock.expiry_time = int(time.time() + self._expiry_period.total_seconds()) 423 | 424 | logger.debug('Checking the database for existing owner: %s', new_lock.unique_identifier) 425 | existing_lock = self._get_lock_from_dynamodb(partition_key, sort_key) 426 | 427 | if existing_lock is None: 428 | logger.debug('No existing lock - attempting to add one: %s', new_lock.unique_identifier) 429 | self._add_new_lock_to_dynamodb(new_lock) 430 | logger.debug('Added to the DDB. Adding to in-memory map: %s', new_lock.unique_identifier) 431 | new_lock.status = DynamoDBLock.LOCKED 432 | self._locks[new_lock.unique_identifier] = new_lock 433 | logger.info('Successfully added a new lock: %s', str(new_lock)) 434 | return new_lock 435 | else: 436 | if existing_lock.record_version_number != last_record_version_number: 437 | logger.debug('Existing lock\'s record_version_number changed: %s, %s, %s', 438 | new_lock.unique_identifier, 439 | last_record_version_number, 440 | existing_lock.record_version_number) 441 | # if the record_version_number changes, the lock gets a fresh lease of life 442 | # keep track of the time we first saw this record_version_number 443 | last_record_version_number = existing_lock.record_version_number 444 | last_version_fetch_time = time.monotonic() 445 | else: 446 | logger.debug('Existing lock\'s record_version_number has not changed: %s, %s', 447 | new_lock.unique_identifier, 448 | last_record_version_number) 449 | # if the record_version_number has not changed for more than _lease_duration period, 450 | # it basically means that the owner thread/process has died. 
451 | last_version_elapsed_time = time.monotonic() - last_version_fetch_time 452 | if last_version_elapsed_time > existing_lock.lease_duration: 453 | logger.warning('Existing lock\'s lease has expired: %s', str(existing_lock)) 454 | self._overwrite_existing_lock_in_dynamodb(new_lock, last_record_version_number) 455 | logger.debug('Added to the DDB. Adding to in-memory map: %s', new_lock.unique_identifier) 456 | new_lock.status = DynamoDBLock.LOCKED 457 | self._locks[new_lock.unique_identifier] = new_lock 458 | logger.info('Successfully updated with the new lock: %s', str(new_lock)) 459 | return new_lock 460 | except ClientError as e: 461 | if e.response['Error']['Code'] == 'ConditionalCheckFailedException': 462 | logger.info( 463 | 'Someone else beat us to it - just log-it, sleep and retry: %s', 464 | new_lock.unique_identifier 465 | ) 466 | else: 467 | raise DynamoDBLockError(DynamoDBLockError.UNKNOWN, str(e)) 468 | except Exception as e: 469 | raise DynamoDBLockError(DynamoDBLockError.UNKNOWN, str(e)) 470 | 471 | # sleep and retry 472 | retry_count += 1 473 | curr_loop_end_time = time.monotonic() 474 | next_loop_start_time = start_time + retry_count * retry_period.total_seconds() 475 | if next_loop_start_time > retry_timeout_time: 476 | raise DynamoDBLockError( 477 | DynamoDBLockError.ACQUIRE_TIMEOUT, 478 | 'acquire_lock() timed out: ' + new_lock.unique_identifier 479 | ) 480 | elif next_loop_start_time > curr_loop_end_time: 481 | logger.info('Sleeping before a retry: %s', new_lock.unique_identifier) 482 | time.sleep(next_loop_start_time - curr_loop_end_time) 483 | 484 | 485 | def release_lock(self, lock, best_effort=True): 486 | """ 487 | Releases the given lock - by deleting it from the database. 488 | 489 | It allows the caller app to indicate whether it wishes to be informed of all errors/exceptions, 490 | or just have the lock-client swallow all of them. A typical usage pattern would include acquiring 491 | the lock, making app changes, and releasing the lock. By the time the app is releasing the lock, 492 | it would generally be too late to respond to any errors encountered during the release phase - but 493 | the app may still wish to be informed and log it somewhere for offline reconciliation/follow-up. 494 | 495 | :param DynamoDBLock lock: The lock instance that needs to be released 496 | :param bool best_effort: If True, any exception when calling DynamoDB will be ignored 497 | and the clean up steps will continue, hence the lock item in DynamoDB might not 498 | be updated / deleted but will eventually expire. Defaults to True. 499 | """ 500 | logger.info('Releasing the lock: %s', str(lock)) 501 | 502 | with lock.thread_lock: 503 | try: 504 | # if the lock is not in a locked state, it's a no-op (i.e.
released or stolen/invalid) 505 | if lock.status not in [DynamoDBLock.LOCKED, DynamoDBLock.IN_DANGER]: 506 | logger.info('Skipping the release as the lock is not locked any more: %s', lock.status) 507 | return 508 | 509 | # if this client did not create the lock being released 510 | if lock.unique_identifier not in self._locks: 511 | if best_effort: 512 | logger.warning('Lock not owned by this client: %s', str(lock)) 513 | return 514 | else: 515 | raise DynamoDBLockError(DynamoDBLockError.LOCK_NOT_OWNED, 'Lock is not owned by this client') 516 | 517 | # first, remove from in-memory locks - will stop the heartbeats 518 | # even if the database call fails, it will auto-release after the lease expires 519 | lock.status = DynamoDBLock.RELEASED 520 | del self._locks[lock.unique_identifier] 521 | 522 | # then, remove it from the database 523 | self._dynamodb_table.delete_item( 524 | Key={ 525 | self._partition_key_name: lock.partition_key, 526 | self._sort_key_name: lock.sort_key 527 | }, 528 | ConditionExpression='attribute_exists(#pk) AND attribute_exists(#sk) AND #rvn = :rvn', 529 | ExpressionAttributeNames={ 530 | '#pk': self._partition_key_name, 531 | '#sk': self._sort_key_name, 532 | '#rvn': self._COL_RECORD_VERSION_NUMBER, 533 | }, 534 | ExpressionAttributeValues={ 535 | ':rvn': lock.record_version_number, 536 | } 537 | ) 538 | 539 | logger.info('Successfully released the lock: %s', lock.unique_identifier) 540 | except DynamoDBLockError as e: 541 | raise e 542 | except ClientError as e: 543 | if best_effort: 544 | logger.warning('DynamoDb error while releasing lock: %s', lock.unique_identifier, exc_info=True) 545 | elif e.response['Error']['Code'] == 'ConditionalCheckFailedException': 546 | # Note: this is slightly different from the Java impl - which would just returns false 547 | raise DynamoDBLockError(DynamoDBLockError.LOCK_STOLEN, 'Lock was stolen by someone else') 548 | else: 549 | raise DynamoDBLockError(DynamoDBLockError.UNKNOWN, str(e)) 550 | except Exception as e: 551 | if best_effort: 552 | logger.warning('Unknown error while releasing lock: %s', lock.unique_identifier, exc_info=True) 553 | else: 554 | raise DynamoDBLockError(DynamoDBLockError.UNKNOWN, str(e)) 555 | 556 | 557 | def _get_lock_from_dynamodb(self, partition_key, sort_key): 558 | """ 559 | Loads the lock from the database - or returns None if not available. 560 | 561 | :rtype: BaseDynamoDBLock 562 | """ 563 | logger.debug('Getting the lock from dynamodb for: %s, %s', partition_key, sort_key) 564 | result = self._dynamodb_table.get_item( 565 | Key={ 566 | self._partition_key_name: partition_key, 567 | self._sort_key_name: sort_key 568 | }, 569 | ConsistentRead=True 570 | ) 571 | if 'Item' in result: 572 | return self._get_lock_from_item( result['Item'] ) 573 | else: 574 | return None 575 | 576 | 577 | def _add_new_lock_to_dynamodb(self, lock): 578 | """ 579 | Adds a new lock into the database - while checking that it does not exist already. 580 | 581 | :param DynamoDBLock lock: The lock instance that needs to be added to the database. 
582 | """ 583 | logger.debug('Adding a new lock: %s', str(lock)) 584 | self._dynamodb_table.put_item( 585 | Item=self._get_item_from_lock(lock), 586 | ConditionExpression='NOT(attribute_exists(#pk) AND attribute_exists(#sk))', 587 | ExpressionAttributeNames={ 588 | '#pk': self._partition_key_name, 589 | '#sk': self._sort_key_name, 590 | }, 591 | ) 592 | 593 | 594 | def _overwrite_existing_lock_in_dynamodb(self, lock, record_version_number): 595 | """ 596 | Overwrites an existing lock in the database - while checking that the version has not changed. 597 | 598 | :param DynamoDBLock lock: The new lock instance that needs to overwrite the old one in the database. 599 | :param str record_version_number: The version-number for the old lock instance in the database. 600 | """ 601 | logger.debug('Overwriting existing-rvn: %s with new lock: %s', record_version_number, str(lock)) 602 | self._dynamodb_table.put_item( 603 | Item=self._get_item_from_lock(lock), 604 | ConditionExpression='attribute_exists(#pk) AND attribute_exists(#sk) AND #rvn = :old_rvn', 605 | ExpressionAttributeNames={ 606 | '#pk': self._partition_key_name, 607 | '#sk': self._sort_key_name, 608 | '#rvn': self._COL_RECORD_VERSION_NUMBER, 609 | }, 610 | ExpressionAttributeValues={ 611 | ':old_rvn': record_version_number, 612 | } 613 | ) 614 | 615 | 616 | def _get_lock_from_item(self, item): 617 | """ 618 | Converts a DynamoDB 'Item' dict to a BaseDynamoDBLock instance 619 | 620 | :param dict item: The DynamoDB 'Item' dict object to be de-serialized. 621 | :rtype: BaseDynamoDBLock 622 | """ 623 | logger.debug('Get lock from item: %s', str(item)) 624 | lock = BaseDynamoDBLock( 625 | partition_key=item.pop(self._partition_key_name), 626 | sort_key=item.pop(self._sort_key_name), 627 | owner_name=item.pop(self._COL_OWNER_NAME), 628 | lease_duration=float(item.pop(self._COL_LEASE_DURATION)), 629 | record_version_number=item.pop(self._COL_RECORD_VERSION_NUMBER), 630 | expiry_time=int(item.pop(self._ttl_attribute_name)), 631 | additional_attributes=item 632 | ) 633 | return lock 634 | 635 | 636 | def _get_item_from_lock(self, lock): 637 | """ 638 | Converts a BaseDynamoDBLock (or subclass) instance to a DynamoDB 'Item' dict 639 | 640 | :param BaseDynamoDBLock lock: The lock instance to be serialized. 641 | :rtype: dict 642 | """ 643 | logger.debug('Get item from lock: %s', str(lock)) 644 | item = lock.additional_attributes.copy() 645 | item.update({ 646 | self._partition_key_name: lock.partition_key, 647 | self._sort_key_name: lock.sort_key, 648 | self._COL_OWNER_NAME: lock.owner_name, 649 | self._COL_LEASE_DURATION: Decimal.from_float(lock.lease_duration), 650 | self._COL_RECORD_VERSION_NUMBER: lock.record_version_number, 651 | self._ttl_attribute_name: lock.expiry_time 652 | }) 653 | return item 654 | 655 | 656 | def _release_all_locks(self): 657 | """ 658 | Iterates over all the locks and releases each one. 659 | """ 660 | logger.info('Releasing all locks: %d', len(self._locks)) 661 | for uid, lock in self._locks.copy().items(): 662 | self.release_lock(lock, best_effort=True) 663 | # TODO: should we fire app-callback to indicate the force-release 664 | # self._call_app_callback(lock, DynamoDBLockError.LOCK_STOLEN) 665 | 666 | 667 | def close(self, release_locks=False): 668 | """ 669 | Shuts down the background thread - and releases all locks if so asked. 
670 | 671 | By default, this method will NOT release all the locks - as releasing the locks while 672 | the application is still making changes assuming that it has the lock can be dangerous. 673 | As soon as a lock is released by this client, some other client may pick it up, and the 674 | associated app may start processing the underlying business entity in parallel. 675 | 676 | It is recommended that the application manage its shutdown-lifecycle such that all the 677 | worker threads operating under these locks are first terminated (committed or rolled-back), 678 | the corresponding locks released (one at a time - by each worker thread), and then the 679 | lock_client.close() method is called. Alternatively, consider letting the process die 680 | without releasing all the locks - they will be auto-released when their lease runs out 681 | after a while. 682 | 683 | :param bool release_locks: if True, releases all the locks. Defaults to False. 684 | """ 685 | if self._shutting_down: return 686 | logger.info('Shutting down') 687 | self._shutting_down = True 688 | self._heartbeat_sender_thread.join() 689 | self._heartbeat_checker_thread.join() 690 | if release_locks: self._release_all_locks() 691 | 692 | 693 | def __str__(self): 694 | """ 695 | Returns a readable string representation of this instance. 696 | """ 697 | return '%s::%s' % (self.__class__.__name__, self.__dict__) 698 | 699 | 700 | @classmethod 701 | def create_dynamodb_table(cls, 702 | dynamodb_client, 703 | table_name=_DEFAULT_TABLE_NAME, 704 | partition_key_name=_DEFAULT_PARTITION_KEY_NAME, 705 | sort_key_name=_DEFAULT_SORT_KEY_NAME, 706 | ttl_attribute_name=_DEFAULT_TTL_ATTRIBUTE_NAME, 707 | read_capacity=_DEFAULT_READ_CAPACITY, 708 | write_capacity=_DEFAULT_WRITE_CAPACITY): 709 | 710 | """ 711 | Helper method to create the DynamoDB table 712 | 713 | :param boto3.DynamoDB.Client dynamodb_client: mandatory argument 714 | :param str table_name: defaults to 'DynamoDBLockTable' 715 | :param str partition_key_name: defaults to 'lock_key' 716 | :param str sort_key_name: defaults to 'sort_key' 717 | :param str ttl_attribute_name: defaults to 'expiry_time' 718 | :param int read_capacity: the max TPS for strongly-consistent reads; defaults to 5 719 | :param int write_capacity: the max TPS for write operations; defaults to 5 720 | """ 721 | logger.info("Creating the lock table: %s", table_name) 722 | dynamodb_client.create_table( 723 | TableName=table_name, 724 | KeySchema=[ 725 | { 726 | 'AttributeName': partition_key_name, 727 | 'KeyType': 'HASH' 728 | }, 729 | { 730 | 'AttributeName': sort_key_name, 731 | 'KeyType': 'RANGE' 732 | }, 733 | ], 734 | AttributeDefinitions=[ 735 | { 736 | 'AttributeName': partition_key_name, 737 | 'AttributeType': 'S' 738 | }, 739 | { 740 | 'AttributeName': sort_key_name, 741 | 'AttributeType': 'S' 742 | }, 743 | ], 744 | ProvisionedThroughput={ 745 | 'ReadCapacityUnits': read_capacity, 746 | 'WriteCapacityUnits': write_capacity 747 | }, 748 | ) 749 | cls._wait_for_table_to_be_active(dynamodb_client, table_name) 750 | 751 | logger.info("Updating the table with time_to_live configuration") 752 | dynamodb_client.update_time_to_live( 753 | TableName=table_name, 754 | TimeToLiveSpecification={ 755 | 'Enabled': True, 756 | 'AttributeName': ttl_attribute_name 757 | } 758 | ) 759 | cls._wait_for_table_to_be_active(dynamodb_client, table_name) 760 | 761 | 762 | @classmethod 763 | def _wait_for_table_to_be_active(cls, dynamodb_client, table_name): 764 | logger.info("Waiting till the table becomes ACTIVE") 765 | 
while True: 766 | response = dynamodb_client.describe_table(TableName=table_name) 767 | status = response.get('Table', {}).get('TableStatus', 'UNKNOWN') 768 | logger.info("Table status: %s", status) 769 | if status == 'ACTIVE': 770 | break 771 | else: 772 | time.sleep(2) 773 | 774 | 775 | 776 | class BaseDynamoDBLock: 777 | """ 778 | Represents a distributed lock - as stored in DynamoDB. 779 | 780 | Typically used within the code to represent a lock held by some other lock-client. 781 | """ 782 | 783 | def __init__(self, 784 | partition_key, 785 | sort_key, 786 | owner_name, 787 | lease_duration, 788 | record_version_number, 789 | expiry_time, 790 | additional_attributes 791 | ): 792 | """ 793 | :param str partition_key: The primary lock identifier 794 | :param str sort_key: If present, forms a "composite identifier" along with the partition_key 795 | :param str owner_name: The owner name - typically from the lock_client 796 | :param float lease_duration: The lease duration in seconds - typically from the lock_client 797 | :param str record_version_number: A "liveness" indicating GUID - changes with every heartbeat 798 | :param int expiry_time: Epoch timestamp in seconds after which DynamoDB will auto-delete the record 799 | :param dict additional_attributes: Arbitrary application metadata to be stored with the lock 800 | """ 801 | self.partition_key = partition_key 802 | self.sort_key = sort_key 803 | self.owner_name = owner_name 804 | self.lease_duration = lease_duration 805 | self.record_version_number = record_version_number 806 | self.expiry_time = expiry_time 807 | self.additional_attributes = additional_attributes or {} 808 | # additional properties 809 | self.unique_identifier = quote(partition_key) + '|' + quote(sort_key) 810 | 811 | 812 | def __str__(self): 813 | """ 814 | Returns a readable string representation of this instance. 815 | """ 816 | return '%s::%s' % (self.__class__.__name__, self.__dict__) 817 | 818 | 819 | 820 | class DynamoDBLock(BaseDynamoDBLock): 821 | """ 822 | Represents a lock that is owned by a local DynamoDBLockClient instance. 
823 | """ 824 | 825 | PENDING = 'PENDING' 826 | LOCKED = 'LOCKED' 827 | RELEASED = 'RELEASED' 828 | IN_DANGER = 'IN_DANGER' 829 | INVALID = 'INVALID' 830 | 831 | def __init__(self, 832 | partition_key, 833 | sort_key, 834 | owner_name, 835 | lease_duration, 836 | record_version_number, 837 | expiry_time, 838 | additional_attributes, 839 | app_callback, 840 | lock_client, 841 | ): 842 | """ 843 | :param str partition_key: The primary lock identifier 844 | :param str sort_key: If present, forms a "composite identifier" along with the partition_key 845 | :param str owner_name: The owner name - typically from the lock_client 846 | :param float lease_duration: The lease duration - typically from the lock_client 847 | :param str record_version_number: Changes with every heartbeat - the "liveness" indicator 848 | :param int expiry_time: Epoch timestamp in seconds after which DynamoDB will auto-delete the record 849 | :param dict additional_attributes: Arbitrary application metadata to be stored with the lock 850 | 851 | :param Callable app_callback: Callback function that can be used to notify the app of lock entering 852 | the danger period, or an unexpected release 853 | :param DynamoDBLockClient lock_client: The client that "owns" this lock 854 | """ 855 | BaseDynamoDBLock.__init__(self, 856 | partition_key, 857 | sort_key, 858 | owner_name, 859 | lease_duration, 860 | record_version_number, 861 | expiry_time, 862 | additional_attributes 863 | ) 864 | self.app_callback = app_callback 865 | self.lock_client = lock_client 866 | # additional properties 867 | self.last_updated_time = time.monotonic() 868 | self.thread_lock = threading.RLock() 869 | self.status = self.PENDING 870 | 871 | 872 | def __enter__(self): 873 | """ 874 | No-op - returns itself 875 | """ 876 | logger.debug('Entering: %s', self.unique_identifier) 877 | return self 878 | 879 | 880 | def __exit__(self, exc_type, exc_value, traceback): 881 | """ 882 | Releases the lock - with best_effort=True 883 | """ 884 | logger.debug('Exiting: %s', self.unique_identifier) 885 | self.release(best_effort=True) 886 | return True 887 | 888 | 889 | def release(self, best_effort=True): 890 | """ 891 | Calls the lock_client.release_lock(self, True) method 892 | 893 | :param bool best_effort: If True, any exception when calling DynamoDB will be ignored 894 | and the clean up steps will continue, hence the lock item in DynamoDb might not 895 | be updated / deleted but will eventually expire. Defaults to True. 896 | """ 897 | logger.debug('Releasing: %s', self.unique_identifier) 898 | self.lock_client.release_lock(self, best_effort) 899 | 900 | 901 | 902 | class DynamoDBLockError(Exception): 903 | """ 904 | Wrapper for all kinds of errors that might occur during the acquire and release calls. 905 | """ 906 | 907 | # code-constants 908 | CLIENT_SHUTDOWN = 'CLIENT_SHUTDOWN' 909 | ACQUIRE_TIMEOUT = 'ACQUIRE_TIMEOUT' 910 | LOCK_NOT_OWNED = 'LOCK_NOT_OWNED' 911 | LOCK_STOLEN = 'LOCK_STOLEN' 912 | LOCK_IN_DANGER = 'LOCK_IN_DANGER' 913 | UNKNOWN = 'UNKNOWN' 914 | 915 | 916 | def __init__(self, 917 | code='UNKNOWN', 918 | message='Unknown error' 919 | ): 920 | Exception.__init__(self) 921 | self.code = code 922 | self.message = message 923 | 924 | 925 | def __str__(self): 926 | """ 927 | Returns a readable string representation of this instance. 
928 | """ 929 | return "%s: %s - %s" % (self.__class__.__name__, self.code, self.message) 930 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.9.146 2 | botocore==1.12.146 3 | -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | pip==19.1.1 2 | bumpversion==0.5.3 3 | wheel==0.33.4 4 | watchdog==0.9.0 5 | flake8==3.7.7 6 | tox==3.9.0 7 | coverage==4.5.3 8 | Sphinx==2.0.1 9 | twine==1.13.0 10 | sphinx_rtd_theme==0.4.3 11 | pytest==4.5.0 12 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.9.1 3 | commit = True 4 | tag = True 5 | 6 | [bumpversion:file:setup.py] 7 | search = version='{current_version}' 8 | replace = version='{new_version}' 9 | 10 | [bumpversion:file:python_dynamodb_lock/__init__.py] 11 | search = __version__ = '{current_version}' 12 | replace = __version__ = '{new_version}' 13 | 14 | [bdist_wheel] 15 | universal = 1 16 | 17 | [flake8] 18 | exclude = docs, tests 19 | max-line-length = 120 20 | ignore = E303, E201, E202, E701 21 | 22 | [aliases] 23 | 24 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """The setup script.""" 5 | 6 | from setuptools import setup, find_packages 7 | 8 | with open('README.rst') as readme_file: 9 | readme = readme_file.read() 10 | 11 | with open('HISTORY.rst') as history_file: 12 | history = history_file.read() 13 | 14 | requirements = [ 15 | 'boto3', 16 | 'botocore', 17 | ] 18 | 19 | setup_requirements = [ ] 20 | 21 | test_requirements = [ ] 22 | 23 | setup( 24 | author="Mohan Kishore", 25 | author_email='mohankishore@yahoo.com', 26 | classifiers=[ 27 | 'Development Status :: 4 - Beta', 28 | 'Intended Audience :: Developers', 29 | 'License :: OSI Approved :: Apache Software License', 30 | 'Natural Language :: English', 31 | 'Programming Language :: Python :: 3', 32 | 'Programming Language :: Python :: 3.6', 33 | 'Programming Language :: Python :: 3.7', 34 | ], 35 | description="Python library that emulates the java-based dynamo-db-client from awslabs", 36 | install_requires=requirements, 37 | license="Apache Software License 2.0", 38 | long_description=readme + '\n\n' + history, 39 | include_package_data=True, 40 | keywords='python_dynamodb_lock', 41 | name='python_dynamodb_lock', 42 | packages=find_packages(include=['python_dynamodb_lock']), 43 | setup_requires=setup_requirements, 44 | test_suite='tests', 45 | tests_require=test_requirements, 46 | url='https://github.com/mohankishore/python_dynamodb_lock', 47 | version='0.9.1', 48 | zip_safe=False, 49 | ) 50 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Unit test package for python_dynamodb_lock.""" 4 | -------------------------------------------------------------------------------- /tests/test_python_dynamodb_lock.py: -------------------------------------------------------------------------------- 
1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """Tests for `python_dynamodb_lock` package.""" 5 | 6 | 7 | import unittest 8 | from unittest import mock 9 | import sys 10 | import logging 11 | logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) 12 | 13 | 14 | from python_dynamodb_lock.python_dynamodb_lock import * 15 | 16 | 17 | class TestDynamoDBLockClient(unittest.TestCase): 18 | """Tests for `python_dynamodb_lock` package.""" 19 | 20 | def setUp(self): 21 | """Set up test fixtures, if any.""" 22 | self.app_callbacks = [] 23 | self.ddb_resource = mock.MagicMock(name='ddb_resource') 24 | self.lock_client = DynamoDBLockClient( 25 | self.ddb_resource, 26 | heartbeat_period=datetime.timedelta(milliseconds=100), 27 | safe_period=datetime.timedelta(milliseconds=600), 28 | lease_duration=datetime.timedelta(milliseconds=1000), 29 | expiry_period=datetime.timedelta(milliseconds=5000), 30 | ) 31 | # switch the table reference; easier than patching etc. 32 | self.ddb_table = mock.MagicMock(name='ddb_table') 33 | self.lock_client._dynamodb_table = self.ddb_table 34 | 35 | def tearDown(self): 36 | """Tear down test fixtures, if any.""" 37 | self.lock_client.close() 38 | 39 | def app_callback(self, code, lock): 40 | """Just adds the (code, lock) tuple to an in-memory array""" 41 | self.app_callbacks.append( (code, lock) ) 42 | 43 | 44 | # __init__ tests 45 | 46 | def test_minimal_args_client(self): 47 | minimal_args_client = DynamoDBLockClient(self.ddb_table) 48 | self.assertIsNotNone(minimal_args_client) 49 | 50 | 51 | # send_heartbeat tests 52 | 53 | def test_background_threads(self): 54 | heartbeat_sender = self.lock_client._heartbeat_sender_thread 55 | self.assertIsNotNone(heartbeat_sender) 56 | self.assertTrue(heartbeat_sender.isDaemon()) 57 | self.assertTrue(heartbeat_sender.isAlive()) 58 | heartbeat_checker = self.lock_client._heartbeat_checker_thread 59 | self.assertIsNotNone(heartbeat_checker) 60 | self.assertTrue(heartbeat_checker.isDaemon()) 61 | self.assertTrue(heartbeat_checker.isAlive()) 62 | # now, close the client 63 | self.lock_client.close() 64 | # and check that the threads are also shutdown 65 | self.assertFalse(heartbeat_sender.isAlive()) 66 | self.assertFalse(heartbeat_checker.isAlive()) 67 | 68 | 69 | def test_send_heartbeat_success(self): 70 | self.ddb_table.update_item = mock.MagicMock('update_item') 71 | self.lock_client.acquire_lock('key') 72 | time.sleep(200/1000) # 200 millis 73 | self.ddb_table.update_item.assert_called() 74 | 75 | 76 | def test_send_heartbeat_lock_stolen(self): 77 | self.ddb_table.update_item = mock.MagicMock('update_item') 78 | self.lock_client.acquire_lock('key', app_callback=self.app_callback) 79 | time.sleep(200/1000) 80 | self.ddb_table.update_item.side_effect = ClientError({ 81 | 'Error': { 'Code': 'ConditionalCheckFailedException' } 82 | }, 'update_item') 83 | time.sleep(200/1000) 84 | self.ddb_table.update_item.side_effect = None 85 | self.assertEqual(len(self.lock_client._locks), 0) 86 | self.assertEqual(len(self.app_callbacks), 1) 87 | (code, lock) = self.app_callbacks.pop(0) 88 | self.assertEqual(code, DynamoDBLockError.LOCK_STOLEN) 89 | 90 | 91 | def test_send_heartbeat_ddb_error(self): 92 | self.ddb_table.update_item = mock.MagicMock('update_item') 93 | self.lock_client.acquire_lock('key', app_callback=self.app_callback) 94 | time.sleep(200/1000) 95 | self.ddb_table.update_item.side_effect = ClientError({ 96 | 'Error': { 'Code': 'SomeOtherDynamoDBError' } 97 | }, 'update_item') 98 | time.sleep(200/1000) 99 | 
self.ddb_table.update_item.side_effect = None 100 | self.assertEqual(len(self.app_callbacks), 0) # ignore other DDB Errors 101 | 102 | 103 | def test_send_heartbeat_other_error(self): 104 | self.ddb_table.update_item = mock.MagicMock('update_item') 105 | self.lock_client.acquire_lock('key', app_callback=self.app_callback) 106 | time.sleep(200/1000) 107 | self.ddb_table.update_item.side_effect = RuntimeError('TestError') 108 | time.sleep(200/1000) 109 | self.ddb_table.update_item.side_effect = None 110 | self.assertEqual(len(self.app_callbacks), 0) # ignore other Runtime Errors 111 | 112 | 113 | # this tests the heartbeat_checker_thread and the app_callback_executor 114 | def test_send_heartbeat_in_danger(self): 115 | self.ddb_table.update_item = mock.MagicMock('update_item') 116 | self.ddb_table.update_item.side_effect = RuntimeError('TestError') 117 | self.lock_client.acquire_lock('key', app_callback=self.app_callback) 118 | time.sleep(850/1000) 119 | self.ddb_table.update_item.side_effect = None 120 | self.assertTrue(len(self.app_callbacks) == 1) 121 | (code, lock) = self.app_callbacks.pop(0) 122 | self.assertEqual(code, DynamoDBLockError.LOCK_IN_DANGER) 123 | 124 | 125 | # acquire_lock tests 126 | 127 | def test_acquire_lock_success(self): 128 | self.ddb_table.get_item = mock.MagicMock('get_item') 129 | self.ddb_table.put_item = mock.MagicMock('put_item') 130 | lock = self.lock_client.acquire_lock('key') 131 | self.assertIsNotNone(lock) 132 | self.ddb_table.get_item.assert_called() 133 | self.ddb_table.put_item.assert_called() 134 | locks = self.lock_client._locks 135 | self.assertEqual(len(locks), 1) 136 | self.assertTrue(lock.unique_identifier in locks) 137 | self.assertEqual(locks[lock.unique_identifier], lock) 138 | 139 | 140 | def test_acquire_lock_after_close(self): 141 | self.lock_client.close() 142 | try: 143 | self.lock_client.acquire_lock('key') 144 | self.fail('Expected an error') 145 | except DynamoDBLockError as e: 146 | self.assertEqual(e.code, DynamoDBLockError.CLIENT_SHUTDOWN) 147 | 148 | 149 | def test_acquire_lock_after_release(self): 150 | self.ddb_table.get_item = mock.MagicMock('get_item') 151 | self.ddb_table.get_item.side_effect = [ 152 | # first call, return a valid item 153 | { 154 | 'Item': { 155 | 'lock_key': 'key', 156 | 'sort_key': '-', 157 | 'owner_name': 'owner', 158 | 'lease_duration': 0.3, 159 | 'record_version_number': 'xyz', 160 | 'expiry_time': 100, 161 | } 162 | }, 163 | # second call, act as if its been deleted 164 | {} 165 | ] 166 | start_time = time.monotonic() 167 | lock = self.lock_client.acquire_lock('key', retry_period=datetime.timedelta(milliseconds=100)) 168 | end_time = time.monotonic() 169 | self.assertIsNotNone(lock) 170 | self.assertTrue((end_time - start_time) * 1000 >= 100) 171 | 172 | 173 | def test_acquire_lock_after_lease_expires(self): 174 | self.ddb_table.get_item = mock.MagicMock('get_item') 175 | self.ddb_table.get_item.side_effect = lambda **kwargs: { 176 | 'Item': { 177 | 'lock_key': 'key', 178 | 'sort_key': '-', 179 | 'owner_name': 'owner', 180 | 'lease_duration': 0.3, 181 | 'record_version_number': 'xyz', 182 | 'expiry_time': 100, 183 | } 184 | } 185 | start_time = time.monotonic() 186 | lock = self.lock_client.acquire_lock('key', retry_period=datetime.timedelta(milliseconds=100)) 187 | end_time = time.monotonic() 188 | self.assertIsNotNone(lock) 189 | self.assertTrue((end_time - start_time) * 1000 >= 300) 190 | 191 | 192 | def test_acquire_lock_retry_timeout(self): 193 | self.ddb_table.get_item = mock.MagicMock('get_item') 
194 | self.ddb_table.get_item.side_effect = lambda **kwargs: { 195 | 'Item': { 196 | 'lock_key': 'key', 197 | 'sort_key': '-', 198 | 'owner_name': 'owner', 199 | 'lease_duration': 0.6, 200 | 'record_version_number': 'xyz', 201 | 'expiry_time': 100, 202 | } 203 | } 204 | start_time = time.monotonic() 205 | try: 206 | self.lock_client.acquire_lock( 207 | 'key', 208 | retry_period=datetime.timedelta(milliseconds=100), 209 | retry_timeout=datetime.timedelta(milliseconds=300)) 210 | self.fail('Expected an error') 211 | except DynamoDBLockError as e: 212 | end_time = time.monotonic() 213 | self.assertEqual(e.code, DynamoDBLockError.ACQUIRE_TIMEOUT) 214 | self.assertTrue((end_time - start_time) * 1000 >= 200) 215 | # at 220ms, it would error out, instead of sleeping for another 100ms 216 | 217 | def test_acquire_lock_race_condition(self): 218 | # test the get-none, put-error, retry, get-none, put-success case 219 | self.ddb_table.get_item = mock.MagicMock('get_item') 220 | self.ddb_table.put_item = mock.MagicMock('put_item') 221 | self.ddb_table.get_item.side_effect = lambda **kwargs: {} 222 | self.ddb_table.put_item.side_effect = [ 223 | ClientError({ 224 | 'Error': { 'Code': 'ConditionalCheckFailedException' } 225 | }, 'put_item'), 226 | {} 227 | ] 228 | start_time = time.monotonic() 229 | lock = self.lock_client.acquire_lock('key', retry_period=datetime.timedelta(milliseconds=100)) 230 | end_time = time.monotonic() 231 | self.assertIsNotNone(lock) 232 | self.assertTrue((end_time - start_time) * 1000 >= 100) 233 | 234 | def test_acquire_lock_ddb_error(self): 235 | # test the get-none, put-error 236 | self.ddb_table.get_item = mock.MagicMock('get_item') 237 | self.ddb_table.put_item = mock.MagicMock('put_item') 238 | self.ddb_table.get_item.side_effect = lambda **kwargs: {} 239 | self.ddb_table.put_item.side_effect = ClientError({ 240 | 'Error': { 'Code': 'SomeOtherDynamoDBError' } 241 | }, 'put_item') 242 | try: 243 | self.lock_client.acquire_lock('key', retry_period=datetime.timedelta(milliseconds=100)) 244 | self.fail('Expected an error') 245 | except DynamoDBLockError as e: 246 | self.assertEqual(e.code, DynamoDBLockError.UNKNOWN) 247 | 248 | def test_acquire_lock_other_error(self): 249 | # test the get-none, put-error 250 | self.ddb_table.get_item = mock.MagicMock('get_item') 251 | self.ddb_table.put_item = mock.MagicMock('put_item') 252 | self.ddb_table.get_item.side_effect = lambda **kwargs: {} 253 | self.ddb_table.put_item.side_effect = RuntimeError('TestError') 254 | try: 255 | self.lock_client.acquire_lock('key', retry_period=datetime.timedelta(milliseconds=100)) 256 | self.fail('Expected an error') 257 | except DynamoDBLockError as e: 258 | self.assertEqual(e.code, DynamoDBLockError.UNKNOWN) 259 | 260 | 261 | # release_lock tests 262 | 263 | def test_release_lock_success(self): 264 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 265 | lock = self.lock_client.acquire_lock('key') 266 | self.lock_client.release_lock(lock) 267 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 268 | self.ddb_table.delete_item.assert_called_once() 269 | 270 | 271 | def test_release_lock_not_owned(self): 272 | other_lock_client = DynamoDBLockClient(self.ddb_table) 273 | other_lock = other_lock_client.acquire_lock('key') 274 | try: 275 | self.lock_client.release_lock(other_lock, best_effort=False) 276 | self.fail('Expected an error') 277 | except DynamoDBLockError as e: 278 | self.assertEqual(e.code, DynamoDBLockError.LOCK_NOT_OWNED) 279 | 280 | 281 | def 
test_release_lock_after_stolen(self): 282 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 283 | self.ddb_table.delete_item.side_effect = [ 284 | ClientError({ 285 | 'Error': { 'Code': 'ConditionalCheckFailedException' } 286 | }, 'delete_item'), 287 | ] 288 | lock = self.lock_client.acquire_lock('key') 289 | try: 290 | self.lock_client.release_lock(lock, best_effort=False) 291 | self.fail('Expected an error') 292 | except DynamoDBLockError as e: 293 | self.assertEqual(e.code, DynamoDBLockError.LOCK_STOLEN) 294 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 295 | 296 | def test_release_lock_ddb_error(self): 297 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 298 | self.ddb_table.delete_item.side_effect = [ 299 | ClientError({ 300 | 'Error': { 'Code': 'SomeOtherDynamoDBError' } 301 | }, 'delete_item'), 302 | ] 303 | lock = self.lock_client.acquire_lock('key') 304 | try: 305 | self.lock_client.release_lock(lock, best_effort=False) 306 | self.fail('Expected an error') 307 | except DynamoDBLockError as e: 308 | self.assertEqual(e.code, DynamoDBLockError.UNKNOWN) 309 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 310 | 311 | def test_release_lock_other_error(self): 312 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 313 | self.ddb_table.delete_item.side_effect = RuntimeError('TestError') 314 | lock = self.lock_client.acquire_lock('key') 315 | try: 316 | self.lock_client.release_lock(lock, best_effort=False) 317 | self.fail('Expected an error') 318 | except DynamoDBLockError as e: 319 | self.assertEqual(e.code, DynamoDBLockError.UNKNOWN) 320 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 321 | 322 | 323 | # release_lock tests - with best_effort=True 324 | 325 | def test_best_effort_release_lock_not_owned(self): 326 | other_lock_client = DynamoDBLockClient(self.ddb_table) 327 | other_lock = other_lock_client.acquire_lock('key') 328 | self.lock_client.release_lock(other_lock) 329 | 330 | 331 | def test_best_effort_release_lock_after_stolen(self): 332 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 333 | self.ddb_table.delete_item.side_effect = [ 334 | ClientError({ 335 | 'Error': { 'Code': 'ConditionalCheckFailedException' } 336 | }, 'delete_item'), 337 | ] 338 | lock = self.lock_client.acquire_lock('key') 339 | self.lock_client.release_lock(lock) 340 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 341 | 342 | def test_best_effort_release_lock_ddb_error(self): 343 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 344 | self.ddb_table.delete_item.side_effect = [ 345 | ClientError({ 346 | 'Error': { 'Code': 'SomeOtherDynamoDBError' } 347 | }, 'delete_item'), 348 | ] 349 | lock = self.lock_client.acquire_lock('key') 350 | self.lock_client.release_lock(lock) 351 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 352 | 353 | def test_best_effort_release_lock_other_error(self): 354 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 355 | self.ddb_table.delete_item.side_effect = RuntimeError('TestError') 356 | lock = self.lock_client.acquire_lock('key') 357 | self.lock_client.release_lock(lock) 358 | self.assertTrue(lock.unique_identifier not in self.lock_client._locks) 359 | 360 | 361 | # lock-to-item serialize/deserialize tests 362 | 363 | def test_lock_to_item(self): 364 | lock = BaseDynamoDBLock('p', 's', 'o', 5, 'r', 10, { 'k': 'v'}) 365 | item = self.lock_client._get_item_from_lock(lock) or {} 366 | 
self.assertEqual(item[self.lock_client._partition_key_name], 'p') 367 | self.assertEqual(item[self.lock_client._sort_key_name], 's') 368 | self.assertEqual(item['owner_name'], 'o') 369 | self.assertEqual(item['lease_duration'], 5) 370 | self.assertEqual(item['record_version_number'], 'r') 371 | self.assertEqual(item['expiry_time'], 10) 372 | self.assertEqual(item['k'], 'v') 373 | 374 | 375 | def test_item_to_lock(self): 376 | item = { 377 | self.lock_client._partition_key_name: 'p2', 378 | self.lock_client._sort_key_name: 's2', 379 | 'owner_name': 'o2', 380 | 'lease_duration': 52, 381 | 'record_version_number': 'r2', 382 | 'expiry_time': 102, 383 | 'k2': 'v2' 384 | } 385 | lock = self.lock_client._get_lock_from_item(item) 386 | self.assertEqual(lock.partition_key, 'p2') 387 | self.assertEqual(lock.sort_key, 's2') 388 | self.assertEqual(lock.owner_name, 'o2') 389 | self.assertEqual(lock.lease_duration, 52) 390 | self.assertEqual(lock.record_version_number, 'r2') 391 | self.assertEqual(lock.expiry_time, 102) 392 | self.assertDictEqual(lock.additional_attributes, { 'k2': 'v2' }) 393 | 394 | 395 | # close() tests 396 | 397 | def test_close_without_release_locks(self): 398 | self.ddb_table.get_item = mock.MagicMock('get_item') 399 | self.ddb_table.put_item = mock.MagicMock('put_item') 400 | lock = self.lock_client.acquire_lock('key') 401 | self.lock_client.close() 402 | self.assertTrue(lock.unique_identifier in self.lock_client._locks) 403 | 404 | 405 | def test_close_with_release_locks(self): 406 | self.ddb_table.get_item = mock.MagicMock('get_item') 407 | self.ddb_table.put_item = mock.MagicMock('put_item') 408 | lock = self.lock_client.acquire_lock('key') 409 | self.lock_client.close(release_locks=True) 410 | self.assertFalse(lock.unique_identifier in self.lock_client._locks) 411 | 412 | 413 | # context-manager methods 414 | 415 | def test_lock_with_enter_exit(self): 416 | self.ddb_table.get_item = mock.MagicMock('get_item') 417 | self.ddb_table.put_item = mock.MagicMock('put_item') 418 | self.ddb_table.delete_item = mock.MagicMock('delete_item') 419 | with self.lock_client.acquire_lock('key') as lock: 420 | self.assertIsNotNone(lock) 421 | self.ddb_table.get_item.assert_called() 422 | self.ddb_table.put_item.assert_called() 423 | print('Lock: %s' % (str(lock))) 424 | self.ddb_table.delete_item.assert_called() 425 | 426 | -------------------------------------------------------------------------------- /tests_integration/test_app.py: -------------------------------------------------------------------------------- 1 | # hack to enable relative imports 2 | import os, sys 3 | sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) 4 | 5 | # import the module/classes under test 6 | from python_dynamodb_lock.python_dynamodb_lock import * 7 | 8 | # import other dependencies 9 | import argparse 10 | import boto3 11 | 12 | import logging 13 | logging.basicConfig(stream=sys.stdout, level=logging.INFO) 14 | 15 | """ 16 | This file is meant to be used after downloading and setting up a local DynamoDB instance. 
17 | Ref: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.DownloadingAndRunning.html 18 | """ 19 | 20 | 21 | def create_table(args): 22 | # DynamoDB 23 | ddb_client = boto3.client( 24 | 'dynamodb', 25 | endpoint_url='http://localhost:8000', 26 | aws_access_key_id='non-blank', 27 | aws_secret_access_key='non-blank', 28 | region_name='non-blank' 29 | ) 30 | # Create Table 31 | DynamoDBLockClient.create_dynamodb_table( 32 | ddb_client, 33 | table_name=args.table_name, 34 | partition_key_name=args.partition_key_name, 35 | sort_key_name=args.sort_key_name, 36 | ttl_attribute_name=args.ttl_attribute_name, 37 | read_capacity=args.read_capacity, 38 | write_capacity=args.write_capacity 39 | ) 40 | 41 | 42 | def acquire_lock(args): 43 | # the one mandatory argument for acquiring a lock 44 | if not args.partition_key: 45 | raise RuntimeError('Need to provide --partition_key') 46 | 47 | # DynamoDB 48 | ddb_resource = boto3.resource( 49 | 'dynamodb', 50 | endpoint_url='http://localhost:8000', 51 | aws_access_key_id='non-blank', 52 | aws_secret_access_key='non-blank', 53 | region_name='non-blank' 54 | ) 55 | # The Lock-Client 56 | lock_client = DynamoDBLockClient( 57 | ddb_resource, 58 | table_name=args.table_name, 59 | partition_key_name=args.partition_key_name, 60 | sort_key_name=args.sort_key_name, 61 | ttl_attribute_name=args.ttl_attribute_name, 62 | heartbeat_period=datetime.timedelta(seconds=args.heartbeat_period), 63 | safe_period=datetime.timedelta(seconds=args.safe_period), 64 | lease_duration=datetime.timedelta(seconds=args.lease_duration), 65 | ) 66 | # The Lock itself 67 | lock = lock_client.acquire_lock( 68 | args.partition_key, 69 | sort_key=args.sort_key, 70 | retry_period=datetime.timedelta(seconds=args.retry_period), 71 | retry_timeout=datetime.timedelta(seconds=args.retry_timeout), 72 | app_callback=_app_callback 73 | ) 74 | # And, hold as needed 75 | if args.hold_period > 0: 76 | time.sleep(args.hold_period) 77 | lock.release() 78 | else: 79 | while True: time.sleep(60) 80 | 81 | 82 | def _app_callback(code, lock): 83 | logger.warning('CALLBACK: %s - %s', code, str(lock)) 84 | 85 | 86 | # basically, the main method 87 | if __name__ == "__main__": 88 | parser = argparse.ArgumentParser( 89 | description='Test app to verify the python_dynamodb_lock module', 90 | formatter_class=argparse.ArgumentDefaultsHelpFormatter 91 | ) 92 | 93 | mode = parser.add_mutually_exclusive_group(required=True) 94 | mode.add_argument( 95 | '--create_table', 96 | help='Create the DynamoDB table', 97 | action='store_true', 98 | ) 99 | mode.add_argument( 100 | '--acquire_lock', 101 | help='Acquire a named lock', 102 | action='store_true', 103 | ) 104 | 105 | common_group = parser.add_argument_group(title='(common arguments)') 106 | common_group.add_argument( 107 | '--table_name', 108 | help='Name of the DynamoDB table', 109 | required=False, 110 | default=DynamoDBLockClient._DEFAULT_TABLE_NAME 111 | ) 112 | common_group.add_argument( 113 | '--partition_key_name', 114 | help='Name of the partition-key column', 115 | required=False, 116 | default=DynamoDBLockClient._DEFAULT_PARTITION_KEY_NAME 117 | ) 118 | common_group.add_argument( 119 | '--sort_key_name', 120 | help='Name of the sort-key column', 121 | required=False, 122 | default=DynamoDBLockClient._DEFAULT_SORT_KEY_NAME 123 | ) 124 | common_group.add_argument( 125 | '--ttl_attribute_name', 126 | help='Name of the TTL attribute', 127 | required=False, 128 | default=DynamoDBLockClient._DEFAULT_TTL_ATTRIBUTE_NAME 129 | ) 130 | 131 
| create_table_group = parser.add_argument_group(title='--create_table') 132 | create_table_group.add_argument( 133 | '--read_capacity', 134 | help='The max TPS for strongly-consistent reads', 135 | required=False, 136 | default=DynamoDBLockClient._DEFAULT_READ_CAPACITY, 137 | type=int 138 | ) 139 | create_table_group.add_argument( 140 | '--write_capacity', 141 | help='The max TPS for write operations', 142 | required=False, 143 | default=DynamoDBLockClient._DEFAULT_WRITE_CAPACITY, 144 | type=int 145 | ) 146 | 147 | acquire_lock_group = parser.add_argument_group(title='--acquire_lock') 148 | acquire_lock_group.add_argument( 149 | '--partition_key', 150 | help='Value for the partition-key column', 151 | ) 152 | acquire_lock_group.add_argument( 153 | '--sort_key', 154 | help='Value for the sort-key column', 155 | required=False, 156 | default=DynamoDBLockClient._DEFAULT_SORT_KEY_VALUE 157 | ) 158 | acquire_lock_group.add_argument( 159 | '--retry_period', 160 | help='How long to wait between retries?', 161 | required=False, 162 | default=2, 163 | type=int 164 | ) 165 | acquire_lock_group.add_argument( 166 | '--retry_timeout', 167 | help='How long to retry before giving up?', 168 | required=False, 169 | default=10, 170 | type=int 171 | ) 172 | acquire_lock_group.add_argument( 173 | '--hold_period', 174 | help='How long to hold the lock before releasing it?', 175 | required=False, 176 | default=-1, 177 | type=int 178 | ) 179 | acquire_lock_group.add_argument( 180 | '--heartbeat_period', 181 | help='How long to wait between consecutive heartbeat-all-locks?', 182 | required=False, 183 | default=DynamoDBLockClient._DEFAULT_HEARTBEAT_PERIOD.total_seconds(), 184 | type=int 185 | ) 186 | acquire_lock_group.add_argument( 187 | '--safe_period', 188 | help='How long can a lock go without a successful heartbeat - before it is deemed to be in danger?', 189 | required=False, 190 | default=DynamoDBLockClient._DEFAULT_SAFE_PERIOD.total_seconds(), 191 | type=int 192 | ) 193 | acquire_lock_group.add_argument( 194 | '--lease_duration', 195 | help='How long can a lock go without a successful heartbeat - before it is deemed to be abandoned?', 196 | required=False, 197 | default=DynamoDBLockClient._DEFAULT_LEASE_DURATION.total_seconds(), 198 | type=int 199 | ) 200 | 201 | args = parser.parse_args() 202 | if args.create_table: 203 | create_table(args) 204 | elif args.acquire_lock: 205 | acquire_lock(args) 206 | else: 207 | logger.error('Invalid arguments: %s', str(args)) 208 | 209 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py36, py37, flake8 3 | 4 | [travis] 5 | python = 6 | 3.7: py37 7 | 3.6: py36 8 | 9 | [testenv:flake8] 10 | basepython = python 11 | deps = flake8 12 | commands = flake8 python_dynamodb_lock 13 | 14 | [testenv] 15 | setenv = 16 | PYTHONPATH = {toxinidir} 17 | 18 | commands = python setup.py test 19 | 20 | --------------------------------------------------------------------------------
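For reference, the following is a minimal usage sketch added by the editor - it is not one of the repository's source files. It shows how the pieces above fit together: creating the lock table, acquiring a lock with the context-manager form, reacting to app_callback events, and shutting the client down. It assumes the python_dynamodb_lock package is installed and that a DynamoDB endpoint is reachable at http://localhost:8000 (e.g. DynamoDB Local, as used by tests_integration/test_app.py); the 'non-blank' credentials mirror that integration test, and the partition key 'my-business-entity' is purely illustrative.

# Editor-added usage sketch - NOT part of the package source.
import datetime

import boto3

from python_dynamodb_lock.python_dynamodb_lock import DynamoDBLockClient, DynamoDBLockError


def app_callback(code, lock):
    # Invoked on a background (executor) thread for significant lock-lifecycle events.
    if code == DynamoDBLockError.LOCK_IN_DANGER:
        print('Wrap up or release quickly - lock is in danger:', lock.unique_identifier)
    elif code == DynamoDBLockError.LOCK_STOLEN:
        print('Abort the work - lock was stolen:', lock.unique_identifier)


def main():
    # Connection settings mirror tests_integration/test_app.py (DynamoDB Local).
    endpoint = 'http://localhost:8000'
    ddb_client = boto3.client(
        'dynamodb', endpoint_url=endpoint,
        aws_access_key_id='non-blank', aws_secret_access_key='non-blank', region_name='non-blank')
    ddb_resource = boto3.resource(
        'dynamodb', endpoint_url=endpoint,
        aws_access_key_id='non-blank', aws_secret_access_key='non-blank', region_name='non-blank')

    # One-time setup: creates 'DynamoDBLockTable' with the default key and TTL attribute names.
    # (This call fails if the table already exists - run it only once.)
    DynamoDBLockClient.create_dynamodb_table(ddb_client)

    lock_client = DynamoDBLockClient(ddb_resource)
    try:
        # The returned DynamoDBLock is a context manager; __exit__ releases it with best_effort=True.
        with lock_client.acquire_lock(
                'my-business-entity',
                retry_timeout=datetime.timedelta(seconds=30),
                app_callback=app_callback):
            pass  # do the work that must not run concurrently with other lock holders
    except DynamoDBLockError as e:
        print('Could not acquire the lock:', e)
    finally:
        lock_client.close()


if __name__ == '__main__':
    main()

If stricter error handling is needed on release, call lock_client.release_lock(lock, best_effort=False) explicitly instead of relying on the context manager, which always releases with best_effort=True.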