├── .coveragerc ├── .gitignore ├── .gitreview ├── .mailmap ├── .pre-commit-config.yaml ├── .stestr.conf ├── .zuul.yaml ├── CONTRIBUTING.rst ├── HACKING.rst ├── LICENSE ├── README.rst ├── doc ├── requirements.txt └── source │ ├── admin │ └── index.rst │ ├── conf.py │ ├── configuration │ └── index.rst │ ├── contributor │ ├── contributing.rst │ ├── history.rst │ └── index.rst │ ├── index.rst │ ├── install │ └── index.rst │ ├── reference │ ├── fixture.lockutils.rst │ ├── index.rst │ ├── lockutils.rst │ ├── opts.rst │ ├── processutils.rst │ └── watchdog.rst │ └── user │ └── index.rst ├── oslo_concurrency ├── __init__.py ├── _i18n.py ├── fixture │ ├── __init__.py │ └── lockutils.py ├── locale │ ├── de │ │ └── LC_MESSAGES │ │ │ └── oslo_concurrency.po │ ├── en_GB │ │ └── LC_MESSAGES │ │ │ └── oslo_concurrency.po │ ├── es │ │ └── LC_MESSAGES │ │ │ └── oslo_concurrency.po │ └── fr │ │ └── LC_MESSAGES │ │ └── oslo_concurrency.po ├── lockutils.py ├── opts.py ├── prlimit.py ├── processutils.py ├── tests │ ├── __init__.py │ └── unit │ │ ├── __init__.py │ │ ├── test_lockutils.py │ │ ├── test_lockutils_eventlet.py │ │ └── test_processutils.py ├── version.py └── watchdog.py ├── pyproject.toml ├── releasenotes ├── notes │ ├── add-option-for-fair-locks-b6d660e97683cec6.yaml │ ├── add-python-exec-kwarg-3a7a0c0849f9bb21.yaml │ ├── add_reno-3b4ae0789e9c45b4.yaml │ ├── deprecate-eventlet-within-lockutils-cba49086d7a65042.yaml │ ├── deprecate-windows-support-fcb77dddf82de36b.yaml │ ├── drop-python27-support-7d837a45dae941bb.yaml │ ├── log_acquiring_lock-1b224c0b1562ec97.yaml │ ├── remove-defaut-section-fallback-a90a6d2fd10671bc.yaml │ ├── remove-py38-dcdd342ee21f8118.yaml │ ├── remove-windows-bad63cd41c15235d.yaml │ └── timeout-c3fb65acda04c1c7.yaml └── source │ ├── 2023.1.rst │ ├── 2023.2.rst │ ├── 2024.1.rst │ ├── 2024.2.rst │ ├── 2025.1.rst │ ├── _static │ └── .placeholder │ ├── _templates │ └── .placeholder │ ├── conf.py │ ├── index.rst │ ├── locale │ ├── en_GB │ │ └── LC_MESSAGES │ 
│ │ └── releasenotes.po │ └── fr │ │ └── LC_MESSAGES │ │ └── releasenotes.po │ ├── newton.rst │ ├── ocata.rst │ ├── pike.rst │ ├── queens.rst │ ├── rocky.rst │ ├── stein.rst │ ├── train.rst │ ├── unreleased.rst │ ├── ussuri.rst │ └── victoria.rst ├── requirements.txt ├── setup.cfg ├── setup.py ├── test-requirements.txt └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = oslo_concurrency 4 | omit = oslo_concurrency/tests/* 5 | 6 | [report] 7 | ignore_errors = True 8 | precision = 2 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Add patterns in here to exclude files created by tools integrated with this 2 | # repository, such as test frameworks from the project's recommended workflow, 3 | # rendered documentation and package builds. 4 | # 5 | # Don't add patterns to exclude files created by preferred personal tools 6 | # (editors, IDEs, your operating system itself even). 
These should instead be 7 | # maintained outside the repository, for example in a ~/.gitignore file added 8 | # with: 9 | # 10 | # git config --global core.excludesfile '~/.gitignore' 11 | 12 | # Bytecompiled Python 13 | *.py[cod] 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Packages 19 | *.egg 20 | *.egg-info 21 | dist 22 | build 23 | eggs 24 | parts 25 | bin 26 | var 27 | sdist 28 | develop-eggs 29 | .installed.cfg 30 | lib 31 | lib64 32 | 33 | # Installer logs 34 | pip-log.txt 35 | 36 | # Unit test / coverage reports 37 | .coverage 38 | cover 39 | .tox 40 | .stestr 41 | 42 | # Translations 43 | *.mo 44 | 45 | # Complexity 46 | output/*.html 47 | output/*/index.html 48 | 49 | # Sphinx 50 | doc/build 51 | doc/source/reference/api 52 | 53 | # pbr generates these 54 | AUTHORS 55 | ChangeLog 56 | 57 | # reno build 58 | releasenotes/build 59 | RELEASENOTES.rst 60 | releasenotes/notes/reno.cache 61 | 62 | # coverage results 63 | .coverage.* 64 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.opendev.org 3 | port=29418 4 | project=openstack/oslo.concurrency.git -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | # Format is: 2 | # 3 | # -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v5.0.0 4 | hooks: 5 | - id: trailing-whitespace 6 | # Replaces or checks mixed line ending 7 | - id: mixed-line-ending 8 | args: ['--fix', 'lf'] 9 | exclude: '.*\.(svg)$' 10 | # Forbid files which have a UTF-8 byte-order marker 11 | - id: check-byte-order-marker 12 | # Checks that non-binary 
executables have a proper shebang 13 | - id: check-executables-have-shebangs 14 | # Check for files that contain merge conflict strings. 15 | - id: check-merge-conflict 16 | # Check for debugger imports and py37+ breakpoint() 17 | # calls in python source 18 | - id: debug-statements 19 | - id: check-yaml 20 | files: .*\.(yaml|yml)$ 21 | - repo: https://opendev.org/openstack/hacking 22 | rev: 7.0.0 23 | hooks: 24 | - id: hacking 25 | additional_dependencies: [] 26 | - repo: https://github.com/PyCQA/bandit 27 | rev: 1.7.10 28 | hooks: 29 | - id: bandit 30 | args: ['-x', 'tests', '-s', 'B311,B404,B603,B606'] 31 | - repo: https://github.com/asottile/pyupgrade 32 | rev: v3.18.0 33 | hooks: 34 | - id: pyupgrade 35 | args: [--py3-only] 36 | -------------------------------------------------------------------------------- /.stestr.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | test_path=./oslo_concurrency/tests/unit 3 | top_path=./ 4 | -------------------------------------------------------------------------------- /.zuul.yaml: -------------------------------------------------------------------------------- 1 | - project: 2 | templates: 3 | - check-requirements 4 | - lib-forward-testing-python3 5 | - openstack-cover-jobs 6 | - openstack-python3-jobs 7 | - periodic-stable-jobs 8 | - publish-openstack-docs-pti 9 | - release-notes-jobs-python3 10 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | If you would like to contribute to the development of oslo's libraries, 2 | first you must take a look to this page: 3 | 4 | https://specs.openstack.org/openstack/oslo-specs/specs/policy/contributing.html 5 | 6 | If you would like to contribute to the development of OpenStack, 7 | you must follow the steps in this page: 8 | 9 | https://docs.openstack.org/infra/manual/developers.html 10 | 11 | 
Once those steps have been completed, changes to OpenStack 12 | should be submitted for review via the Gerrit tool, following 13 | the workflow documented at: 14 | 15 | https://docs.openstack.org/infra/manual/developers.html#development-workflow 16 | 17 | Pull requests submitted through GitHub will be ignored. 18 | 19 | Bugs should be filed on Launchpad, not GitHub: 20 | 21 | https://bugs.launchpad.net/oslo.concurrency 22 | -------------------------------------------------------------------------------- /HACKING.rst: -------------------------------------------------------------------------------- 1 | Style Commandments 2 | ================== 3 | 4 | Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License.
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Team and repository tags 3 | ======================== 4 | 5 | .. 
image:: https://governance.openstack.org/tc/badges/oslo.concurrency.svg 6 | :target: https://governance.openstack.org/tc/reference/tags/index.html 7 | 8 | .. Change things from this point on 9 | 10 | ================ 11 | oslo.concurrency 12 | ================ 13 | 14 | .. image:: https://img.shields.io/pypi/v/oslo.concurrency.svg 15 | :target: https://pypi.org/project/oslo.concurrency/ 16 | :alt: Latest Version 17 | 18 | The oslo.concurrency library has utilities for safely running multi-thread, 19 | multi-process applications using locking mechanisms and for running 20 | external processes. 21 | 22 | * Free software: Apache license 23 | * Documentation: https://docs.openstack.org/oslo.concurrency/latest/ 24 | * Source: https://opendev.org/openstack/oslo.concurrency 25 | * Bugs: https://bugs.launchpad.net/oslo.concurrency 26 | * Release Notes: https://docs.openstack.org/releasenotes/oslo.concurrency/ 27 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx>=2.0.0 # BSD 2 | openstackdocstheme>=2.2.0 # Apache-2.0 3 | reno>=3.1.0 # Apache-2.0 4 | fixtures>=3.0.0 # Apache-2.0/BSD 5 | sphinxcontrib-apidoc>=0.2.0 # BSD 6 | -------------------------------------------------------------------------------- /doc/source/admin/index.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | Administrator Guide 3 | ===================== 4 | 5 | This section contains information useful to administrators operating a service 6 | that uses oslo.concurrency. 7 | 8 | Lock File Management 9 | ==================== 10 | 11 | For services that use oslo.concurrency's external lock functionality for 12 | interprocess locking, lock files will be stored in the location specified 13 | by the ``lock_path`` config option in the ``oslo_concurrency`` section. 
14 | These lock files are not automatically deleted by oslo.concurrency because 15 | the library has no way to know when the service is done with a lock, and 16 | deleting a lock file that is being held by the service would cause 17 | concurrency problems. Some services do delete lock files when they are done 18 | with them, but deletion of a service's lock files while the service is 19 | running should only be done by the service itself. External cleanup methods 20 | cannot reasonably know when a lock is no longer needed. 21 | 22 | However, to prevent the ``lock_path`` directory from growing indefinitely, 23 | it is a good idea to occasionally delete all the lock files from it. The only 24 | safe time to do this is when the service is not running, such as after a 25 | reboot or when the service is down for maintenance. In the latter case, make 26 | sure that all related services (such as api, worker, conductor, etc) are down. 27 | If any process that might hold locks is still running, deleting lock files may 28 | introduce inconsistency in the service. One possible approach to this cleanup 29 | is to put the ``lock_path`` in tmpfs so it will be automatically cleared on 30 | reboot. 31 | 32 | Note that in general, leftover lock files are a cosmetic nuisance at worst. 33 | If you do run into a functional problem as a result of large numbers of 34 | lock files, please report it to the Oslo team so we can look into other 35 | mitigation strategies. 36 | 37 | Frequently Asked Questions 38 | ========================== 39 | 40 | What is the history of the lock file issue? 41 | ------------------------------------------- 42 | 43 | It comes up every few months when a deployer of OpenStack notices that they 44 | have a large number of lock files lying around, apparently unused. A thread 45 | is started on the mailing list and one of the Oslo developers has to provide 46 | an explanation of why it works the way it does.
This FAQ is intended to be an 47 | official replacement for the one-off explanation that is usually given. 48 | 49 | The code responsible for this behavior has actually moved to the 50 | `fasteners `_ project, and there 51 | is an 52 | `issue addressing the leftover lock files `_ 53 | there. It covers much of the technical history of the problem, as well as 54 | some proposed solutions. 55 | 56 | Why hasn't this been fixed yet? 57 | ------------------------------- 58 | 59 | Because to the Oslo developers' knowledge, no one has ever had a functional 60 | issue as a result of leftover lock files. This makes it a lower priority 61 | problem, and because of the complexity of fixing it nobody has been able to 62 | yet. If functional issues are found, they should be reported as a bug 63 | against oslo.concurrency so they can be tracked. In the meantime, this will 64 | likely continue to be treated as a cosmetic annoyance and prioritized 65 | appropriately. 66 | 67 | Why aren't lock files deleted when the lock is released? 68 | -------------------------------------------------------- 69 | 70 | In our testing, when a lock file was deleted while another process was waiting 71 | for it, it created a sort of split-brain situation between any process that had 72 | been waiting for the deleted file, and any process that attempted to lock the 73 | file after it had been deleted. Essentially, two processes could end up holding 74 | the same lock at the same time, which made this an unacceptable solution. 75 | 76 | Why don't you use some other method of interprocess locking? 77 | ------------------------------------------------------------ 78 | 79 | We tried. Both Posix and SysV IPC were explored as alternatives. Unfortunately, 80 | both have significant issues on Linux. Posix locks cannot be broken if the 81 | process holding them crashes (at least not without a reboot). 
SysV locks have 82 | a limited range of numerical ids, and because oslo.concurrency supports 83 | string-based lock names, the possibility of collisions when hashing names was 84 | too high. It was deemed better to have the file-based locking mechanism that 85 | would always work than a different method that introduced serious new problems. 86 | 87 | Bonus Question: Why doesn't ``lock_path`` default to a temp directory? 88 | ---------------------------------------------------------------------- 89 | 90 | Because every process that may need to hold a lock must use the same value 91 | for ``lock_path`` or it becomes useless. If we allowed ``lock_path`` to be 92 | unset and just created a temp directory on startup, each process would create 93 | its own temp directory and there would be no actual coordination between them. 94 | 95 | While this isn't strictly related to the lock file issue, it is another FAQ 96 | about oslo.concurrency so it made sense to mention it here. 97 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2020 Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # -- General configuration ---------------------------------------------------- 17 | 18 | # Add any Sphinx extension module names here, as strings. 
They can be 19 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 20 | extensions = [ 21 | 'sphinx.ext.autodoc', 22 | 'sphinxcontrib.apidoc', 23 | 'openstackdocstheme', 24 | 'oslo_config.sphinxext', 25 | ] 26 | 27 | # openstackdocstheme options 28 | openstackdocs_repo_name = 'openstack/oslo.concurrency' 29 | openstackdocs_bug_project = 'oslo.concurrency' 30 | openstackdocs_bug_tag = '' 31 | 32 | # The master toctree document. 33 | master_doc = 'index' 34 | 35 | # General information about the project. 36 | copyright = '2014, OpenStack Foundation' 37 | 38 | # If true, '()' will be appended to :func: etc. cross-reference text. 39 | add_function_parentheses = True 40 | 41 | # If true, the current module name will be prepended to all description 42 | # unit titles (such as .. function::). 43 | add_module_names = True 44 | 45 | # The name of the Pygments (syntax highlighting) style to use. 46 | pygments_style = 'native' 47 | 48 | # -- Options for HTML output ------------------------------------------------- 49 | 50 | html_theme = 'openstackdocs' 51 | 52 | # -- sphinxcontrib.apidoc configuration -------------------------------------- 53 | 54 | apidoc_module_dir = '../../' 55 | apidoc_output_dir = 'reference/api' 56 | apidoc_excluded_paths = [ 57 | 'oslo_concurrency/tests', 58 | 'oslo_concurrency/_*', 59 | 'setup.py', 60 | ] 61 | -------------------------------------------------------------------------------- /doc/source/configuration/index.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Configuration Options 3 | ======================= 4 | 5 | oslo.concurrency uses oslo.config to define and manage configuration options 6 | to allow the deployer to control how an application uses this library. 7 | 8 | .. 
show-options:: oslo.concurrency 9 | -------------------------------------------------------------------------------- /doc/source/contributor/contributing.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | Contributing 3 | ============== 4 | 5 | .. include:: ../../../CONTRIBUTING.rst 6 | -------------------------------------------------------------------------------- /doc/source/contributor/history.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../../ChangeLog 2 | -------------------------------------------------------------------------------- /doc/source/contributor/index.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | Contributor's Guide 3 | ===================== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | contributing 9 | history 10 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | ============================================ 2 | Welcome to oslo.concurrency's documentation! 3 | ============================================ 4 | 5 | The `oslo`_ concurrency library has utilities for safely running multi-thread, 6 | multi-process applications using locking mechanisms and for running 7 | external processes. 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | install/index 13 | admin/index 14 | user/index 15 | configuration/index 16 | contributor/index 17 | reference/index 18 | 19 | 20 | Release Notes 21 | ============= 22 | 23 | Read also the `oslo.concurrency Release Notes 24 | `_. 25 | 26 | 27 | Indices and tables 28 | ================== 29 | 30 | * :ref:`genindex` 31 | * :ref:`modindex` 32 | * :ref:`search` 33 | 34 | .. 
_oslo: https://wiki.openstack.org/wiki/Oslo 35 | -------------------------------------------------------------------------------- /doc/source/install/index.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Installation 3 | ============ 4 | 5 | At the command line:: 6 | 7 | $ pip install oslo.concurrency 8 | 9 | Or, if you have virtualenvwrapper installed:: 10 | 11 | $ mkvirtualenv oslo.concurrency 12 | $ pip install oslo.concurrency -------------------------------------------------------------------------------- /doc/source/reference/fixture.lockutils.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | :mod:`oslo_concurrency.fixture.lockutils` 3 | =========================================== 4 | 5 | .. automodule:: oslo_concurrency.fixture.lockutils 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/source/reference/index.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | API Reference 3 | =============== 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | fixture.lockutils 9 | lockutils 10 | opts 11 | processutils 12 | watchdog 13 | api/modules 14 | -------------------------------------------------------------------------------- /doc/source/reference/lockutils.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | :mod:`oslo_concurrency.lockutils` 3 | =================================== 4 | 5 | .. 
automodule:: oslo_concurrency.lockutils 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/source/reference/opts.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | :mod:`oslo_concurrency.opts` 3 | ============================== 4 | 5 | .. automodule:: oslo_concurrency.opts 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/source/reference/processutils.rst: -------------------------------------------------------------------------------- 1 | ====================================== 2 | :mod:`oslo_concurrency.processutils` 3 | ====================================== 4 | 5 | .. automodule:: oslo_concurrency.processutils 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/source/reference/watchdog.rst: -------------------------------------------------------------------------------- 1 | ================================== 2 | :mod:`oslo_concurrency.watchdog` 3 | ================================== 4 | 5 | .. automodule:: oslo_concurrency.watchdog 6 | :members: 7 | :undoc-members: 8 | :show-inheritance: 9 | -------------------------------------------------------------------------------- /doc/source/user/index.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | Usage 3 | ======= 4 | 5 | To use oslo.concurrency in a project, import the relevant module. For 6 | example:: 7 | 8 | from oslo_concurrency import lockutils 9 | from oslo_concurrency import processutils 10 | 11 | .. 
seealso:: 12 | 13 | * :doc:`API Documentation <../reference/index>` 14 | 15 | Locking a function (local to a process) 16 | ======================================= 17 | 18 | To ensure that a function (which is not thread safe) is only used in 19 | a thread safe manner (typically such type of function should be refactored 20 | to avoid this problem but if not then the following can help):: 21 | 22 | @lockutils.synchronized('not_thread_safe') 23 | def not_thread_safe(): 24 | pass 25 | 26 | Once decorated later callers of this function will be able to call into 27 | this method and the contract that two threads will **not** enter this 28 | function at the same time will be upheld. Make sure that the names of the 29 | locks used are carefully chosen (typically by namespacing them to your 30 | app so that other apps will not chose the same names). 31 | 32 | Locking a function (local to a process as well as across process) 33 | ================================================================= 34 | 35 | To ensure that a function (which is not thread safe **or** multi-process 36 | safe) is only used in a safe manner (typically such type of function should 37 | be refactored to avoid this problem but if not then the following can help):: 38 | 39 | @lockutils.synchronized('not_thread_process_safe', external=True) 40 | def not_thread_process_safe(): 41 | pass 42 | 43 | Once decorated later callers of this function will be able to call into 44 | this method and the contract that two threads (or any two processes) 45 | will **not** enter this function at the same time will be upheld. Make 46 | sure that the names of the locks used are carefully chosen (typically by 47 | namespacing them to your app so that other apps will not chose the same 48 | names). 49 | 50 | Enabling fair locking 51 | ===================== 52 | 53 | By default there is no requirement that the lock is ``fair``. 
That is, it's 54 | possible for a thread to block waiting for the lock, then have another thread 55 | block waiting for the lock, and when the lock is released by the current owner 56 | the second waiter could acquire the lock before the first. In an extreme case 57 | you could have a whole string of other threads acquire the lock before the 58 | first waiter acquires it, resulting in unpredictable amounts of latency. 59 | 60 | For cases where this is a problem, it's possible to specify the use of fair 61 | locks:: 62 | 63 | @lockutils.synchronized('not_thread_process_safe', fair=True) 64 | def not_thread_process_safe(): 65 | pass 66 | 67 | When using fair locks the lock itself is slightly more expensive (which 68 | shouldn't matter in most cases), but it will ensure that all threads that 69 | block waiting for the lock will acquire it in the order that they blocked. 70 | 71 | The exception to this is when specifying both ``external`` and ``fair`` 72 | locks. In this case, the ordering *within* a given process will be fair, but 73 | the ordering *between* processes will be determined by the behaviour of the 74 | underlying OS. 75 | 76 | Common ways to prefix/namespace the synchronized decorator 77 | ========================================================== 78 | 79 | Since it is **highly** recommended to prefix (or namespace) the usage 80 | of the synchronized there are a few helpers that can make this much easier 81 | to achieve. 82 | 83 | An example is:: 84 | 85 | myapp_synchronized = lockutils.synchronized_with_prefix("myapp") 86 | 87 | Then further usage of the ``lockutils.synchronized`` would instead now use 88 | this decorator created above instead of using ``lockutils.synchronized`` 89 | directly. 90 | 91 | Command Line Wrapper 92 | ==================== 93 | 94 | ``oslo.concurrency`` includes a command line tool for use in test jobs 95 | that need the environment variable :envvar:`OSLO_LOCK_PATH` set. 
To 96 | use it, prefix the command to be run with 97 | :command:`lockutils-wrapper`. For example:: 98 | 99 | $ lockutils-wrapper env | grep OSLO_LOCK_PATH 100 | OSLO_LOCK_PATH=/tmp/tmpbFHK45 101 | -------------------------------------------------------------------------------- /oslo_concurrency/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/oslo.concurrency/08987d8af202622ec5ea00bea76c6f6588b07960/oslo_concurrency/__init__.py -------------------------------------------------------------------------------- /oslo_concurrency/_i18n.py: -------------------------------------------------------------------------------- 1 | # Copyright 2014 Mirantis Inc. 2 | # 3 | # All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 
import fixtures
from oslo_config import fixture as config

from oslo_concurrency import lockutils


class LockFixture(fixtures.Fixture):
    """External locking fixture.

    An alternative to the ``synchronized`` decorator with the external
    flag: because the lock is acquired in ``setUp`` and released through
    ``addCleanup``, the tearDowns and addCleanups of a test also run
    inside the lock context, serializing whole tests rather than single
    calls. It is recommended to be the first line in a test method,
    like so::

        def test_method(self):
            self.useFixture(LockFixture('lock_name'))
            ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.testcase):
            def setUp(self):
                self.useFixture(LockFixture('lock_name'))
                super(TestCase, self).setUp()
                ...

    This works because addCleanups are put on a LIFO queue that gets run
    after the test method exits (either by completing or raising an
    exception).
    """

    def __init__(self, name, lock_file_prefix=None):
        # Build the (external) lock context manager up front; it is only
        # entered once the fixture's setUp runs.
        self.mgr = lockutils.lock(
            name, lock_file_prefix=lock_file_prefix, external=True)

    def setUp(self):
        super().setUp()
        # Register the release before acquiring, so the lock is dropped
        # during cleanup even if later fixture setup fails.
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.lock = self.mgr.__enter__()


class ExternalLockFixture(fixtures.Fixture):
    """Configure lock_path so external locks can be used in unit tests.

    Creates a temporary directory to hold the lock files and points the
    oslo.config ``lock_path`` option at it. This enables external locking
    on a per-test basis, instead of globally via the OSLO_LOCK_PATH
    environment variable.

    Example::

        def test_method(self):
            self.useFixture(ExternalLockFixture())
            something_that_needs_external_locks()

    Alternatively, the useFixture call could be placed in a test class's
    setUp method to provide this functionality to all tests in the class.

    .. versionadded:: 0.3
    """

    def setUp(self):
        super().setUp()
        lock_dir = self.useFixture(fixtures.TempDir())
        conf_fixture = self.useFixture(config.Config(lockutils.CONF))
        conf_fixture.config(lock_path=lock_dir.path,
                            group='oslo_concurrency')
%(stdout)r\n" 52 | "Standardfehler: %(stderr)r" 53 | 54 | #, python-format 55 | msgid "%r failed. Not Retrying." 56 | msgstr "%r fehlgeschlagen. Wird nicht wiederholt." 57 | 58 | #, python-format 59 | msgid "%r failed. Retrying." 60 | msgstr "%r fehlgeschlagen. Neuversuch." 61 | 62 | msgid "" 63 | "Calling lockutils directly is no longer supported. Please use the lockutils-" 64 | "wrapper console script instead." 65 | msgstr "" 66 | "Ein direkter Aufruf von lockutils wird nicht mehr unterstützt. Verwenden Sie " 67 | "stattdessen das lockutils-wrapper Konsolescript." 68 | 69 | msgid "Command requested root, but did not specify a root helper." 70 | msgstr "Kommando braucht root, es wurde aber kein root helper spezifiziert." 71 | 72 | msgid "Environment not supported over SSH" 73 | msgstr "Umgebung wird nicht über SSH unterstützt" 74 | 75 | #, python-format 76 | msgid "" 77 | "Got an OSError\n" 78 | "command: %(cmd)r\n" 79 | "errno: %(errno)r" 80 | msgstr "" 81 | "OS Fehler aufgetreten:\n" 82 | "Kommando: %(cmd)r\n" 83 | "Fehlernummer: %(errno)r" 84 | 85 | #, python-format 86 | msgid "Got invalid arg log_errors: %r" 87 | msgstr "Ungültiges Argument für log_errors: %r" 88 | 89 | #, python-format 90 | msgid "Got unknown keyword args: %r" 91 | msgstr "Ungültige Schlüsswelwortargumente: %r" 92 | 93 | #, python-format 94 | msgid "Running cmd (subprocess): %s" 95 | msgstr "Führe Kommando (subprocess) aus: %s" 96 | 97 | msgid "Unexpected error while running command." 98 | msgstr "Unerwarteter Fehler bei der Ausführung des Kommandos." 99 | 100 | msgid "process_input not supported over SSH" 101 | msgstr "process_input wird nicht über SSH unterstützt" 102 | -------------------------------------------------------------------------------- /oslo_concurrency/locale/en_GB/LC_MESSAGES/oslo_concurrency.po: -------------------------------------------------------------------------------- 1 | # Translations template for oslo.concurrency. 
2 | # Copyright (C) 2015 ORGANIZATION 3 | # This file is distributed under the same license as the oslo.concurrency 4 | # project. 5 | # 6 | # Translators: 7 | # Andi Chandler , 2014-2015 8 | # Andreas Jaeger , 2016. #zanata 9 | # Andi Chandler , 2017. #zanata 10 | # Andi Chandler , 2020. #zanata 11 | # Andi Chandler , 2022. #zanata 12 | msgid "" 13 | msgstr "" 14 | "Project-Id-Version: oslo.concurrency VERSION\n" 15 | "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" 16 | "POT-Creation-Date: 2022-05-11 16:22+0000\n" 17 | "MIME-Version: 1.0\n" 18 | "Content-Type: text/plain; charset=UTF-8\n" 19 | "Content-Transfer-Encoding: 8bit\n" 20 | "PO-Revision-Date: 2022-06-13 07:42+0000\n" 21 | "Last-Translator: Andi Chandler \n" 22 | "Language: en_GB\n" 23 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 24 | "Generated-By: Babel 2.0\n" 25 | "X-Generator: Zanata 4.3.3\n" 26 | "Language-Team: English (United Kingdom)\n" 27 | 28 | #, python-format 29 | msgid "" 30 | "%(desc)r\n" 31 | "command: %(cmd)r\n" 32 | "exit code: %(code)r\n" 33 | "stdout: %(stdout)r\n" 34 | "stderr: %(stderr)r" 35 | msgstr "" 36 | "%(desc)r\n" 37 | "command: %(cmd)r\n" 38 | "exit code: %(code)r\n" 39 | "stdout: %(stdout)r\n" 40 | "stderr: %(stderr)r" 41 | 42 | #, python-format 43 | msgid "" 44 | "%(description)s\n" 45 | "Command: %(cmd)s\n" 46 | "Exit code: %(exit_code)s\n" 47 | "Stdout: %(stdout)r\n" 48 | "Stderr: %(stderr)r" 49 | msgstr "" 50 | "%(description)s\n" 51 | "Command: %(cmd)s\n" 52 | "Exit code: %(exit_code)s\n" 53 | "Stdout: %(stdout)r\n" 54 | "Stderr: %(stderr)r" 55 | 56 | #, python-format 57 | msgid "%r failed. Not Retrying." 58 | msgstr "%r failed. Not Retrying." 59 | 60 | #, python-format 61 | msgid "%r failed. Retrying." 62 | msgstr "%r failed. Retrying." 63 | 64 | msgid "" 65 | "Calling lockutils directly is no longer supported. Please use the lockutils-" 66 | "wrapper console script instead." 
67 | msgstr "" 68 | "Calling lockutils directly is no longer supported. Please use the lockutils-" 69 | "wrapper console script instead." 70 | 71 | msgid "Command requested root, but did not specify a root helper." 72 | msgstr "Command requested root, but did not specify a root helper." 73 | 74 | msgid "Disabling blocking is not supported when using fair locks." 75 | msgstr "Disabling blocking is not supported when using fair locks." 76 | 77 | msgid "Environment not supported over SSH" 78 | msgstr "Environment not supported over SSH" 79 | 80 | #, python-format 81 | msgid "" 82 | "Got an OSError\n" 83 | "command: %(cmd)r\n" 84 | "errno: %(errno)r" 85 | msgstr "" 86 | "Got an OSError\n" 87 | "command: %(cmd)r\n" 88 | "errno: %(errno)r" 89 | 90 | #, python-format 91 | msgid "Got invalid arg log_errors: %r" 92 | msgstr "Got invalid arg log_errors: %r" 93 | 94 | #, python-format 95 | msgid "Got unknown keyword args: %r" 96 | msgstr "Got unknown keyword args: %r" 97 | 98 | msgid "" 99 | "Process resource limits are ignored as this feature is not supported on " 100 | "Windows." 101 | msgstr "" 102 | "Process resource limits are ignored as this feature is not supported on " 103 | "Windows." 104 | 105 | #, python-format 106 | msgid "Running cmd (subprocess): %s" 107 | msgstr "Running cmd (subprocess): %s" 108 | 109 | msgid "Specifying semaphores is not supported when using fair locks." 110 | msgstr "Specifying semaphores is not supported when using fair locks." 111 | 112 | msgid "Unexpected error while running command." 113 | msgstr "Unexpected error while running command." 114 | 115 | msgid "process_input not supported over SSH" 116 | msgstr "process_input not supported over SSH" 117 | -------------------------------------------------------------------------------- /oslo_concurrency/locale/es/LC_MESSAGES/oslo_concurrency.po: -------------------------------------------------------------------------------- 1 | # Translations template for oslo.concurrency. 
2 | # Copyright (C) 2015 ORGANIZATION 3 | # This file is distributed under the same license as the oslo.concurrency 4 | # project. 5 | # 6 | # Translators: 7 | # Adriana Chisco Landazábal , 2015 8 | # Andreas Jaeger , 2016. #zanata 9 | msgid "" 10 | msgstr "" 11 | "Project-Id-Version: oslo.concurrency 3.6.1.dev10\n" 12 | "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" 13 | "POT-Creation-Date: 2016-04-19 12:20+0000\n" 14 | "MIME-Version: 1.0\n" 15 | "Content-Type: text/plain; charset=UTF-8\n" 16 | "Content-Transfer-Encoding: 8bit\n" 17 | "PO-Revision-Date: 2015-06-22 09:35+0000\n" 18 | "Last-Translator: Adriana Chisco Landazábal \n" 19 | "Language: es\n" 20 | "Plural-Forms: nplurals=2; plural=(n != 1);\n" 21 | "Generated-By: Babel 2.0\n" 22 | "X-Generator: Zanata 3.7.3\n" 23 | "Language-Team: Spanish\n" 24 | 25 | #, python-format 26 | msgid "" 27 | "%(desc)r\n" 28 | "command: %(cmd)r\n" 29 | "exit code: %(code)r\n" 30 | "stdout: %(stdout)r\n" 31 | "stderr: %(stderr)r" 32 | msgstr "" 33 | "%(desc)r\n" 34 | "comando: %(cmd)r\n" 35 | "código de salida: %(code)r\n" 36 | "stdout: %(stdout)r\n" 37 | "stderr: %(stderr)r" 38 | 39 | #, python-format 40 | msgid "" 41 | "%(description)s\n" 42 | "Command: %(cmd)s\n" 43 | "Exit code: %(exit_code)s\n" 44 | "Stdout: %(stdout)r\n" 45 | "Stderr: %(stderr)r" 46 | msgstr "" 47 | "%(description)s\n" 48 | "Comando: %(cmd)s\n" 49 | "Código de salida: %(exit_code)s\n" 50 | "Stdout: %(stdout)r\n" 51 | "Stderr: %(stderr)r" 52 | 53 | #, python-format 54 | msgid "%r failed. Not Retrying." 55 | msgstr "%r ha fallado. No se está intentando de nuevo." 56 | 57 | #, python-format 58 | msgid "%r failed. Retrying." 59 | msgstr "%r ha fallado. Intentando de nuevo." 60 | 61 | msgid "" 62 | "Calling lockutils directly is no longer supported. Please use the lockutils-" 63 | "wrapper console script instead." 64 | msgstr "" 65 | "Ya no se soporta llamar LockUtil. Por favor utilice a cambio la consola " 66 | "script lockutils-wrapper." 
67 | 68 | msgid "Command requested root, but did not specify a root helper." 69 | msgstr "Comando ha solicitado root, pero no especificó un auxiliar root." 70 | 71 | msgid "Environment not supported over SSH" 72 | msgstr "Ambiente no soportado a través de SSH" 73 | 74 | #, python-format 75 | msgid "" 76 | "Got an OSError\n" 77 | "command: %(cmd)r\n" 78 | "errno: %(errno)r" 79 | msgstr "" 80 | "Se obtuvo error de Sistema Operativo\n" 81 | "comando: %(cmd)r\n" 82 | "errno: %(errno)r" 83 | 84 | #, python-format 85 | msgid "Got invalid arg log_errors: %r" 86 | msgstr "Se obtuvo argumento no válido: %r" 87 | 88 | #, python-format 89 | msgid "Got unknown keyword args: %r" 90 | msgstr "Se obtuvieron argumentos de palabra clave: %r" 91 | 92 | #, python-format 93 | msgid "Running cmd (subprocess): %s" 94 | msgstr "Ejecutando cmd (subproceso): %s" 95 | 96 | msgid "Unexpected error while running command." 97 | msgstr "Error inesperado mientras se ejecutaba el comando." 98 | 99 | msgid "process_input not supported over SSH" 100 | msgstr "entrada de proceso no soportada a través de SSH" 101 | -------------------------------------------------------------------------------- /oslo_concurrency/locale/fr/LC_MESSAGES/oslo_concurrency.po: -------------------------------------------------------------------------------- 1 | # Translations template for oslo.concurrency. 2 | # Copyright (C) 2015 ORGANIZATION 3 | # This file is distributed under the same license as the oslo.concurrency 4 | # project. 5 | # 6 | # Translators: 7 | # Maxime COQUEREL , 2015 8 | # Andreas Jaeger , 2016. 
#zanata 9 | msgid "" 10 | msgstr "" 11 | "Project-Id-Version: oslo.concurrency 3.6.1.dev10\n" 12 | "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" 13 | "POT-Creation-Date: 2016-04-19 12:20+0000\n" 14 | "MIME-Version: 1.0\n" 15 | "Content-Type: text/plain; charset=UTF-8\n" 16 | "Content-Transfer-Encoding: 8bit\n" 17 | "PO-Revision-Date: 2015-06-10 11:06+0000\n" 18 | "Last-Translator: openstackjenkins \n" 19 | "Language: fr\n" 20 | "Plural-Forms: nplurals=2; plural=(n > 1);\n" 21 | "Generated-By: Babel 2.0\n" 22 | "X-Generator: Zanata 3.7.3\n" 23 | "Language-Team: French\n" 24 | 25 | #, python-format 26 | msgid "" 27 | "%(desc)r\n" 28 | "command: %(cmd)r\n" 29 | "exit code: %(code)r\n" 30 | "stdout: %(stdout)r\n" 31 | "stderr: %(stderr)r" 32 | msgstr "" 33 | "%(desc)r\n" 34 | "commande: %(cmd)r\n" 35 | "Code de sortie: %(code)r\n" 36 | "stdout: %(stdout)r\n" 37 | "stderr: %(stderr)r" 38 | 39 | #, python-format 40 | msgid "" 41 | "%(description)s\n" 42 | "Command: %(cmd)s\n" 43 | "Exit code: %(exit_code)s\n" 44 | "Stdout: %(stdout)r\n" 45 | "Stderr: %(stderr)r" 46 | msgstr "" 47 | "%(description)s\n" 48 | "Commande: %(cmd)s\n" 49 | "Code de sortie: %(exit_code)s\n" 50 | "Stdout: %(stdout)r\n" 51 | "Stderr: %(stderr)r" 52 | 53 | #, python-format 54 | msgid "%r failed. Not Retrying." 55 | msgstr "Echec de %r. Nouvelle tentative." 56 | 57 | #, python-format 58 | msgid "%r failed. Retrying." 59 | msgstr "Echec de %r. Nouvelle tentative." 60 | 61 | msgid "" 62 | "Calling lockutils directly is no longer supported. Please use the lockutils-" 63 | "wrapper console script instead." 64 | msgstr "" 65 | "Lockutils appelant directement n'est plus pris en charge. Merci d'utiliser " 66 | "le script de la console lockutils -wrapper à la place." 67 | 68 | msgid "Command requested root, but did not specify a root helper." 69 | msgstr "La commande exigeait root, mais n'indiquait pas comment obtenir root." 
70 | 71 | msgid "Environment not supported over SSH" 72 | msgstr "Environnement non prise en charge sur SSH" 73 | 74 | #, python-format 75 | msgid "" 76 | "Got an OSError\n" 77 | "command: %(cmd)r\n" 78 | "errno: %(errno)r" 79 | msgstr "" 80 | "Erreur du Système\n" 81 | "commande: %(cmd)r\n" 82 | "errno: %(errno)r" 83 | 84 | #, python-format 85 | msgid "Got invalid arg log_errors: %r" 86 | msgstr "Argument reçu non valide log_errors: %r" 87 | 88 | #, python-format 89 | msgid "Got unknown keyword args: %r" 90 | msgstr "Ags, mot clé inconnu: %r" 91 | 92 | #, python-format 93 | msgid "Running cmd (subprocess): %s" 94 | msgstr "Exécution de la commande (sous-processus): %s" 95 | 96 | msgid "Unexpected error while running command." 97 | msgstr "Erreur inattendue lors de l’exécution de la commande." 98 | 99 | msgid "process_input not supported over SSH" 100 | msgstr "process_input non pris en charge sur SSH" 101 | -------------------------------------------------------------------------------- /oslo_concurrency/lockutils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2011 OpenStack Foundation. 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import weakref

import debtcollector
import fasteners
from oslo_config import cfg
from oslo_utils import reflection
from oslo_utils import timeutils

from oslo_concurrency._i18n import _

try:
    # import eventlet optionally
    import eventlet
    from eventlet import patcher as eventlet_patcher
except ImportError:
    eventlet = None
    eventlet_patcher = None


LOG = logging.getLogger(__name__)


_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Enables or disables inter-process locks.'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("OSLO_LOCK_PATH"),
               help='Directory to use for lock files. For security, the '
                    'specified directory should only be writable by the user '
                    'running the processes that need locking. '
                    'Defaults to environment variable OSLO_LOCK_PATH. '
                    'If external locks are used, a lock path must be set.')
]


def _register_opts(conf):
    """Register this module's options on the given config object."""
    conf.register_opts(_opts, group='oslo_concurrency')


CONF = cfg.CONF
_register_opts(CONF)


def set_defaults(lock_path):
    """Set value for lock_path.

    This can be used by tests to set lock_path to a temporary directory.
    """
    cfg.set_defaults(_opts, lock_path=lock_path)


def get_lock_path(conf):
    """Return the path used for external file-based locks.

    :param conf: Configuration object
    :type conf: oslo_config.cfg.ConfigOpts

    .. versionadded:: 1.8
    """
    _register_opts(conf)
    return conf.oslo_concurrency.lock_path


class ReaderWriterLock(fasteners.ReaderWriterLock):
    """A reader/writer lock.

    .. versionadded:: 0.4
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Until https://github.com/eventlet/eventlet/issues/731 is resolved
        # we need to use eventlet.getcurrent instead of
        # threading.current_thread if we are running in a monkey patched
        # environment
        if eventlet is not None and eventlet_patcher is not None:
            if eventlet_patcher.is_monkey_patched('thread'):
                debtcollector.deprecate(
                    "Eventlet support is deprecated and will be removed.")
                self._current_thread = eventlet.getcurrent


InterProcessLock = fasteners.InterProcessLock


class FairLocks:
    """A garbage collected container of fair locks.

    With a fair lock, contending lockers will get the lock in the order in
    which they tried to acquire it.

    This collection internally uses a weak value dictionary so that when a
    lock is no longer in use (by any threads) it will automatically be
    removed from this container by the garbage collector.
    """

    def __init__(self):
        self._locks = weakref.WeakValueDictionary()
        # Guards creation so two threads asking for the same name never
        # race and end up with two different lock objects.
        self._lock = threading.Lock()

    def get(self, name):
        """Gets (or creates) a lock with a given name.

        :param name: The lock name to get/create (used to associate
                     previously created names with the same lock).

        Returns a newly constructed lock (or an existing one if it was
        already created for the given name).
        """
        with self._lock:
            try:
                return self._locks[name]
            except KeyError:
                # The fasteners module specifies that
                # ReaderWriterLock.write_lock() will give FIFO behaviour,
                # so we don't need to do anything special ourselves.
                rwlock = ReaderWriterLock()
                self._locks[name] = rwlock
                return rwlock


_fair_locks = FairLocks()


def internal_fair_lock(name):
    """Return the process-local fair lock registered under *name*."""
    return _fair_locks.get(name)


class Semaphores:
    """A garbage collected container of semaphores.

    This collection internally uses a weak value dictionary so that when a
    semaphore is no longer in use (by any threads) it will automatically be
    removed from this container by the garbage collector.

    .. versionadded:: 0.3
    """

    def __init__(self):
        self._semaphores = weakref.WeakValueDictionary()
        # Guards creation so concurrent get() calls for the same name
        # always return the same semaphore object.
        self._lock = threading.Lock()

    def get(self, name):
        """Gets (or creates) a semaphore with a given name.

        :param name: The semaphore name to get/create (used to associate
                     previously created names with the same semaphore).

        Returns a newly constructed semaphore (or an existing one if it was
        already created for the given name).
        """
        with self._lock:
            try:
                return self._semaphores[name]
            except KeyError:
                sem = threading.Semaphore()
                self._semaphores[name] = sem
                return sem

    def __len__(self):
        """Returns how many semaphores exist at the current time."""
        return len(self._semaphores)


_semaphores = Semaphores()


def _get_lock_path(name, lock_file_prefix, lock_path=None):
    """Build the on-disk path for the named external lock file.

    Raises ``cfg.RequiredOptError`` when no lock path is configured and
    none is passed in.
    """
    # NOTE(mikal): the lock name cannot contain directory
    # separators
    name = name.replace(os.sep, '_')
    if lock_file_prefix:
        sep = '' if lock_file_prefix.endswith('-') else '-'
        name = '{}{}{}'.format(lock_file_prefix, sep, name)

    local_lock_path = lock_path or CONF.oslo_concurrency.lock_path

    if not local_lock_path:
        raise cfg.RequiredOptError('lock_path', 'oslo_concurrency')

    return os.path.join(local_lock_path, name)


def external_lock(name, lock_file_prefix=None, lock_path=None):
    """Return an InterProcessLock backed by a file for the given name."""
    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)

    return InterProcessLock(lock_file_path)


def remove_external_lock_file(name, lock_file_prefix=None, lock_path=None,
                              semaphores=None):
    """Remove an external lock file when it's not used anymore.

    This will be helpful when we have a lot of lock files.
    """
    # Take the matching internal lock so we don't delete the file while
    # another thread in this process is using it.
    with internal_lock(name, semaphores=semaphores):
        lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)
        try:
            os.remove(lock_file_path)
        except OSError as exc:
            # A missing file is fine (already removed); anything else is
            # worth warning about but should not fail the caller.
            if exc.errno != errno.ENOENT:
                LOG.warning('Failed to remove file %(file)s',
                            {'file': lock_file_path})


class AcquireLockFailedException(Exception):
    """Raised when a non-blocking acquire cannot obtain the lock."""

    def __init__(self, lock_name):
        self.message = "Failed to acquire the lock %s" % lock_name

    def __str__(self):
        return self.message
blocking=True): 238 | @contextlib.contextmanager 239 | def nonblocking(lock): 240 | """Try to acquire the internal lock without blocking.""" 241 | if not lock.acquire(blocking=False): 242 | raise AcquireLockFailedException(name) 243 | try: 244 | yield lock 245 | finally: 246 | lock.release() 247 | 248 | if semaphores is None: 249 | semaphores = _semaphores 250 | lock = semaphores.get(name) 251 | 252 | return nonblocking(lock) if not blocking else lock 253 | 254 | 255 | @contextlib.contextmanager 256 | def lock(name, lock_file_prefix=None, external=False, lock_path=None, 257 | do_log=True, semaphores=None, delay=0.01, fair=False, blocking=True): 258 | """Context based lock 259 | 260 | This function yields a `threading.Semaphore` instance (if we don't use 261 | eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is 262 | True, in which case, it'll yield an InterProcessLock instance. 263 | 264 | :param lock_file_prefix: The lock_file_prefix argument is used to provide 265 | lock files on disk with a meaningful prefix. 266 | 267 | :param external: The external keyword argument denotes whether this lock 268 | should work across multiple processes. This means that if two different 269 | workers both run a method decorated with @synchronized('mylock', 270 | external=True), only one of them will execute at a time. 271 | 272 | :param lock_path: The path in which to store external lock files. For 273 | external locking to work properly, this must be the same for all 274 | references to the lock. 275 | 276 | :param do_log: Whether to log acquire/release messages. This is primarily 277 | intended to reduce log message duplication when `lock` is used from the 278 | `synchronized` decorator. 279 | 280 | :param semaphores: Container that provides semaphores to use when locking. 281 | This ensures that threads inside the same application can not collide, 282 | due to the fact that external process locks are unaware of a processes 283 | active threads. 

    :param delay: Delay between acquisition attempts (in seconds).

    :param fair: Whether or not we want a "fair" lock where contending lockers
        will get the lock in the order in which they tried to acquire it.

    :param blocking: Whether to wait forever to try to acquire the lock.
        Incompatible with fair locks because those provided by the fasteners
        module doesn't implements a non-blocking behavior.

    .. versionchanged:: 0.2
        Added *do_log* optional parameter.

    .. versionchanged:: 0.3
        Added *delay* and *semaphores* optional parameters.
    """
    if fair:
        # Fair locks come from fasteners; neither a custom semaphore
        # container nor non-blocking acquisition is supported there, so
        # reject those combinations up front.
        if semaphores is not None:
            raise NotImplementedError(_('Specifying semaphores is not '
                                        'supported when using fair locks.'))
        if blocking is not True:
            raise NotImplementedError(_('Disabling blocking is not supported '
                                        'when using fair locks.'))
        # The fasteners module specifies that write_lock() provides fairness.
        int_lock = internal_fair_lock(name).write_lock()
    else:
        int_lock = internal_lock(name, semaphores=semaphores,
                                 blocking=blocking)
    if do_log:
        LOG.debug('Acquiring lock "%s"', name)
    # The internal (per-process) lock is always taken first; the external
    # (inter-process) file lock, when requested, is taken while holding it.
    with int_lock:
        if do_log:
            LOG.debug('Acquired lock "%(lock)s"', {'lock': name})
        try:
            if external and not CONF.oslo_concurrency.disable_process_locking:
                ext_lock = external_lock(name, lock_file_prefix, lock_path)
                gotten = ext_lock.acquire(delay=delay, blocking=blocking)
                if not gotten:
                    raise AcquireLockFailedException(name)
                if do_log:
                    LOG.debug('Acquired external semaphore "%(lock)s"',
                              {'lock': name})
                try:
                    yield ext_lock
                finally:
                    # Release the file lock before the internal lock,
                    # reversing acquisition order.
                    ext_lock.release()
            else:
                yield int_lock
        finally:
            if do_log:
                LOG.debug('Releasing lock "%(lock)s"', {'lock': name})


def lock_with_prefix(lock_file_prefix):
    """Partial object generator for the lock context manager.

    Redefine lock in each project like so::

        (in nova/utils.py)
        from oslo_concurrency import lockutils

        _prefix = 'nova'
        lock = lockutils.lock_with_prefix(_prefix)
        lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)


        (in nova/foo.py)
        from nova import utils

        with utils.lock('mylock'):
            ...

    Eventually clean up with::

        lock_cleanup('mylock')

    :param lock_file_prefix: A string used to provide lock files on disk with a
        meaningful prefix. Will be separated from the lock name with a hyphen,
        which may optionally be included in the lock_file_prefix (e.g.
        ``'nova'`` and ``'nova-'`` are equivalent).
    """
    return functools.partial(lock, lock_file_prefix=lock_file_prefix)


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None,
                 semaphores=None, delay=0.01, fair=False, blocking=True):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.

    .. versionchanged:: 0.3
        Added *delay* and *semaphores* optional parameter.
    """

    def wrap(f):

        @functools.wraps(f)
        def inner(*args, **kwargs):
            # t1: before acquisition, t2: after acquisition (None means the
            # lock was never obtained). Used below to log wait/hold times.
            t1 = timeutils.now()
            t2 = None
            gotten = True
            f_name = reflection.get_callable_name(f)
            try:
                LOG.debug('Acquiring lock "%s" by "%s"', name, f_name)
                with lock(name, lock_file_prefix, external, lock_path,
                          do_log=False, semaphores=semaphores, delay=delay,
                          fair=fair, blocking=blocking):
                    t2 = timeutils.now()
                    LOG.debug('Lock "%(name)s" acquired by "%(function)s" :: '
                              'waited %(wait_secs)0.3fs',
                              {'name': name,
                               'function': f_name,
                               'wait_secs': (t2 - t1)})
                    return f(*args, **kwargs)
            except AcquireLockFailedException:
                # Non-blocking mode: acquisition failed; inner() returns
                # None and the wrapped function is never called.
                gotten = False
            finally:
                t3 = timeutils.now()
                if t2 is None:
                    held_secs = "N/A"
                else:
                    held_secs = "%0.3fs" % (t3 - t2)
                LOG.debug('Lock "%(name)s" "%(gotten)s" by "%(function)s" ::'
                          ' held %(held_secs)s',
                          {'name': name,
                           'gotten': 'released' if gotten else 'unacquired',
                           'function': f_name,
                           'held_secs': held_secs})
        return inner

    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from oslo_concurrency import lockutils

        _prefix = 'nova'
        synchronized = lockutils.synchronized_with_prefix(_prefix)
        lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)


        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    Eventually clean up with::

        lock_cleanup('mylock')

    :param lock_file_prefix: A string used to provide lock files on disk with a
        meaningful prefix. Will be separated from the lock name with a hyphen,
        which may optionally be included in the lock_file_prefix (e.g.
        ``'nova'`` and ``'nova-'`` are equivalent).
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)


def remove_external_lock_file_with_prefix(lock_file_prefix):
    """Partial object generator for the remove lock file function.

    Redefine remove_external_lock_file_with_prefix in each project like so::

        (in nova/utils.py)
        from oslo_concurrency import lockutils

        _prefix = 'nova'
        synchronized = lockutils.synchronized_with_prefix(_prefix)
        lock = lockutils.lock_with_prefix(_prefix)
        lock_cleanup = lockutils.remove_external_lock_file_with_prefix(_prefix)

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

        def baz(self, *args):
           ...
           with utils.lock('mylock'):
              ...
           ...



    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """
    return functools.partial(remove_external_lock_file,
                             lock_file_prefix=lock_file_prefix)


def _lock_wrapper(argv):
    """Create a dir for locks and pass it to command from arguments

    This is exposed as a console script entry point named
    lockutils-wrapper

    If you run this:
        lockutils-wrapper stestr run

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
515 | """ 516 | 517 | lock_dir = tempfile.mkdtemp() 518 | os.environ["OSLO_LOCK_PATH"] = lock_dir 519 | try: 520 | ret_val = subprocess.call(argv[1:]) 521 | finally: 522 | shutil.rmtree(lock_dir, ignore_errors=True) 523 | return ret_val 524 | 525 | 526 | def main(): 527 | sys.exit(_lock_wrapper(sys.argv)) 528 | 529 | 530 | if __name__ == '__main__': 531 | raise NotImplementedError(_('Calling lockutils directly is no longer ' 532 | 'supported. Please use the ' 533 | 'lockutils-wrapper console script instead.')) 534 | -------------------------------------------------------------------------------- /oslo_concurrency/opts.py: -------------------------------------------------------------------------------- 1 | # Copyright 2014 Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import copy 16 | 17 | from oslo_concurrency import lockutils 18 | 19 | __all__ = [ 20 | 'list_opts', 21 | ] 22 | 23 | 24 | def list_opts(): 25 | """Return a list of oslo.config options available in the library. 26 | 27 | The returned list includes all oslo.config options which may be registered 28 | at runtime by the library. 29 | 30 | Each element of the list is a tuple. The first element is the name of the 31 | group under which the list of elements in the second element will be 32 | registered. A group name of None corresponds to the [DEFAULT] group in 33 | config files. 
34 | 35 | This function is also discoverable via the 'oslo_concurrency' entry point 36 | under the 'oslo.config.opts' namespace. 37 | 38 | The purpose of this is to allow tools like the Oslo sample config file 39 | generator to discover the options exposed to users by this library. 40 | 41 | :returns: a list of (group_name, opts) tuples 42 | """ 43 | return [('oslo_concurrency', copy.deepcopy(lockutils._opts))] 44 | -------------------------------------------------------------------------------- /oslo_concurrency/prlimit.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Red Hat. 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
15 | 16 | import argparse 17 | import os 18 | import resource 19 | import shutil 20 | import sys 21 | 22 | USAGE_PROGRAM = ('%s -m oslo_concurrency.prlimit' 23 | % os.path.basename(sys.executable)) 24 | 25 | RESOURCES = ( 26 | # argparse argument => resource 27 | ('as', resource.RLIMIT_AS), 28 | ('core', resource.RLIMIT_CORE), 29 | ('cpu', resource.RLIMIT_CPU), 30 | ('data', resource.RLIMIT_DATA), 31 | ('fsize', resource.RLIMIT_FSIZE), 32 | ('memlock', resource.RLIMIT_MEMLOCK), 33 | ('nofile', resource.RLIMIT_NOFILE), 34 | ('nproc', resource.RLIMIT_NPROC), 35 | ('rss', resource.RLIMIT_RSS), 36 | ('stack', resource.RLIMIT_STACK), 37 | ) 38 | 39 | 40 | def parse_args(): 41 | parser = argparse.ArgumentParser(description='prlimit', prog=USAGE_PROGRAM) 42 | parser.add_argument('--as', type=int, 43 | help='Address space limit in bytes') 44 | parser.add_argument('--core', type=int, 45 | help='Core file size limit in bytes') 46 | parser.add_argument('--cpu', type=int, 47 | help='CPU time limit in seconds') 48 | parser.add_argument('--data', type=int, 49 | help='Data size limit in bytes') 50 | parser.add_argument('--fsize', type=int, 51 | help='File size limit in bytes') 52 | parser.add_argument('--memlock', type=int, 53 | help='Locked memory limit in bytes') 54 | parser.add_argument('--nofile', type=int, 55 | help='Maximum number of open files') 56 | parser.add_argument('--nproc', type=int, 57 | help='Maximum number of processes') 58 | parser.add_argument('--rss', type=int, 59 | help='Maximum Resident Set Size (RSS) in bytes') 60 | parser.add_argument('--stack', type=int, 61 | help='Stack size limit in bytes') 62 | parser.add_argument('program', 63 | help='Program (absolute path)') 64 | parser.add_argument('program_args', metavar="arg", nargs='...', 65 | help='Program parameters') 66 | 67 | args = parser.parse_args() 68 | return args 69 | 70 | 71 | def main(): 72 | args = parse_args() 73 | 74 | program = args.program 75 | if not os.path.isabs(program): 76 | # program uses 
a relative path: try to find the absolute path 77 | # to the executable 78 | program_abs = shutil.which(program) 79 | if program_abs: 80 | program = program_abs 81 | 82 | for arg_name, rlimit in RESOURCES: 83 | value = getattr(args, arg_name) 84 | if value is None: 85 | continue 86 | try: 87 | resource.setrlimit(rlimit, (value, value)) 88 | except ValueError as exc: 89 | print("%s: failed to set the %s resource limit: %s" 90 | % (USAGE_PROGRAM, arg_name.upper(), exc), 91 | file=sys.stderr) 92 | sys.exit(1) 93 | 94 | try: 95 | os.execv(program, [program] + args.program_args) 96 | except Exception as exc: 97 | print("%s: failed to execute %s: %s" 98 | % (USAGE_PROGRAM, program, exc), 99 | file=sys.stderr) 100 | sys.exit(1) 101 | 102 | 103 | if __name__ == "__main__": 104 | main() 105 | -------------------------------------------------------------------------------- /oslo_concurrency/processutils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2011 OpenStack Foundation. 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | 16 | """ 17 | System-level utilities and helper functions. 
18 | """ 19 | 20 | import functools 21 | import logging 22 | import multiprocessing 23 | import os 24 | import random 25 | import shlex 26 | import signal 27 | import subprocess 28 | import sys 29 | import time 30 | 31 | import enum 32 | from oslo_utils import encodeutils 33 | from oslo_utils import strutils 34 | from oslo_utils import timeutils 35 | 36 | from oslo_concurrency._i18n import _ 37 | 38 | 39 | LOG = logging.getLogger(__name__) 40 | 41 | 42 | class InvalidArgumentError(Exception): 43 | def __init__(self, message=None): 44 | super().__init__(message) 45 | 46 | 47 | class UnknownArgumentError(Exception): 48 | def __init__(self, message=None): 49 | super().__init__(message) 50 | 51 | 52 | class ProcessExecutionError(Exception): 53 | def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, 54 | description=None): 55 | super().__init__( 56 | stdout, stderr, exit_code, cmd, description) 57 | self.exit_code = exit_code 58 | self.stderr = stderr 59 | self.stdout = stdout 60 | self.cmd = cmd 61 | self.description = description 62 | 63 | def __str__(self): 64 | description = self.description 65 | if description is None: 66 | description = _("Unexpected error while running command.") 67 | 68 | exit_code = self.exit_code 69 | if exit_code is None: 70 | exit_code = '-' 71 | 72 | message = _('%(description)s\n' 73 | 'Command: %(cmd)s\n' 74 | 'Exit code: %(exit_code)s\n' 75 | 'Stdout: %(stdout)r\n' 76 | 'Stderr: %(stderr)r') % {'description': description, 77 | 'cmd': self.cmd, 78 | 'exit_code': exit_code, 79 | 'stdout': self.stdout, 80 | 'stderr': self.stderr} 81 | return message 82 | 83 | 84 | class NoRootWrapSpecified(Exception): 85 | def __init__(self, message=None): 86 | super().__init__(message) 87 | 88 | 89 | def _subprocess_setup(on_preexec_fn): 90 | # Python installs a SIGPIPE handler by default. This is usually not what 91 | # non-Python subprocesses expect. 
92 | signal.signal(signal.SIGPIPE, signal.SIG_DFL) 93 | if on_preexec_fn: 94 | on_preexec_fn() 95 | 96 | 97 | @enum.unique 98 | class LogErrors(enum.IntEnum): 99 | """Enumerations that affect if stdout and stderr are logged on error. 100 | 101 | .. versionadded:: 2.7 102 | """ 103 | 104 | #: No logging on errors. 105 | DEFAULT = 0 106 | 107 | #: Log an error on **each** occurence of an error. 108 | ALL = 1 109 | 110 | #: Log an error on the last attempt that errored **only**. 111 | FINAL = 2 112 | 113 | 114 | # Retain these aliases for a number of releases... 115 | LOG_ALL_ERRORS = LogErrors.ALL 116 | LOG_FINAL_ERROR = LogErrors.FINAL 117 | LOG_DEFAULT_ERROR = LogErrors.DEFAULT 118 | 119 | 120 | class ProcessLimits: 121 | """Resource limits on a process. 122 | 123 | Attributes: 124 | 125 | * address_space: Address space limit in bytes 126 | * core_file_size: Core file size limit in bytes 127 | * cpu_time: CPU time limit in seconds 128 | * data_size: Data size limit in bytes 129 | * file_size: File size limit in bytes 130 | * memory_locked: Locked memory limit in bytes 131 | * number_files: Maximum number of open files 132 | * number_processes: Maximum number of processes 133 | * resident_set_size: Maximum Resident Set Size (RSS) in bytes 134 | * stack_size: Stack size limit in bytes 135 | 136 | This object can be used for the *prlimit* parameter of :func:`execute`. 
    """

    # Maps the attribute names above to the command-line flags understood
    # by oslo_concurrency.prlimit.
    _LIMITS = {
        "address_space": "--as",
        "core_file_size": "--core",
        "cpu_time": "--cpu",
        "data_size": "--data",
        "file_size": "--fsize",
        "memory_locked": "--memlock",
        "number_files": "--nofile",
        "number_processes": "--nproc",
        "resident_set_size": "--rss",
        "stack_size": "--stack",
    }

    def __init__(self, **kw):
        # Unset limits default to None; any leftover keyword is an error.
        for limit in self._LIMITS:
            setattr(self, limit, kw.pop(limit, None))

        if kw:
            raise ValueError("invalid limits: %s"
                             % ', '.join(sorted(kw.keys())))

    def prlimit_args(self):
        """Create a list of arguments for the prlimit command line."""
        args = []
        for limit in self._LIMITS:
            val = getattr(self, limit)
            if val is not None:
                args.append("{}={}".format(self._LIMITS[limit], val))
        return args


def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.

    Allows optional retry.

    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param cwd:             Set the current working directory
    :type cwd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string or bytes
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be logging.DEBUG or logging.INFO)
    :param log_errors:      Should stdout and stderr be logged on error?
                            Possible values are
                            :py:attr:`~.LogErrors.DEFAULT`,
                            :py:attr:`~.LogErrors.FINAL`, or
                            :py:attr:`~.LogErrors.ALL`. Note that the
                            values :py:attr:`~.LogErrors.FINAL` and
                            :py:attr:`~.LogErrors.ALL`
                            are **only** relevant when multiple attempts of
                            command execution are requested using the
                            ``attempts`` parameter.
    :type log_errors:       :py:class:`~.LogErrors`
    :param binary:          On Python 3, return stdout and stderr as bytes if
                            binary is True, as Unicode otherwise.
    :type binary:           boolean
    :param on_execute:      This function will be called upon process creation
                            with the object as a argument.  The Purpose of this
                            is to allow the caller of `processutils.execute` to
                            track process creation asynchronously.
    :type on_execute:       function(:class:`subprocess.Popen`)
    :param on_completion:   This function will be called upon process
                            completion with the object as a argument.  The
                            Purpose of this is to allow the caller of
                            `processutils.execute` to track process completion
                            asynchronously.
    :type on_completion:    function(:class:`subprocess.Popen`)
    :param preexec_fn:      This function will be called
                            in the child process just before the child
                            is executed. WARNING: On windows, we silently
                            drop this preexec_fn as it is not supported by
                            subprocess.Popen on windows (throws a
                            ValueError)
    :type preexec_fn:       function()
    :param prlimit:         Set resource limits on the child process. See
                            below for a detailed description.
    :type prlimit:          :class:`ProcessLimits`
    :param python_exec:     The python executable to use for enforcing
                            prlimits. If this is not set or is None, it will
                            default to use sys.executable.
    :type python_exec:      string
    :param timeout:         Timeout (in seconds) to wait for the process
                            termination. If timeout is reached,
                            :class:`subprocess.TimeoutExpired` is raised.
    :type timeout:          int
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    :raises:                :class:`OSError`
    :raises:                :class:`subprocess.TimeoutExpired`

    The *prlimit* parameter can be used to set resource limits on the child
    process.  If this parameter is used, the child process will be spawned by a
    wrapper process which will set limits before spawning the command.

    .. versionchanged:: 3.17
       *process_input* can now be either bytes or string on python3.

    .. versionchanged:: 3.4
       Added *prlimit* optional parameter.

    .. versionchanged:: 1.5
       Added *cwd* optional parameter.

    .. versionchanged:: 1.9
       Added *binary* optional parameter. On Python 3, *stdout* and *stderr*
       are now returned as Unicode strings by default, or bytes if *binary* is
       true.

    .. versionchanged:: 2.1
       Added *on_execute* and *on_completion* optional parameters.

    .. versionchanged:: 2.3
       Added *preexec_fn* optional parameter.
    """

    # Pop every supported keyword; whatever remains in kwargs afterwards
    # is unknown and rejected below.
    cwd = kwargs.pop('cwd', None)
    process_input = kwargs.pop('process_input', None)
    if process_input is not None:
        process_input = encodeutils.to_utf8(process_input)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', logging.DEBUG)
    log_errors = kwargs.pop('log_errors', None)
    if log_errors is None:
        log_errors = LogErrors.DEFAULT
    binary = kwargs.pop('binary', False)
    on_execute = kwargs.pop('on_execute', None)
    on_completion = kwargs.pop('on_completion', None)
    preexec_fn = kwargs.pop('preexec_fn', None)
    prlimit = kwargs.pop('prlimit', None)
    python_exec = kwargs.pop('python_exec', None) or sys.executable
    timeout = kwargs.pop('timeout', None)

    # Normalize check_exit_code to a list of allowed codes; a bare bool
    # toggles checking entirely, a bare int becomes a one-element list.
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]

    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)

    if isinstance(log_errors, int):
        log_errors = LogErrors(log_errors)
    if not isinstance(log_errors, LogErrors):
        raise InvalidArgumentError(_('Got invalid arg log_errors: %r') %
                                   log_errors)

    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        if shell:
            # root helper has to be injected into the command string
            cmd = [' '.join((root_helper, cmd[0]))] + list(cmd[1:])
        else:
            # root helper has to be tokenized into argument list
            cmd = shlex.split(root_helper) + list(cmd)

    cmd = [str(c) for c in cmd]

    if prlimit:
        # Wrap the command with the prlimit helper module, which applies
        # the resource limits and then execs the real command.
        args = [python_exec, '-m', 'oslo_concurrency.prlimit']
        args.extend(prlimit.prlimit_args())
        args.append('--')
        args.extend(cmd)
        cmd = args

    sanitized_cmd = strutils.mask_password(' '.join(cmd))

    watch = timeutils.StopWatch()
    while attempts > 0:
        attempts -= 1
        watch.restart()

        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
            _PIPE = subprocess.PIPE  # pylint: disable=E1101

            on_preexec_fn = functools.partial(_subprocess_setup, preexec_fn)

            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   preexec_fn=on_preexec_fn,
                                   shell=shell,
                                   cwd=cwd,
                                   env=env_variables)  # nosec:B602

            if on_execute:
                on_execute(obj)

            try:
                result = obj.communicate(process_input, timeout=timeout)

                obj.stdin.close()  # pylint: disable=E1101
                _returncode = obj.returncode  # pylint: disable=E1101
                LOG.log(loglevel, 'CMD "%s" returned: %s in %0.3fs',
                        sanitized_cmd, _returncode, watch.elapsed())
            except subprocess.TimeoutExpired:
                LOG.log(loglevel, 'CMD "%s" reached timeout in %0.3fs',
                        sanitized_cmd, watch.elapsed())
                raise
            finally:
                # The completion callback fires even on timeout/raise.
                if on_completion:
                    on_completion(obj)

            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                # Mask secrets before they can reach logs or tracebacks.
                sanitized_stdout = strutils.mask_password(stdout)
                sanitized_stderr = strutils.mask_password(stderr)
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=sanitized_stdout,
                                            stderr=sanitized_stderr,
                                            cmd=sanitized_cmd)
            if not binary and result is not None:
                (stdout, stderr) = result
                # Decode from the locale using using the surrogateescape error
                # handler (decoding cannot fail)
                stdout = os.fsdecode(stdout)
                stderr = os.fsdecode(stderr)
                return (stdout, stderr)
            else:
                return result

        except (ProcessExecutionError, OSError) as err:
            # if we want to always log the errors or if this is
            # the final attempt that failed and we want to log that.
            if log_errors == LOG_ALL_ERRORS or (
                    log_errors == LOG_FINAL_ERROR and not attempts):
                if isinstance(err, ProcessExecutionError):
                    format = _('%(desc)r\ncommand: %(cmd)r\n'
                               'exit code: %(code)r\nstdout: %(stdout)r\n'
                               'stderr: %(stderr)r')
                    LOG.log(loglevel, format, {"desc": err.description,
                                               "cmd": err.cmd,
                                               "code": err.exit_code,
                                               "stdout": err.stdout,
                                               "stderr": err.stderr})
                else:
                    format = _('Got an OSError\ncommand: %(cmd)r\n'
                               'errno: %(errno)r')
                    LOG.log(loglevel, format, {"cmd": sanitized_cmd,
                                               "errno": err.errno})

            if not attempts:
                LOG.log(loglevel, _('%r failed. Not Retrying.'),
                        sanitized_cmd)
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'),
                        sanitized_cmd)
                if delay_on_retry:
                    time.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            # NOTE(bnemec): termie's comment above is probably specific to the
            #               eventlet subprocess module, but since we still
            #               have to support that we're leaving the sleep.  It
            #               won't hurt anything in the stdlib case anyway.
            time.sleep(0)


def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.
444 | 445 | Returns an (out, err) tuple of strings containing the output of 446 | the command's stdout and stderr. If 'err' is not empty then the 447 | command can be considered to have failed. 448 | 449 | :param discard_warnings: True | False. Defaults to False. If set to True, 450 | then for succeeding commands, stderr is cleared 451 | :type discard_warnings: boolean 452 | :returns: (out, err) from process execution 453 | 454 | """ 455 | discard_warnings = kwargs.pop('discard_warnings', False) 456 | 457 | try: 458 | out, err = execute(*args, **kwargs) 459 | failed = False 460 | except ProcessExecutionError as exn: 461 | out, err = '', str(exn) 462 | failed = True 463 | 464 | if not failed and discard_warnings and err: 465 | # Handle commands that output to stderr but otherwise succeed 466 | err = '' 467 | 468 | return out, err 469 | 470 | 471 | def ssh_execute(ssh, cmd, process_input=None, 472 | addl_env=None, check_exit_code=True, 473 | binary=False, timeout=None, 474 | sanitize_stdout=True): 475 | """Run a command through SSH. 476 | 477 | :param ssh: An SSH Connection object. 478 | :param cmd: The command string to run. 479 | :param check_exit_code: If an exception should be raised for non-zero 480 | exit. 481 | :param timeout: Max time in secs to wait for command execution. 482 | :param sanitize_stdout: Defaults to True. If set to True, stdout is 483 | sanitized i.e. any sensitive information like 484 | password in command output will be masked. 485 | :returns: (stdout, stderr) from command execution through 486 | SSH. 487 | 488 | .. versionchanged:: 1.9 489 | Added *binary* optional parameter. 490 | """ 491 | sanitized_cmd = strutils.mask_password(cmd) 492 | LOG.debug('Running cmd (SSH): %s', sanitized_cmd) 493 | if addl_env: 494 | raise InvalidArgumentError(_('Environment not supported over SSH')) 495 | 496 | if process_input: 497 | # This is (probably) fixable if we need it... 
498 | raise InvalidArgumentError(_('process_input not supported over SSH')) 499 | 500 | stdin_stream, stdout_stream, stderr_stream = ssh.exec_command( 501 | cmd, timeout=timeout) 502 | channel = stdout_stream.channel 503 | 504 | # NOTE(justinsb): This seems suspicious... 505 | # ...other SSH clients have buffering issues with this approach 506 | stdout = stdout_stream.read() 507 | stderr = stderr_stream.read() 508 | 509 | stdin_stream.close() 510 | 511 | exit_status = channel.recv_exit_status() 512 | 513 | # Decode from the locale using using the surrogateescape error handler 514 | # (decoding cannot fail). Decode even if binary is True because 515 | # mask_password() requires Unicode on Python 3 516 | stdout = os.fsdecode(stdout) 517 | stderr = os.fsdecode(stderr) 518 | 519 | if sanitize_stdout: 520 | stdout = strutils.mask_password(stdout) 521 | 522 | stderr = strutils.mask_password(stderr) 523 | 524 | # exit_status == -1 if no exit code was returned 525 | if exit_status != -1: 526 | LOG.debug('Result was %s' % exit_status) 527 | if check_exit_code and exit_status != 0: 528 | # In case of errors in command run, due to poor implementation of 529 | # command executable program, there might be chance that it leaks 530 | # sensitive information like password to stdout. In such cases 531 | # stdout needs to be sanitized even though sanitize_stdout=False. 532 | stdout = strutils.mask_password(stdout) 533 | raise ProcessExecutionError(exit_code=exit_status, 534 | stdout=stdout, 535 | stderr=stderr, 536 | cmd=sanitized_cmd) 537 | 538 | if binary: 539 | # fsencode() is the reverse operation of fsdecode() 540 | stdout = os.fsencode(stdout) 541 | stderr = os.fsencode(stderr) 542 | 543 | return (stdout, stderr) 544 | 545 | 546 | def get_worker_count(): 547 | """Utility to get the default worker count. 548 | 549 | :returns: The number of CPUs if that can be determined, else a default 550 | worker count of 1 is returned. 
551 | """ 552 | try: 553 | return multiprocessing.cpu_count() 554 | except NotImplementedError: 555 | return 1 556 | -------------------------------------------------------------------------------- /oslo_concurrency/tests/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2014 Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import os 16 | 17 | if os.environ.get('TEST_EVENTLET'): 18 | import eventlet 19 | eventlet.monkey_patch() 20 | -------------------------------------------------------------------------------- /oslo_concurrency/tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/oslo.concurrency/08987d8af202622ec5ea00bea76c6f6588b07960/oslo_concurrency/tests/unit/__init__.py -------------------------------------------------------------------------------- /oslo_concurrency/tests/unit/test_lockutils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2011 Justin Santa Barbara 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import errno
import fcntl
import multiprocessing
import os
import signal
import subprocess
import sys
import tempfile
import threading
import time
from unittest import mock

from oslotest import base as test_base

from oslo_concurrency.fixture import lockutils as fixtures
from oslo_concurrency import lockutils
from oslo_config import fixture as config


def lock_files(handles_dir, out_queue):
    # Helper executed in a child process: while holding the shared external
    # lock, flock() 50 files non-blocking and report via the queue how many
    # were successfully locked and unlocked.
    with lockutils.lock('external', 'test-', external=True):
        # Open some files we can use for locking
        handles = []
        for n in range(50):
            path = os.path.join(handles_dir, ('file-%s' % n))
            handles.append(open(path, 'w'))

        # Loop over all the handles and try locking the file
        # without blocking, keep a count of how many files we
        # were able to lock and then unlock. If the lock fails
        # we get an IOError and bail out with bad exit code
        count = 0
        for handle in handles:
            try:
                fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
                count += 1
                fcntl.flock(handle, fcntl.LOCK_UN)
            except OSError:
                # Could not flock: exit the child with a failure status the
                # parent can detect.
                os._exit(2)
            finally:
                handle.close()
        return out_queue.put(count)


class LockTestCase(test_base.BaseTestCase):
    """Tests for lockutils.lock / synchronized (internal and external)."""

    def setUp(self):
        super().setUp()
        self.config = self.useFixture(config.Config(lockutils.CONF)).config

    def test_synchronized_wrapped_function_metadata(self):
        """The decorator must preserve __doc__ and __name__ of the target."""
        @lockutils.synchronized('whatever', 'test-')
        def foo():
            """Bar."""
            pass

        self.assertEqual('Bar.', foo.__doc__, "Wrapped function's docstring "
                                              "got lost")
        self.assertEqual('foo', foo.__name__, "Wrapped function's name "
                                              "got mangled")

    def test_lock_internally_different_collections(self):
        # Two distinct Semaphores collections must not block each other even
        # for the same lock name.
        s1 = lockutils.Semaphores()
        s2 = lockutils.Semaphores()
        trigger = threading.Event()
        who_ran = collections.deque()

        def f(name, semaphores, pull_trigger):
            with lockutils.internal_lock('testing', semaphores=semaphores):
                if pull_trigger:
                    trigger.set()
                else:
                    trigger.wait()
                who_ran.append(name)

        threads = [
            threading.Thread(target=f, args=(1, s1, True)),
            threading.Thread(target=f, args=(2, s2, False)),
        ]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertEqual([1, 2], sorted(who_ran))

    def test_lock_internally(self):
        """We can lock across multiple threads."""
        saved_sem_num = len(lockutils._semaphores)
        seen_threads = list()

        def f(_id):
            with lockutils.lock('testlock2', 'test-', external=False):
                for x in range(10):
                    seen_threads.append(_id)

        threads = []
        for i in range(10):
            thread = threading.Thread(target=f, args=(i,))
            threads.append(thread)
            thread.start()

        for thread in threads:
            thread.join()

        self.assertEqual(100, len(seen_threads))
        # Looking at the seen threads, split it into chunks of 10, and verify
        # that the last 9 match the first in each chunk.
        for i in range(10):
            for j in range(9):
                self.assertEqual(seen_threads[i * 10],
                                 seen_threads[i * 10 + 1 + j])

        self.assertEqual(saved_sem_num, len(lockutils._semaphores),
                         "Semaphore leak detected")

    def test_lock_internal_fair(self):
        """Check that we're actually fair."""

        def f(_id):
            with lockutils.lock('testlock', 'test-',
                                external=False, fair=True):
                lock_holder.append(_id)

        lock_holder = []
        threads = []
        # While holding the fair lock, spawn a bunch of threads that all try
        # to acquire the lock.  They will all block.  Then release the lock
        # and see what happens.
        with lockutils.lock('testlock', 'test-', external=False, fair=True):
            for i in range(10):
                thread = threading.Thread(target=f, args=(i,))
                threads.append(thread)
                thread.start()
                # Allow some time for the new thread to get queued onto the
                # list of pending writers before continuing.  This is gross
                # but there's no way around it without using knowledge of
                # fasteners internals.
                time.sleep(0.5)
        # Wait for all threads.
        for thread in threads:
            thread.join()

        self.assertEqual(10, len(lock_holder))
        # Check that the threads each got the lock in fair order.
        for i in range(10):
            self.assertEqual(i, lock_holder[i])

    def test_fair_lock_with_semaphore(self):
        # fair=True with custom semaphores is unsupported and must raise.
        def do_test():
            s = lockutils.Semaphores()
            with lockutils.lock('testlock', 'test-', semaphores=s, fair=True):
                pass
        self.assertRaises(NotImplementedError, do_test)

    def test_fair_lock_with_nonblocking(self):
        # fair=True with blocking=False is unsupported and must raise.
        def do_test():
            with lockutils.lock('testlock', 'test-', fair=True,
                                blocking=False):
                pass
        self.assertRaises(NotImplementedError, do_test)

    def test_nested_synchronized_external_works(self):
        """We can nest external syncs."""
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
        sentinel = object()

        @lockutils.synchronized('testlock1', 'test-', external=True)
        def outer_lock():

            @lockutils.synchronized('testlock2', 'test-', external=True)
            def inner_lock():
                return sentinel
            return inner_lock()

        self.assertEqual(sentinel, outer_lock())

    def _do_test_lock_externally(self):
        """We can lock across multiple processes."""
        children = []
        for n in range(50):
            queue = multiprocessing.Queue()
            proc = multiprocessing.Process(
                target=lock_files,
                args=(tempfile.mkdtemp(), queue))
            proc.start()
            children.append((proc, queue))
        for child, queue in children:
            child.join()
            # Every child must have locked all 50 of its files.
            count = queue.get(block=False)
            self.assertEqual(50, count)

    def test_lock_externally(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        self._do_test_lock_externally()

    def test_lock_externally_lock_dir_not_exist(self):
        # The lock dir is created on demand if it does not exist yet.
        lock_dir = tempfile.mkdtemp()
        os.rmdir(lock_dir)
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        self._do_test_lock_externally()

    def test_lock_with_prefix(self):
        # TODO(efried): Embetter this test
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')
        foo = lockutils.lock_with_prefix('mypfix-')

        with foo('mylock', external=True):
            # We can't check much
            pass

    def test_synchronized_with_prefix(self):
        lock_name = 'mylock'
        lock_pfix = 'mypfix-'

        foo = lockutils.synchronized_with_prefix(lock_pfix)

        @foo(lock_name, external=True)
        def bar(dirpath, pfix, name):
            return True

        lock_dir = tempfile.mkdtemp()
        self.config(lock_path=lock_dir, group='oslo_concurrency')

        self.assertTrue(bar(lock_dir, lock_pfix, lock_name))

    def test_synchronized_without_prefix(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        @lockutils.synchronized('lock', external=True)
        def test_without_prefix():
            # We can't check much
            pass

        test_without_prefix()

    def test_synchronized_prefix_without_hypen(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        @lockutils.synchronized('lock', 'hypen', True)
        def test_without_hypen():
            # We can't check much
            pass

        test_without_hypen()

    def test_contextlock(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        # Note(flaper87): Lock is not external, which means
        # a semaphore will be yielded
        with lockutils.lock("test") as sem:
            self.assertIsInstance(sem, threading.Semaphore)

            # NOTE(flaper87): Lock is external so an InterProcessLock
            # will be yielded.
            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

            with lockutils.lock("test1", external=True) as lock1:
                self.assertIsInstance(lock1, lockutils.InterProcessLock)

    def test_contextlock_unlocks(self):
        self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

        with lockutils.lock("test") as sem:
            self.assertIsInstance(sem, threading.Semaphore)

            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

            # NOTE(flaper87): Lock should be free
            with lockutils.lock("test2", external=True) as lock:
                self.assertTrue(lock.exists())

        # NOTE(flaper87): Lock should be free
        # but semaphore should already exist.
        with lockutils.lock("test") as sem2:
            self.assertEqual(sem, sem2)

    @mock.patch('logging.Logger.info')
    @mock.patch('os.remove')
    @mock.patch('oslo_concurrency.lockutils._get_lock_path')
    def test_remove_lock_external_file_exists(self, path_mock, remove_mock,
                                              log_mock):
        # Removing an existing lock file must not log anything.
        lockutils.remove_external_lock_file(mock.sentinel.name,
                                            mock.sentinel.prefix,
                                            mock.sentinel.lock_path)

        path_mock.assert_called_once_with(mock.sentinel.name,
                                          mock.sentinel.prefix,
                                          mock.sentinel.lock_path)
        remove_mock.assert_called_once_with(path_mock.return_value)
        log_mock.assert_not_called()

    @mock.patch('logging.Logger.warning')
    @mock.patch('os.remove', side_effect=OSError(errno.ENOENT, None))
    @mock.patch('oslo_concurrency.lockutils._get_lock_path')
    def test_remove_lock_external_file_doesnt_exists(self, path_mock,
                                                     remove_mock, log_mock):
        # ENOENT is tolerated silently: the file is already gone.
        lockutils.remove_external_lock_file(mock.sentinel.name,
                                            mock.sentinel.prefix,
                                            mock.sentinel.lock_path)
        path_mock.assert_called_once_with(mock.sentinel.name,
                                          mock.sentinel.prefix,
                                          mock.sentinel.lock_path)
        remove_mock.assert_called_once_with(path_mock.return_value)
        log_mock.assert_not_called()

    @mock.patch('logging.Logger.warning')
    @mock.patch('os.remove', side_effect=OSError(errno.EPERM, None))
    @mock.patch('oslo_concurrency.lockutils._get_lock_path')
    def test_remove_lock_external_file_permission_error(
            self, path_mock, remove_mock, log_mock):
        # Any other OSError (e.g. EPERM) is logged as a warning.
        lockutils.remove_external_lock_file(mock.sentinel.name,
                                            mock.sentinel.prefix,
                                            mock.sentinel.lock_path)
        path_mock.assert_called_once_with(mock.sentinel.name,
                                          mock.sentinel.prefix,
                                          mock.sentinel.lock_path)
        remove_mock.assert_called_once_with(path_mock.return_value)
        log_mock.assert_called()

    def test_no_slash_in_b64(self):
        # base64(sha1(foobar)) has a slash in it
        with lockutils.lock("foobar"):
            pass


class FileBasedLockingTestCase(test_base.BaseTestCase):
    """Tests for external (file-based) locks across processes and threads."""

    def setUp(self):
        super().setUp()
        self.lock_dir = tempfile.mkdtemp()

    def test_lock_file_exists(self):
        lock_file = os.path.join(self.lock_dir, 'lock-file')

        @lockutils.synchronized('lock-file', external=True,
                                lock_path=self.lock_dir)
        def foo():
            self.assertTrue(os.path.exists(lock_file))

        foo()

    def test_interprocess_lock(self):
        lock_file = os.path.join(self.lock_dir, 'processlock')

        pid = os.fork()
        if pid:
            # Make sure the child grabs the lock first
            start = time.time()
            while not os.path.exists(lock_file):
                if time.time() - start > 5:
                    self.fail('Timed out waiting for child to grab lock')
                time.sleep(0)
            lock1 = lockutils.InterProcessLock('foo')
            lock1.lockfile = open(lock_file, 'w')
            # NOTE(bnemec): There is a brief window between when the lock file
            # is created and when it actually becomes locked. If we happen to
            # context switch in that window we may succeed in locking the
            # file. Keep retrying until we either get the expected exception
            # or timeout waiting.
            while time.time() - start < 5:
                try:
                    lock1.trylock()
                    lock1.unlock()
                    time.sleep(0)
                except OSError:
                    # This is what we expect to happen
                    break
            else:
                self.fail('Never caught expected lock exception')
            # We don't need to wait for the full sleep in the child here
            os.kill(pid, signal.SIGKILL)
        else:
            try:
                lock2 = lockutils.InterProcessLock('foo')
                lock2.lockfile = open(lock_file, 'w')
                have_lock = False
                while not have_lock:
                    try:
                        lock2.trylock()
                        have_lock = True
                    except OSError:
                        pass
            finally:
                # NOTE(bnemec): This is racy, but I don't want to add any
                # synchronization primitives that might mask a problem
                # with the one we're trying to test here.
                time.sleep(.5)
                os._exit(0)

    def test_interprocess_nonblocking_external_lock(self):
        """Check that we're not actually blocking between processes."""

        nb_calls = multiprocessing.Value('i', 0)

        @lockutils.synchronized('foo', blocking=False, external=True,
                                lock_path=self.lock_dir)
        def foo(param):
            """Simulate a long-running operation in a process."""
            param.value += 1
            time.sleep(.5)

        def other(param):
            foo(param)

        process = multiprocessing.Process(target=other, args=(nb_calls, ))
        process.start()
        # Make sure the other process grabs the lock
        start = time.time()
        while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
            if time.time() - start > 5:
                self.fail('Timed out waiting for process to grab lock')
            time.sleep(0)
        process1 = multiprocessing.Process(target=other, args=(nb_calls, ))
        process1.start()
        process1.join()
        process.join()
        # Only the first caller may have run; the second must have bailed out
        # immediately because blocking=False.
        self.assertEqual(1, nb_calls.value)

    def test_interthread_external_lock(self):
        call_list = []

        @lockutils.synchronized('foo', external=True, lock_path=self.lock_dir)
        def foo(param):
            """Simulate a long-running threaded operation."""
            call_list.append(param)
            # NOTE(bnemec): This is racy, but I don't want to add any
            # synchronization primitives that might mask a problem
            # with the one we're trying to test here.
            time.sleep(.5)
            call_list.append(param)

        def other(param):
            foo(param)

        thread = threading.Thread(target=other, args=('other',))
        thread.start()
        # Make sure the other thread grabs the lock
        # NOTE(bnemec): File locks do not actually work between threads, so
        # this test is verifying that the local semaphore is still enforcing
        # external locks in that case.  This means this test does not have
        # the same race problem as the process test above because when the
        # file is created the semaphore has already been grabbed.
        start = time.time()
        while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
            if time.time() - start > 5:
                self.fail('Timed out waiting for thread to grab lock')
            time.sleep(0)
        thread1 = threading.Thread(target=other, args=('main',))
        thread1.start()
        thread1.join()
        thread.join()
        self.assertEqual(['other', 'other', 'main', 'main'], call_list)

    def test_interthread_nonblocking_external_lock(self):
        call_list = []

        @lockutils.synchronized('foo', external=True, blocking=False,
                                lock_path=self.lock_dir)
        def foo(param):
            """Simulate a long-running threaded operation."""
            call_list.append(param)
            time.sleep(.5)
            call_list.append(param)

        def other(param):
            foo(param)

        thread = threading.Thread(target=other, args=('other',))
        thread.start()
        # Make sure the other thread grabs the lock
        start = time.time()
        while not os.path.exists(os.path.join(self.lock_dir, 'foo')):
            if time.time() - start > 5:
                self.fail('Timed out waiting for thread to grab lock')
            time.sleep(0)
        thread1 = threading.Thread(target=other, args=('main',))
        thread1.start()
        thread1.join()
        thread.join()
        # The second thread must not have run at all (blocking=False).
        self.assertEqual(['other', 'other'], call_list)

    def test_interthread_nonblocking_internal_lock(self):
        call_list = []

        @lockutils.synchronized('foo', blocking=False,
                                lock_path=self.lock_dir)
        def foo(param):
            # Simulate a long-running threaded operation.
            call_list.append(param)
            time.sleep(.5)
            call_list.append(param)

        def other(param):
            foo(param)

        thread = threading.Thread(target=other, args=('other',))
        thread.start()
        # Make sure the other thread grabs the lock
        start = time.time()
        while not call_list:
            if time.time() - start > 5:
                self.fail('Timed out waiting for thread to grab lock')
            time.sleep(0)
        thread1 = threading.Thread(target=other, args=('main',))
        thread1.start()
        thread1.join()
        thread.join()
        # The second thread must not have run at all (blocking=False).
        self.assertEqual(['other', 'other'], call_list)

    def test_non_destructive(self):
        # Taking an external lock on an existing file must not truncate it.
        lock_file = os.path.join(self.lock_dir, 'not-destroyed')
        with open(lock_file, 'w') as f:
            f.write('test')
        with lockutils.lock('not-destroyed', external=True,
                            lock_path=self.lock_dir):
            with open(lock_file) as f:
                self.assertEqual('test', f.read())


class LockutilsModuleTestCase(test_base.BaseTestCase):
    """Tests for the lockutils command-line wrapper entry point."""

    def setUp(self):
        super().setUp()
        # Save and clear OSLO_LOCK_PATH so the wrapper controls it.
        self.old_env = os.environ.get('OSLO_LOCK_PATH')
        if self.old_env is not None:
            del os.environ['OSLO_LOCK_PATH']

    def tearDown(self):
        if self.old_env is not None:
            os.environ['OSLO_LOCK_PATH'] = self.old_env
        super().tearDown()

    def test_main(self):
        # The wrapper must export a valid OSLO_LOCK_PATH to the child.
        script = '\n'.join([
            'import os',
            'lock_path = os.environ.get("OSLO_LOCK_PATH")',
            'assert lock_path is not None',
            'assert os.path.isdir(lock_path)',
        ])
        argv = ['', sys.executable, '-c', script]
        retval = lockutils._lock_wrapper(argv)
        self.assertEqual(0, retval, "Bad OSLO_LOCK_PATH has been set")

    def test_return_value_maintained(self):
        # The wrapper must propagate the child's exit status.
        script = '\n'.join([
            'import sys',
            'sys.exit(1)',
        ])
        argv = ['', sys.executable, '-c', script]
        retval = lockutils._lock_wrapper(argv)
        self.assertEqual(1, retval)

    def test_direct_call_explodes(self):
        # Running the module without a command to wrap must fail.
        cmd = [sys.executable, '-m', 'oslo_concurrency.lockutils']
        with open(os.devnull, 'w') as devnull:
            retval = subprocess.call(cmd, stderr=devnull)
            self.assertEqual(1, retval)


class TestLockFixture(test_base.BaseTestCase):
    """Tests for the LockFixture test helper."""

    def setUp(self):
        super().setUp()
        self.config = self.useFixture(config.Config(lockutils.CONF)).config
        self.tempdir = tempfile.mkdtemp()

    def _check_in_lock(self):
        self.assertTrue(self.lock.exists())

    def tearDown(self):
        # This runs before the fixture's own cleanup releases the lock, so
        # the lock must still be held here.
        self._check_in_lock()
        super().tearDown()

    def test_lock_fixture(self):
        # Setup lock fixture to test that teardown is inside the lock
        self.config(lock_path=self.tempdir, group='oslo_concurrency')
        fixture = fixtures.LockFixture('test-lock')
        self.useFixture(fixture)
        self.lock = fixture.lock


class TestGetLockPath(test_base.BaseTestCase):
    """Tests for get_lock_path() configuration resolution."""

    def setUp(self):
        super().setUp()
        self.conf = self.useFixture(config.Config(lockutils.CONF)).conf

    def test_get_default(self):
        lockutils.set_defaults(lock_path='/the/path')
        self.assertEqual('/the/path', lockutils.get_lock_path(self.conf))

    def test_get_override(self):
        lockutils._register_opts(self.conf)
        self.conf.set_override('lock_path', '/alternate/path',
                               group='oslo_concurrency')
        self.assertEqual('/alternate/path', lockutils.get_lock_path(self.conf))
--------------------------------------------------------------------------------
/oslo_concurrency/tests/unit/test_lockutils_eventlet.py:
--------------------------------------------------------------------------------
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import tempfile

import eventlet
from eventlet import greenpool
from oslotest import base as test_base

from oslo_concurrency import lockutils


class TestFileLocks(test_base.BaseTestCase):
    """Eventlet-specific tests for InterProcessLock under green threads."""

    def test_concurrent_green_lock_succeeds(self):
        """Verify spawn_n greenthreads with two locks run concurrently."""
        tmpdir = tempfile.mkdtemp()
        self.completed = False

        def locka(wait):
            a = lockutils.InterProcessLock(os.path.join(tmpdir, 'a'))
            with a:
                wait.wait()
            self.completed = True

        def lockb(wait):
            b = lockutils.InterProcessLock(os.path.join(tmpdir, 'b'))
            with b:
                wait.wait()

        wait1 = eventlet.event.Event()
        wait2 = eventlet.event.Event()
        pool = greenpool.GreenPool()
        pool.spawn_n(locka, wait1)
        pool.spawn_n(lockb, wait2)
        # Wake lockb first; if locka's file lock blocked the hub, lockb
        # could never finish and waitall() would hang.
        wait2.send()
        eventlet.sleep(0)
        wait1.send()
        pool.waitall()

        self.assertTrue(self.completed)


class TestInternalLock(test_base.BaseTestCase):
    """Internal lock semantics when threads are eventlet greenthreads."""

    def _test_internal_lock_with_two_threads(self, fair, spawn):
        # Shared events used to observe the second greenthread's progress.
        self.other_started = eventlet.event.Event()
        self.other_finished = eventlet.event.Event()

        def other():
            self.other_started.send('started')
            with lockutils.lock("my-lock", fair=fair):
                pass
            self.other_finished.send('finished')

        with lockutils.lock("my-lock", fair=fair):
            # holding the lock and starting another thread that also wants to
            # take it before finishes
            spawn(other)
            # let the other thread start
            self.other_started.wait()
            eventlet.sleep(0)
            # the other thread should not have finished as it would need the
            # lock we are holding
            self.assertIsNone(
                self.other_finished.wait(0.5),
                "Two threads was able to take the same lock",
            )

        # we released the lock, let the other thread take it and run to
        # completion
        result = self.other_finished.wait()
        self.assertEqual('finished', result)

    def test_lock_with_spawn(self):
        self._test_internal_lock_with_two_threads(
            fair=False, spawn=eventlet.spawn
        )

    def test_lock_with_spawn_n(self):
        self._test_internal_lock_with_two_threads(
            fair=False, spawn=eventlet.spawn_n
        )

    def test_fair_lock_with_spawn(self):
        self._test_internal_lock_with_two_threads(
            fair=True, spawn=eventlet.spawn
        )

    def test_fair_lock_with_spawn_n(self):
        self._test_internal_lock_with_two_threads(
            fair=True, spawn=eventlet.spawn_n
        )
--------------------------------------------------------------------------------
/oslo_concurrency/tests/unit/test_processutils.py:
--------------------------------------------------------------------------------
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
15 | 16 | import errno 17 | import io 18 | import logging 19 | import multiprocessing 20 | import os 21 | import pickle 22 | import resource 23 | import socket 24 | import stat 25 | import subprocess 26 | import sys 27 | import tempfile 28 | import time 29 | from unittest import mock 30 | 31 | import fixtures 32 | from oslotest import base as test_base 33 | 34 | from oslo_concurrency import processutils 35 | 36 | 37 | PROCESS_EXECUTION_ERROR_LOGGING_TEST = """#!/bin/bash 38 | exit 41""" 39 | 40 | TEST_EXCEPTION_AND_MASKING_SCRIPT = """#!/bin/bash 41 | # This is to test stdout and stderr 42 | # and the command returned in an exception 43 | # when a non-zero exit code is returned 44 | echo onstdout --password='"secret"' 45 | echo onstderr --password='"secret"' 1>&2 46 | exit 38""" 47 | 48 | # This byte sequence is undecodable from most encoding 49 | UNDECODABLE_BYTES = b'[a\x80\xe9\xff]' 50 | 51 | TRUE_UTILITY = (sys.platform.startswith('darwin') and 52 | '/usr/bin/true' or '/bin/true') 53 | 54 | 55 | class UtilsTest(test_base.BaseTestCase): 56 | # NOTE(jkoelker) Moar tests from nova need to be ported. But they 57 | # need to be mock'd out. Currently they require actually 58 | # running code. 
59 | def test_execute_unknown_kwargs(self): 60 | self.assertRaises(processutils.UnknownArgumentError, 61 | processutils.execute, 62 | hozer=True) 63 | 64 | @mock.patch.object(multiprocessing, 'cpu_count', return_value=8) 65 | def test_get_worker_count(self, mock_cpu_count): 66 | self.assertEqual(8, processutils.get_worker_count()) 67 | 68 | @mock.patch.object(multiprocessing, 'cpu_count', 69 | side_effect=NotImplementedError()) 70 | def test_get_worker_count_cpu_count_not_implemented(self, 71 | mock_cpu_count): 72 | self.assertEqual(1, processutils.get_worker_count()) 73 | 74 | def test_execute_with_callback(self): 75 | on_execute_callback = mock.Mock() 76 | on_completion_callback = mock.Mock() 77 | processutils.execute(TRUE_UTILITY) 78 | self.assertEqual(0, on_execute_callback.call_count) 79 | self.assertEqual(0, on_completion_callback.call_count) 80 | 81 | processutils.execute(TRUE_UTILITY, on_execute=on_execute_callback, 82 | on_completion=on_completion_callback) 83 | self.assertEqual(1, on_execute_callback.call_count) 84 | self.assertEqual(1, on_completion_callback.call_count) 85 | 86 | @mock.patch.object(subprocess.Popen, "communicate") 87 | def test_execute_with_callback_and_errors(self, mock_comm): 88 | on_execute_callback = mock.Mock() 89 | on_completion_callback = mock.Mock() 90 | 91 | def fake_communicate(*args, timeout=None): 92 | raise OSError("Broken pipe") 93 | 94 | mock_comm.side_effect = fake_communicate 95 | 96 | self.assertRaises(IOError, 97 | processutils.execute, 98 | TRUE_UTILITY, 99 | on_execute=on_execute_callback, 100 | on_completion=on_completion_callback) 101 | self.assertEqual(1, on_execute_callback.call_count) 102 | self.assertEqual(1, on_completion_callback.call_count) 103 | 104 | def test_execute_with_preexec_fn(self): 105 | # NOTE(dims): preexec_fn is set to a callable object, this object 106 | # will be called in the child process just before the child is 107 | # executed. 
class ProcessExecutionErrorTest(test_base.BaseTestCase):
    """Tests for ProcessExecutionError and the failure paths of execute()."""

    def test_defaults(self):
        err = processutils.ProcessExecutionError()
        self.assertIn('None\n', str(err))
        self.assertIn('code: -\n', str(err))

    def test_with_description(self):
        description = 'The Narwhal Bacons at Midnight'
        err = processutils.ProcessExecutionError(description=description)
        self.assertIn(description, str(err))

    def test_with_exit_code(self):
        exit_code = 0
        err = processutils.ProcessExecutionError(exit_code=exit_code)
        self.assertIn(str(exit_code), str(err))

    def test_with_cmd(self):
        cmd = 'telinit'
        err = processutils.ProcessExecutionError(cmd=cmd)
        self.assertIn(cmd, str(err))

    def test_with_stdout(self):
        stdout = """
        Lo, praise of the prowess of people-kings
        of spear-armed Danes, in days long sped,
        we have heard, and what honor the athelings won!
        Oft Scyld the Scefing from squadroned foes,
        from many a tribe, the mead-bench tore,
        awing the earls. Since erst he lay
        friendless, a foundling, fate repaid him:
        for he waxed under welkin, in wealth he throve,
        till before him the folk, both far and near,
        who house by the whale-path, heard his mandate,
        gave him gifts: a good king he!
        To him an heir was afterward born,
        a son in his halls, whom heaven sent
        to favor the folk, feeling their woe
        that erst they had lacked an earl for leader
        so long a while; the Lord endowed him,
        the Wielder of Wonder, with world's renown.
        """.strip()
        err = processutils.ProcessExecutionError(stdout=stdout)
        # NOTE: the leftover debug print(str(err)) was removed; the
        # assertion below is the actual check.
        self.assertIn('people-kings', str(err))

    def test_with_stderr(self):
        stderr = 'Cottonian library'
        err = processutils.ProcessExecutionError(stderr=stderr)
        self.assertIn(stderr, str(err))

    def test_retry_on_failure(self):
        # The script below always exits non-zero so that execute() retries.
        # It counts its runs in a state file and records 'failure' there if
        # stdin was ever not delivered.
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
    echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
    exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
    runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
            fp.close()
            os.chmod(tmpfilename, 0o755)
            self.assertRaises(processutils.ProcessExecutionError,
                              processutils.execute,
                              tmpfilename, tmpfilename2, attempts=10,
                              process_input=b'foo',
                              delay_on_retry=False)
            with open(tmpfilename2) as fp:
                runs = fp.read()
            # BUG FIX: the original call passed the failure message as the
            # second positional argument and runs.strip() as msg, so it
            # compared two constant strings and could never fail.  Compare
            # the actual state-file content instead.
            self.assertNotEqual('failure', runs.strip(),
                                'stdin did not always get passed correctly')
            runs = int(runs.strip())
            self.assertEqual(10, runs, 'Ran %d times instead of 10.'
                                       % (runs,))
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    def test_unknown_kwargs_raises_error(self):
        self.assertRaises(processutils.UnknownArgumentError,
                          processutils.execute,
                          '/usr/bin/env', 'true',
                          this_is_not_a_valid_kwarg=True)

    def test_check_exit_code_boolean(self):
        processutils.execute('/usr/bin/env', 'false', check_exit_code=False)
        self.assertRaises(processutils.ProcessExecutionError,
                          processutils.execute,
                          '/usr/bin/env', 'false', check_exit_code=True)

    def test_check_cwd(self):
        tmpdir = tempfile.mkdtemp()
        # Clean up the temporary directory (it stays empty, so rmdir is
        # sufficient); the original test leaked it.
        self.addCleanup(os.rmdir, tmpdir)
        out, err = processutils.execute('/usr/bin/env',
                                        'sh', '-c', 'pwd',
                                        cwd=tmpdir)
        self.assertIn(tmpdir, out)

    def test_process_input_with_string(self):
        code = ';'.join(('import sys',
                         'print(len(sys.stdin.readlines()))'))
        args = [sys.executable, '-c', code]
        # Renamed from 'input' to avoid shadowing the builtin.
        process_input = "\n".join(['foo', 'bar', 'baz'])
        stdout, stderr = processutils.execute(*args,
                                              process_input=process_input)
        self.assertEqual("3", stdout.rstrip())

    def test_check_exit_code_list(self):
        # Only the listed exit codes are treated as success.
        processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 101',
                             check_exit_code=(101, 102))
        processutils.execute('/usr/bin/env', 'sh', '-c', 'exit 102',
                             check_exit_code=(101, 102))
        self.assertRaises(processutils.ProcessExecutionError,
                          processutils.execute,
                          '/usr/bin/env', 'sh', '-c', 'exit 103',
                          check_exit_code=(101, 102))
        self.assertRaises(processutils.ProcessExecutionError,
                          processutils.execute,
                          '/usr/bin/env', 'sh', '-c', 'exit 0',
                          check_exit_code=(101, 102))

    def test_no_retry_on_success(self):
        fd, tmpfilename = tempfile.mkstemp()
        _, tmpfilename2 = tempfile.mkstemp()
        try:
            fp = os.fdopen(fd, 'w+')
            fp.write("""#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
""")
            fp.close()
            os.chmod(tmpfilename, 0o755)
            processutils.execute(tmpfilename,
                                 tmpfilename2,
                                 process_input=b'foo',
                                 attempts=2)
        finally:
            os.unlink(tmpfilename)
            os.unlink(tmpfilename2)

    # This test and the one below ensures that when communicate raises
    # an OSError, we do the right thing(s)
    def test_exception_on_communicate_error(self):
        mock_comm = self.useFixture(fixtures.MockPatch(
            'subprocess.Popen.communicate',
            side_effect=OSError(errno.EAGAIN, 'fake-test')))

        self.assertRaises(OSError,
                          processutils.execute,
                          '/usr/bin/env',
                          'false',
                          check_exit_code=False)

        self.assertEqual(1, mock_comm.mock.call_count)

    def test_retry_on_communicate_error(self):
        mock_comm = self.useFixture(fixtures.MockPatch(
            'subprocess.Popen.communicate',
            side_effect=OSError(errno.EAGAIN, 'fake-test')))

        self.assertRaises(OSError,
                          processutils.execute,
                          '/usr/bin/env',
                          'false',
                          check_exit_code=False,
                          attempts=5)

        self.assertEqual(5, mock_comm.mock.call_count)

    def _test_and_check_logging_communicate_errors(self, log_errors=None,
                                                   attempts=None):
        # Helper: force communicate() to fail and verify the OSError is
        # logged the expected way for the given log_errors/attempts combo.
        mock_comm = self.useFixture(fixtures.MockPatch(
            'subprocess.Popen.communicate',
            side_effect=OSError(errno.EAGAIN, 'fake-test')))

        fixture = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        kwargs = {}

        if log_errors:
            kwargs.update({"log_errors": log_errors})

        if attempts:
            kwargs.update({"attempts": attempts})

        self.assertRaises(OSError,
                          processutils.execute,
                          '/usr/bin/env',
                          'false',
                          **kwargs)

        self.assertEqual(attempts if attempts else 1,
                         mock_comm.mock.call_count)
        self.assertIn('Got an OSError', fixture.output)
        self.assertIn('errno: %d' % errno.EAGAIN, fixture.output)
        self.assertIn("'/usr/bin/env false'", fixture.output)

    def test_logging_on_communicate_error_1(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_FINAL_ERROR,
            attempts=None)

    def test_logging_on_communicate_error_2(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_FINAL_ERROR,
            attempts=1)

    def test_logging_on_communicate_error_3(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_FINAL_ERROR,
            attempts=5)

    def test_logging_on_communicate_error_4(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_ALL_ERRORS,
            attempts=None)

    def test_logging_on_communicate_error_5(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_ALL_ERRORS,
            attempts=1)

    def test_logging_on_communicate_error_6(self):
        self._test_and_check_logging_communicate_errors(
            log_errors=processutils.LOG_ALL_ERRORS,
            attempts=5)

    def test_with_env_variables(self):
        env_vars = {'SUPER_UNIQUE_VAR': 'The answer is 42'}

        out, err = processutils.execute('/usr/bin/env', env_variables=env_vars)
        self.assertIsInstance(out, str)
        self.assertIsInstance(err, str)

        self.assertIn('SUPER_UNIQUE_VAR=The answer is 42', out)

    def test_binary(self):
        env_vars = {'SUPER_UNIQUE_VAR': 'The answer is 42'}

        out, err = processutils.execute('/usr/bin/env',
                                        env_variables=env_vars,
                                        binary=True)
        self.assertIsInstance(out, bytes)
        self.assertIsInstance(err, bytes)

        self.assertIn(b'SUPER_UNIQUE_VAR=The answer is 42', out)

    def test_exception_and_masking(self):
        tmpfilename = self.create_tempfiles(
            [["test_exceptions_and_masking",
              TEST_EXCEPTION_AND_MASKING_SCRIPT]], ext='bash')[0]

        os.chmod(tmpfilename, (stat.S_IRWXU |
                               stat.S_IRGRP |
                               stat.S_IXGRP |
                               stat.S_IROTH |
                               stat.S_IXOTH))

        err = self.assertRaises(processutils.ProcessExecutionError,
                                processutils.execute,
                                tmpfilename, 'password="secret"',
                                'something')

        # The exception must carry the exit code and masked command output.
        self.assertEqual(38, err.exit_code)
        self.assertIsInstance(err.stdout, str)
        self.assertIsInstance(err.stderr, str)
        self.assertIn('onstdout --password="***"', err.stdout)
        self.assertIn('onstderr --password="***"', err.stderr)
        self.assertEqual(' '.join([tmpfilename,
                                   'password="***"',
                                   'something']),
                         err.cmd)
        self.assertNotIn('secret', str(err))

    def execute_undecodable_bytes(self, out_bytes, err_bytes,
                                  exitcode=0, binary=False):
        # Helper: run a python child that emits raw (possibly undecodable)
        # bytes on stdout/stderr.  %a embeds the bytes as an ascii-safe
        # Python literal in the generated code.
        code = ';'.join(('import sys',
                         'sys.stdout.buffer.write(%a)' % out_bytes,
                         'sys.stdout.flush()',
                         'sys.stderr.buffer.write(%a)' % err_bytes,
                         'sys.stderr.flush()',
                         'sys.exit(%s)' % exitcode))

        return processutils.execute(sys.executable, '-c', code, binary=binary)

    def check_undecodable_bytes(self, binary):
        out_bytes = b'out: ' + UNDECODABLE_BYTES
        err_bytes = b'err: ' + UNDECODABLE_BYTES
        out, err = self.execute_undecodable_bytes(out_bytes, err_bytes,
                                                  binary=binary)
        if not binary:
            # Text mode decodes with the surrogateescape error handler,
            # matching os.fsdecode().
            self.assertEqual(os.fsdecode(out_bytes), out)
            self.assertEqual(os.fsdecode(err_bytes), err)
        else:
            self.assertEqual(out, out_bytes)
            self.assertEqual(err, err_bytes)

    def test_undecodable_bytes(self):
        self.check_undecodable_bytes(False)

    def test_binary_undecodable_bytes(self):
        self.check_undecodable_bytes(True)

    def check_undecodable_bytes_error(self, binary):
        out_bytes = b'out: password="secret1" ' + UNDECODABLE_BYTES
        err_bytes = b'err: password="secret2" ' + UNDECODABLE_BYTES
        exc = self.assertRaises(processutils.ProcessExecutionError,
                                self.execute_undecodable_bytes,
                                out_bytes, err_bytes, exitcode=1,
                                binary=binary)

        # Passwords must be masked in the exception's captured output.
        out = exc.stdout
        err = exc.stderr
        out_bytes = b'out: password="***" ' + UNDECODABLE_BYTES
        err_bytes = b'err: password="***" ' + UNDECODABLE_BYTES
        self.assertEqual(os.fsdecode(out_bytes), out)
        self.assertEqual(os.fsdecode(err_bytes), err)

    def test_undecodable_bytes_error(self):
        self.check_undecodable_bytes_error(False)

    def test_binary_undecodable_bytes_error(self):
        self.check_undecodable_bytes_error(True)

    def test_picklable(self):
        exc = processutils.ProcessExecutionError(
            stdout='my stdout', stderr='my stderr',
            exit_code=42, cmd='my cmd',
            description='my description')
        exc_message = str(exc)

        exc = pickle.loads(pickle.dumps(exc))
        self.assertEqual('my stdout', exc.stdout)
        self.assertEqual('my stderr', exc.stderr)
        self.assertEqual(42, exc.exit_code)
        self.assertEqual('my cmd', exc.cmd)
        self.assertEqual('my description', exc.description)
        self.assertEqual(str(exc), exc_message)

    def test_timeout(self):
        start = time.time()
        # FIXME(dtantsur): I'm not sure what fancy mocking is happening in unit
        # tests here, but I cannot check for a more precise exception because
        # subprocess.TimeoutException != subprocess.TimeoutException.
        # Checking the error message instead.
        self.assertRaisesRegex(Exception,
                               'timed out after 1 seconds',
                               processutils.execute,
                               '/usr/bin/env', 'sh', '-c', 'sleep 10',
                               timeout=1)
        self.assertLess(time.time(), start + 5)
self._test_and_check(log_errors=processutils.LOG_FINAL_ERROR, 545 | attempts=3) 546 | 547 | def test_multiattempt_with_log_errors_all(self): 548 | self._test_and_check(log_errors=processutils.LOG_ALL_ERRORS, 549 | attempts=3) 550 | 551 | 552 | def fake_execute(*cmd, **kwargs): 553 | return 'stdout', 'stderr' 554 | 555 | 556 | def fake_execute_raises(*cmd, **kwargs): 557 | raise processutils.ProcessExecutionError(exit_code=42, 558 | stdout='stdout', 559 | stderr='stderr', 560 | cmd=['this', 'is', 'a', 561 | 'command']) 562 | 563 | 564 | class TryCmdTestCase(test_base.BaseTestCase): 565 | def test_keep_warnings(self): 566 | self.useFixture(fixtures.MonkeyPatch( 567 | 'oslo_concurrency.processutils.execute', fake_execute)) 568 | o, e = processutils.trycmd('this is a command'.split(' ')) 569 | self.assertNotEqual('', o) 570 | self.assertNotEqual('', e) 571 | 572 | def test_keep_warnings_from_raise(self): 573 | self.useFixture(fixtures.MonkeyPatch( 574 | 'oslo_concurrency.processutils.execute', fake_execute_raises)) 575 | o, e = processutils.trycmd('this is a command'.split(' '), 576 | discard_warnings=True) 577 | self.assertIsNotNone(o) 578 | self.assertNotEqual('', e) 579 | 580 | def test_discard_warnings(self): 581 | self.useFixture(fixtures.MonkeyPatch( 582 | 'oslo_concurrency.processutils.execute', fake_execute)) 583 | o, e = processutils.trycmd('this is a command'.split(' '), 584 | discard_warnings=True) 585 | self.assertIsNotNone(o) 586 | self.assertEqual('', e) 587 | 588 | 589 | class FakeSshChannel: 590 | def __init__(self, rc): 591 | self.rc = rc 592 | 593 | def recv_exit_status(self): 594 | return self.rc 595 | 596 | 597 | class FakeSshStream(io.BytesIO): 598 | def setup_channel(self, rc): 599 | self.channel = FakeSshChannel(rc) 600 | 601 | 602 | class FakeSshConnection: 603 | def __init__(self, rc, out=b'stdout', err=b'stderr'): 604 | self.rc = rc 605 | self.out = out 606 | self.err = err 607 | 608 | def exec_command(self, cmd, timeout=None): 609 | if 
class SshExecuteTestCase(test_base.BaseTestCase):
    """Tests for processutils.ssh_execute() using fake SSH connections."""

    def test_invalid_addl_env(self):
        # addl_env is not supported over SSH and must be rejected up front.
        self.assertRaises(processutils.InvalidArgumentError,
                          processutils.ssh_execute,
                          None, 'ls', addl_env='important')

    def test_invalid_process_input(self):
        # process_input is not supported over SSH and must be rejected.
        self.assertRaises(processutils.InvalidArgumentError,
                          processutils.ssh_execute,
                          None, 'ls', process_input='important')

    def test_timeout_error(self):
        # FakeSshConnection raises socket.timeout whenever a timeout is set.
        self.assertRaises(socket.timeout,
                          processutils.ssh_execute,
                          FakeSshConnection(0), 'ls',
                          timeout=10)

    def test_works(self):
        # Default (text) mode returns decoded str output.
        out, err = processutils.ssh_execute(FakeSshConnection(0), 'ls')
        self.assertEqual('stdout', out)
        self.assertEqual('stderr', err)
        self.assertIsInstance(out, str)
        self.assertIsInstance(err, str)

    def test_binary(self):
        # binary=True returns the raw bytes unmodified.
        o, e = processutils.ssh_execute(FakeSshConnection(0), 'ls',
                                        binary=True)
        self.assertEqual(b'stdout', o)
        self.assertEqual(b'stderr', e)
        self.assertIsInstance(o, bytes)
        self.assertIsInstance(e, bytes)

    def check_undecodable_bytes(self, binary):
        # Helper: undecodable output must be surrogate-escaped (matching
        # os.fsdecode) in text mode, and returned untouched in binary mode.
        out_bytes = b'out: ' + UNDECODABLE_BYTES
        err_bytes = b'err: ' + UNDECODABLE_BYTES
        conn = FakeSshConnection(0, out=out_bytes, err=err_bytes)

        out, err = processutils.ssh_execute(conn, 'ls', binary=binary)
        if not binary:
            self.assertEqual(os.fsdecode(out_bytes), out)
            self.assertEqual(os.fsdecode(err_bytes), err)
        else:
            self.assertEqual(out_bytes, out)
            self.assertEqual(err_bytes, err)

    def test_undecodable_bytes(self):
        self.check_undecodable_bytes(False)

    def test_binary_undecodable_bytes(self):
        self.check_undecodable_bytes(True)

    def check_undecodable_bytes_error(self, binary):
        # Helper: same as check_undecodable_bytes but for the failure path;
        # passwords must additionally be masked in the exception output.
        out_bytes = b'out: password="secret1" ' + UNDECODABLE_BYTES
        err_bytes = b'err: password="secret2" ' + UNDECODABLE_BYTES
        conn = FakeSshConnection(1, out=out_bytes, err=err_bytes)

        # Expected (masked) output after sanitization.
        out_bytes = b'out: password="***" ' + UNDECODABLE_BYTES
        err_bytes = b'err: password="***" ' + UNDECODABLE_BYTES

        exc = self.assertRaises(processutils.ProcessExecutionError,
                                processutils.ssh_execute,
                                conn, 'ls',
                                binary=binary, check_exit_code=True)

        out = exc.stdout
        err = exc.stderr
        self.assertEqual(os.fsdecode(out_bytes), out)
        self.assertEqual(os.fsdecode(err_bytes), err)

    def test_undecodable_bytes_error(self):
        self.check_undecodable_bytes_error(False)

    def test_binary_undecodable_bytes_error(self):
        self.check_undecodable_bytes_error(True)

    def test_fails(self):
        # Non-zero exit status raises by default.
        self.assertRaises(processutils.ProcessExecutionError,
                          processutils.ssh_execute, FakeSshConnection(1), 'ls')

    def _test_compromising_ssh(self, rc, check):
        # Helper: run a command containing a password with a mocked
        # connection and verify that secrets are masked in the returned
        # output, the raised exception and the log, for every combination
        # of exit status (rc) and check_exit_code (check).
        fixture = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        fake_stdin = io.BytesIO()

        fake_stdout = mock.Mock()
        fake_stdout.channel.recv_exit_status.return_value = rc
        fake_stdout.read.return_value = b'password="secret"'

        fake_stderr = mock.Mock()
        fake_stderr.read.return_value = b'password="foobar"'

        command = 'ls --password="bar"'

        connection = mock.Mock()
        connection.exec_command.return_value = (fake_stdin, fake_stdout,
                                                fake_stderr)

        if check and rc != -1 and rc != 0:
            # Failure path: the exception must carry masked output only.
            err = self.assertRaises(processutils.ProcessExecutionError,
                                    processutils.ssh_execute,
                                    connection, command,
                                    check_exit_code=check)

            self.assertEqual(rc, err.exit_code)
            self.assertEqual('password="***"', err.stdout)
            self.assertEqual('password="***"', err.stderr)
            self.assertEqual('ls --password="***"', err.cmd)
            self.assertNotIn('secret', str(err))
            self.assertNotIn('foobar', str(err))

            # test ssh_execute with sanitize_stdout=False
            # (the exception content is still masked even then)
            err = self.assertRaises(processutils.ProcessExecutionError,
                                    processutils.ssh_execute,
                                    connection, command,
                                    check_exit_code=check,
                                    sanitize_stdout=False)

            self.assertEqual(rc, err.exit_code)
            self.assertEqual('password="***"', err.stdout)
            self.assertEqual('password="***"', err.stderr)
            self.assertEqual('ls --password="***"', err.cmd)
            self.assertNotIn('secret', str(err))
            self.assertNotIn('foobar', str(err))
        else:
            # Success path: returned stdout/stderr are masked, and the log
            # never contains the real password.
            o, e = processutils.ssh_execute(connection, command,
                                            check_exit_code=check)
            self.assertEqual('password="***"', o)
            self.assertEqual('password="***"', e)
            self.assertIn('password="***"', fixture.output)
            self.assertNotIn('bar', fixture.output)

            # test ssh_execute with sanitize_stdout=False
            # (stdout is returned raw, stderr is still masked)
            o, e = processutils.ssh_execute(connection, command,
                                            check_exit_code=check,
                                            sanitize_stdout=False)
            self.assertEqual('password="secret"', o)
            self.assertEqual('password="***"', e)
            self.assertIn('password="***"', fixture.output)
            self.assertNotIn('bar', fixture.output)

    def test_compromising_ssh1(self):
        self._test_compromising_ssh(rc=-1, check=True)

    def test_compromising_ssh2(self):
        self._test_compromising_ssh(rc=0, check=True)

    def test_compromising_ssh3(self):
        self._test_compromising_ssh(rc=1, check=True)

    def test_compromising_ssh4(self):
        self._test_compromising_ssh(rc=1, check=False)

    def test_compromising_ssh5(self):
        self._test_compromising_ssh(rc=0, check=False)

    def test_compromising_ssh6(self):
        self._test_compromising_ssh(rc=-1, check=False)
    # Simple program that does nothing and returns an exit code 0.
    # Use Python to be portable.
    SIMPLE_PROGRAM = [sys.executable, '-c', 'pass']

    def soft_limit(self, res, substract, default_limit):
        # Create a new soft limit for a resource, lower than the current
        # soft limit.  If the current soft limit is unlimited (<= 0), fall
        # back to default_limit instead.
        soft_limit, hard_limit = resource.getrlimit(res)
        if soft_limit <= 0:
            soft_limit = default_limit
        else:
            soft_limit -= substract
        return soft_limit

    def memory_limit(self, res):
        # Subtract 1 kB just to get a different limit. Don't subtract too
        # much to avoid memory allocation issues.
        #
        # Use 1 GB by default. Limit high enough to be able to load shared
        # libraries. Limit low enough to work on 32-bit platforms.
        return self.soft_limit(res, 1024, 1024 ** 3)

    def limit_address_space(self):
        # Build a ProcessLimits capping the child's address space (RLIMIT_AS).
        max_memory = self.memory_limit(resource.RLIMIT_AS)
        return processutils.ProcessLimits(address_space=max_memory)

    def test_simple(self):
        # Simple test running SIMPLE_PROGRAM (python -c pass) under an
        # address space limit; it should run cleanly with empty output.
        prlimit = self.limit_address_space()
        stdout, stderr = processutils.execute(*self.SIMPLE_PROGRAM,
                                              prlimit=prlimit)
        self.assertEqual('', stdout.rstrip())
        self.assertEqual(stderr.rstrip(), '')

    def check_limit(self, prlimit, resource, value):
        # Helper: run a child that prints getrlimit() for the given resource
        # and assert both soft and hard limits were set to `value`.
        code = ';'.join(('import resource',
                         'print(resource.getrlimit(resource.%s))' % resource))
        args = [sys.executable, '-c', code]
        stdout, stderr = processutils.execute(*args, prlimit=prlimit)
        expected = (value, value)
        self.assertEqual(str(expected), stdout.rstrip())

    def test_address_space(self):
        prlimit = self.limit_address_space()
        self.check_limit(prlimit, 'RLIMIT_AS', prlimit.address_space)

    def test_core_size(self):
        size = self.soft_limit(resource.RLIMIT_CORE, 1, 1024)
        prlimit = processutils.ProcessLimits(core_file_size=size)
        self.check_limit(prlimit, 'RLIMIT_CORE', prlimit.core_file_size)

    def test_cpu_time(self):
        time = self.soft_limit(resource.RLIMIT_CPU, 1, 1024)
        prlimit = processutils.ProcessLimits(cpu_time=time)
        self.check_limit(prlimit, 'RLIMIT_CPU', prlimit.cpu_time)

    def test_data_size(self):
        max_memory = self.memory_limit(resource.RLIMIT_DATA)
        prlimit = processutils.ProcessLimits(data_size=max_memory)
        self.check_limit(prlimit, 'RLIMIT_DATA', max_memory)

    def test_file_size(self):
        size = self.soft_limit(resource.RLIMIT_FSIZE, 1, 1024)
        prlimit = processutils.ProcessLimits(file_size=size)
        self.check_limit(prlimit, 'RLIMIT_FSIZE', prlimit.file_size)

    def test_memory_locked(self):
        max_memory = self.memory_limit(resource.RLIMIT_MEMLOCK)
        prlimit = processutils.ProcessLimits(memory_locked=max_memory)
        self.check_limit(prlimit, 'RLIMIT_MEMLOCK', max_memory)

    def test_resident_set_size(self):
        max_memory = self.memory_limit(resource.RLIMIT_RSS)
        prlimit = processutils.ProcessLimits(resident_set_size=max_memory)
        self.check_limit(prlimit, 'RLIMIT_RSS', max_memory)

    def test_number_files(self):
        nfiles = self.soft_limit(resource.RLIMIT_NOFILE, 1, 1024)
        prlimit = processutils.ProcessLimits(number_files=nfiles)
        self.check_limit(prlimit, 'RLIMIT_NOFILE', nfiles)

    def test_number_processes(self):
        nprocs = self.soft_limit(resource.RLIMIT_NPROC, 1, 65535)
        prlimit = processutils.ProcessLimits(number_processes=nprocs)
        self.check_limit(prlimit, 'RLIMIT_NPROC', nprocs)

    def test_stack_size(self):
        max_memory = self.memory_limit(resource.RLIMIT_STACK)
        prlimit = processutils.ProcessLimits(stack_size=max_memory)
        self.check_limit(prlimit, 'RLIMIT_STACK', max_memory)

    def test_unsupported_prlimit(self):
        # Unknown limit names must be rejected by ProcessLimits.
        self.assertRaises(ValueError, processutils.ProcessLimits, xxx=33)

    def test_relative_path(self):
        # prlimit enforcement must also work when the executable is
        # resolved through PATH rather than given as an absolute path.
        prlimit = self.limit_address_space()
        program = sys.executable

        env = dict(os.environ)
        env['PATH'] = os.path.dirname(program)
        args = [os.path.basename(program), '-c', 'pass']
        processutils.execute(*args, prlimit=prlimit, env_variables=env)

    def test_execv_error(self):
        # A missing target program must surface as a ProcessExecutionError
        # produced by the prlimit wrapper, not a raw OSError.
        prlimit = self.limit_address_space()
        args = ['/missing_path/dont_exist/program']
        try:
            processutils.execute(*args, prlimit=prlimit)
        except processutils.ProcessExecutionError as exc:
            self.assertEqual(1, exc.exit_code)
            self.assertEqual('', exc.stdout)
            expected = ('%s -m oslo_concurrency.prlimit: '
                        'failed to execute /missing_path/dont_exist/program: '
                        % os.path.basename(sys.executable))
            self.assertIn(expected, exc.stderr)
        else:
            self.fail("ProcessExecutionError not raised")

    def test_setrlimit_error(self):
        prlimit = self.limit_address_space()

        # trying to set a limit higher than the current hard limit
        # with setrlimit() should fail.
        higher_limit = prlimit.address_space + 1024

        args = [sys.executable, '-m', 'oslo_concurrency.prlimit',
                '--as=%s' % higher_limit,
                '--']
        args.extend(self.SIMPLE_PROGRAM)
        try:
            processutils.execute(*args, prlimit=prlimit)
        except processutils.ProcessExecutionError as exc:
            self.assertEqual(1, exc.exit_code)
            self.assertEqual('', exc.stdout)
            expected = ('%s -m oslo_concurrency.prlimit: '
                        'failed to set the AS resource limit: '
                        % os.path.basename(sys.executable))
            self.assertIn(expected, exc.stderr)
        else:
            self.fail("ProcessExecutionError not raised")

    @mock.patch.object(processutils.subprocess, 'Popen')
    def test_python_exec(self, sub_mock):
        # python_exec must be used as the interpreter for the prlimit
        # wrapper process (first element of the Popen argv).
        mock_subprocess = mock.MagicMock()
        mock_subprocess.communicate.return_value = (b'', b'')
        sub_mock.return_value = mock_subprocess
        args = ['/a/command']
        prlimit = self.limit_address_space()

        processutils.execute(*args, prlimit=prlimit, check_exit_code=False,
                             python_exec='/fake_path')
        python_path = sub_mock.mock_calls[0][1][0][0]
        self.assertEqual('/fake_path', python_path)
@contextlib.contextmanager
def watch(logger, action, level=logging.DEBUG, after=5.0):
    """Log a message if an operation exceeds a time threshold.

    This context manager is expected to be used when you are going to
    do an operation in code which might either deadlock or take an
    extraordinary amount of time, and you'd like to emit a status
    message back to the user that the operation is still ongoing but
    has not completed in an expected amount of time. This is more user
    friendly than logging 'start' and 'end' events and making users
    correlate the events to figure out they ended up in a deadlock.

    :param logger: an object that complies to the logger definition
        (has a .log method).

    :param action: a meaningful string that describes the thing you
        are about to do.

    :param level: the logging level the message should be emitted
        at. Defaults to logging.DEBUG.

    :param after: the duration in seconds before the message is
        emitted. Defaults to 5.0 seconds.

    Example usage::

        FORMAT = '%(asctime)-15s %(message)s'
        logging.basicConfig(format=FORMAT)
        LOG = logging.getLogger('mylogger')

        with watchdog.watch(LOG, "subprocess call", logging.ERROR):
            subprocess.call("sleep 10", shell=True)
            print("done")

    """
    watch = timeutils.StopWatch()
    watch.start()

    def log():
        # Runs on the timer thread if the body is still executing after
        # `after` seconds; reports the elapsed time so far.
        msg = "{} not completed after {:0.3f}s".format(action, watch.elapsed())
        logger.log(level, msg)

    timer = threading.Timer(after, log)
    timer.start()
    try:
        yield
    finally:
        # Cancel the pending notification (no-op if it already fired) and
        # wait for the timer thread to finish before returning, so no log
        # message can be emitted after the context manager exits.
        timer.cancel()
        timer.join()
This can be useful to ensure that existing blocked waiters do 11 | not wait indefinitely in the face of large numbers of new attempts to 12 | acquire the lock. When specifying locks as both ``external`` and ``fair``, 13 | the ordering *within* a given process will be fair, but the ordering 14 | *between* processes will be determined by the behaviour of the underlying 15 | OS. 16 | -------------------------------------------------------------------------------- /releasenotes/notes/add-python-exec-kwarg-3a7a0c0849f9bb21.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - A new kwarg, ``python_exec`` is added to the execute() function in the 4 | processutils module. This option is used to specify the path to the python 5 | executable to use for prlimits enforcement. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/add_reno-3b4ae0789e9c45b4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | other: 3 | - Switch to reno for managing release notes. -------------------------------------------------------------------------------- /releasenotes/notes/deprecate-eventlet-within-lockutils-cba49086d7a65042.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Eventlet usages are deprecated and the removal of Eventlet from 5 | OpenStack `is planned `_, 6 | for this reason, using the ``lockutils`` module of oslo.concurrency in a 7 | monkey patched environment is now deprecated. The support of Eventlet will 8 | be soon removed from oslo.concurrency. 9 | 10 | Please start considering removing your internal Eventlet usages and 11 | start migrating your stack. 
12 | -------------------------------------------------------------------------------- /releasenotes/notes/deprecate-windows-support-fcb77dddf82de36b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Support for Windows operating systems has been deprecated. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-python27-support-7d837a45dae941bb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Python 2.7 is no longer supported. The minimum supported version of Python 5 | is now Python 3.6. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/log_acquiring_lock-1b224c0b1562ec97.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Log before trying to acquire a lock to help detect deadlocks and long waits 5 | to acquire locks. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/remove-defaut-section-fallback-a90a6d2fd10671bc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The following options are no longer loaded from the ``[DEFAULT] section``. 5 | Set these options in the ``[oslo_concurrency]`` section. 6 | 7 | - ``disable_process_locking`` 8 | - ``lock_path`` 9 | -------------------------------------------------------------------------------- /releasenotes/notes/remove-py38-dcdd342ee21f8118.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Support for Python 3.8 has been removed. Now the minimum python version 5 | supported is 3.9 . 
6 | -------------------------------------------------------------------------------- /releasenotes/notes/remove-windows-bad63cd41c15235d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | This library no longer supports Windows operating systems. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/timeout-c3fb65acda04c1c7.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Adds a new ``timeout`` argument to ``processutils.execute``. If set, 5 | the process will be aborted if it runs more than ``timeout`` seconds. 6 | -------------------------------------------------------------------------------- /releasenotes/source/2023.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2023.1 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: unmaintained/2023.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/2023.2.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2023.2 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2023.2 7 | -------------------------------------------------------------------------------- /releasenotes/source/2024.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2024.1 Series Release Notes 3 | =========================== 4 | 5 | .. 
release-notes:: 6 | :branch: stable/2024.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/2024.2.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2024.2 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2024.2 7 | -------------------------------------------------------------------------------- /releasenotes/source/2025.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2025.1 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2025.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/_static/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/oslo.concurrency/08987d8af202622ec5ea00bea76c6f6588b07960/releasenotes/source/_static/.placeholder -------------------------------------------------------------------------------- /releasenotes/source/_templates/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/oslo.concurrency/08987d8af202622ec5ea00bea76c6f6588b07960/releasenotes/source/_templates/.placeholder -------------------------------------------------------------------------------- /releasenotes/source/conf.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 
3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 10 | # implied. 11 | # See the License for the specific language governing permissions and 12 | # limitations under the License. 13 | # 14 | # This file is execfile()d with the current directory set to its 15 | # containing dir. 16 | # 17 | # Note that not all possible configuration values are present in this 18 | # autogenerated file. 19 | # 20 | # All configuration values have a default; values that are commented out 21 | # serve to show the default. 22 | 23 | # If extensions (or modules to document with autodoc) are in another directory, 24 | # add these directories to sys.path here. If the directory is relative to the 25 | # documentation root, use os.path.abspath to make it absolute, like shown here. 26 | # sys.path.insert(0, os.path.abspath('.')) 27 | 28 | # -- General configuration ------------------------------------------------ 29 | 30 | # If your documentation needs a minimal Sphinx version, state it here. 31 | # needs_sphinx = '1.0' 32 | 33 | # Add any Sphinx extension module names here, as strings. They can be 34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 | # ones. 36 | extensions = [ 37 | 'openstackdocstheme', 38 | 'reno.sphinxext', 39 | ] 40 | 41 | # openstackdocstheme options 42 | openstackdocs_repo_name = 'openstack/oslo.config' 43 | openstackdocs_bug_project = 'oslo.config' 44 | openstackdocs_bug_tag = '' 45 | 46 | # Add any paths that contain templates here, relative to this directory. 47 | templates_path = ['_templates'] 48 | 49 | # The suffix of source filenames. 50 | source_suffix = '.rst' 51 | 52 | # The encoding of source files. 
53 | # source_encoding = 'utf-8-sig' 54 | 55 | # The master toctree document. 56 | master_doc = 'index' 57 | 58 | # General information about the project. 59 | copyright = '2016, oslo.concurrency Developers' 60 | 61 | # Release notes do not need a version in the title, they span 62 | # multiple versions. 63 | # The full version, including alpha/beta/rc tags. 64 | release = '' 65 | # The short X.Y version. 66 | version = '' 67 | 68 | # The language for content autogenerated by Sphinx. Refer to documentation 69 | # for a list of supported languages. 70 | # language = None 71 | 72 | # There are two options for replacing |today|: either, you set today to some 73 | # non-false value, then it is used: 74 | # today = '' 75 | # Else, today_fmt is used as the format for a strftime call. 76 | # today_fmt = '%B %d, %Y' 77 | 78 | # List of patterns, relative to source directory, that match files and 79 | # directories to ignore when looking for source files. 80 | exclude_patterns = [] 81 | 82 | # The reST default role (used for this markup: `text`) to use for all 83 | # documents. 84 | # default_role = None 85 | 86 | # If true, '()' will be appended to :func: etc. cross-reference text. 87 | # add_function_parentheses = True 88 | 89 | # If true, the current module name will be prepended to all description 90 | # unit titles (such as .. function::). 91 | # add_module_names = True 92 | 93 | # If true, sectionauthor and moduleauthor directives will be shown in the 94 | # output. They are ignored by default. 95 | # show_authors = False 96 | 97 | # The name of the Pygments (syntax highlighting) style to use. 98 | pygments_style = 'native' 99 | 100 | # A list of ignored prefixes for module index sorting. 101 | # modindex_common_prefix = [] 102 | 103 | # If true, keep warnings as "system message" paragraphs in the built documents. 
104 | # keep_warnings = False 105 | 106 | 107 | # -- Options for HTML output ---------------------------------------------- 108 | 109 | # The theme to use for HTML and HTML Help pages. See the documentation for 110 | # a list of builtin themes. 111 | html_theme = 'openstackdocs' 112 | 113 | # Theme options are theme-specific and customize the look and feel of a theme 114 | # further. For a list of options available for each theme, see the 115 | # documentation. 116 | # html_theme_options = {} 117 | 118 | # Add any paths that contain custom themes here, relative to this directory. 119 | # html_theme_path = [] 120 | 121 | # The name for this set of Sphinx documents. If None, it defaults to 122 | # " v documentation". 123 | # html_title = None 124 | 125 | # A shorter title for the navigation bar. Default is the same as html_title. 126 | # html_short_title = None 127 | 128 | # The name of an image file (relative to this directory) to place at the top 129 | # of the sidebar. 130 | # html_logo = None 131 | 132 | # The name of an image file (within the static path) to use as favicon of the 133 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 134 | # pixels large. 135 | # html_favicon = None 136 | 137 | # Add any paths that contain custom static files (such as style sheets) here, 138 | # relative to this directory. They are copied after the builtin static files, 139 | # so a file named "default.css" will overwrite the builtin "default.css". 140 | html_static_path = ['_static'] 141 | 142 | # Add any extra paths that contain custom files (such as robots.txt or 143 | # .htaccess) here, relative to this directory. These files are copied 144 | # directly to the root of the documentation. 145 | # html_extra_path = [] 146 | 147 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 148 | # using the given strftime format. 
149 | # html_last_updated_fmt = '%b %d, %Y' 150 | 151 | # If true, SmartyPants will be used to convert quotes and dashes to 152 | # typographically correct entities. 153 | # html_use_smartypants = True 154 | 155 | # Custom sidebar templates, maps document names to template names. 156 | # html_sidebars = {} 157 | 158 | # Additional templates that should be rendered to pages, maps page names to 159 | # template names. 160 | # html_additional_pages = {} 161 | 162 | # If false, no module index is generated. 163 | # html_domain_indices = True 164 | 165 | # If false, no index is generated. 166 | # html_use_index = True 167 | 168 | # If true, the index is split into individual pages for each letter. 169 | # html_split_index = False 170 | 171 | # If true, links to the reST sources are added to the pages. 172 | # html_show_sourcelink = True 173 | 174 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 175 | # html_show_sphinx = True 176 | 177 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 178 | # html_show_copyright = True 179 | 180 | # If true, an OpenSearch description file will be output, and all pages will 181 | # contain a tag referring to it. The value of this option must be the 182 | # base URL from which the finished HTML is served. 183 | # html_use_opensearch = '' 184 | 185 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 186 | # html_file_suffix = None 187 | 188 | # Output file base name for HTML help builder. 189 | htmlhelp_basename = 'oslo.concurrencyReleaseNotesDoc' 190 | 191 | 192 | # -- Options for LaTeX output --------------------------------------------- 193 | 194 | # Grouping the document tree into LaTeX files. List of tuples 195 | # (source start file, target name, title, 196 | # author, documentclass [howto, manual, or own class]). 
197 | latex_documents = [ 198 | ('index', 'oslo.concurrencyReleaseNotes.tex', 199 | 'oslo.concurrency Release Notes Documentation', 200 | 'oslo.concurrency Developers', 'manual'), 201 | ] 202 | 203 | # The name of an image file (relative to this directory) to place at the top of 204 | # the title page. 205 | # latex_logo = None 206 | 207 | # For "manual" documents, if this is true, then toplevel headings are parts, 208 | # not chapters. 209 | # latex_use_parts = False 210 | 211 | # If true, show page references after internal links. 212 | # latex_show_pagerefs = False 213 | 214 | # If true, show URL addresses after external links. 215 | # latex_show_urls = False 216 | 217 | # Documents to append as an appendix to all manuals. 218 | # latex_appendices = [] 219 | 220 | # If false, no module index is generated. 221 | # latex_domain_indices = True 222 | 223 | 224 | # -- Options for manual page output --------------------------------------- 225 | 226 | # One entry per manual page. List of tuples 227 | # (source start file, name, description, authors, manual section). 228 | man_pages = [ 229 | ('index', 'oslo.concurrencyReleaseNotes', 230 | 'oslo.concurrency Release Notes Documentation', 231 | ['oslo.concurrency Developers'], 1) 232 | ] 233 | 234 | # If true, show URL addresses after external links. 235 | # man_show_urls = False 236 | 237 | 238 | # -- Options for Texinfo output ------------------------------------------- 239 | 240 | # Grouping the document tree into Texinfo files. List of tuples 241 | # (source start file, target name, title, author, 242 | # dir menu entry, description, category) 243 | texinfo_documents = [ 244 | ('index', 'oslo.concurrencyReleaseNotes', 245 | 'oslo.concurrency Release Notes Documentation', 246 | 'oslo.concurrency Developers', 'oslo.concurrencyReleaseNotes', 247 | 'One line description of project.', 248 | 'Miscellaneous'), 249 | ] 250 | 251 | # Documents to append as an appendix to all manuals. 
252 | # texinfo_appendices = [] 253 | 254 | # If false, no module index is generated. 255 | # texinfo_domain_indices = True 256 | 257 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 258 | # texinfo_show_urls = 'footnote' 259 | 260 | # If true, do not generate a @detailmenu in the "Top" node's menu. 261 | # texinfo_no_detailmenu = False 262 | 263 | # -- Options for Internationalization output ------------------------------ 264 | locale_dirs = ['locale/'] 265 | -------------------------------------------------------------------------------- /releasenotes/source/index.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | oslo.concurrency Release Notes 3 | ================================ 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | unreleased 9 | 2025.1 10 | 2024.2 11 | 2024.1 12 | 2023.2 13 | 2023.1 14 | victoria 15 | ussuri 16 | train 17 | stein 18 | rocky 19 | queens 20 | pike 21 | ocata 22 | newton 23 | -------------------------------------------------------------------------------- /releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po: -------------------------------------------------------------------------------- 1 | # Andi Chandler , 2016. #zanata 2 | # Andi Chandler , 2017. #zanata 3 | # Andi Chandler , 2018. #zanata 4 | # Andi Chandler , 2020. #zanata 5 | # Andi Chandler , 2022. #zanata 6 | # Andi Chandler , 2023. 
#zanata 7 | msgid "" 8 | msgstr "" 9 | "Project-Id-Version: oslo.concurrency\n" 10 | "Report-Msgid-Bugs-To: \n" 11 | "POT-Creation-Date: 2023-05-05 13:49+0000\n" 12 | "MIME-Version: 1.0\n" 13 | "Content-Type: text/plain; charset=UTF-8\n" 14 | "Content-Transfer-Encoding: 8bit\n" 15 | "PO-Revision-Date: 2023-05-09 12:01+0000\n" 16 | "Last-Translator: Andi Chandler \n" 17 | "Language-Team: English (United Kingdom)\n" 18 | "Language: en_GB\n" 19 | "X-Generator: Zanata 4.3.3\n" 20 | "Plural-Forms: nplurals=2; plural=(n != 1)\n" 21 | 22 | msgid "2023.1 Series Release Notes" 23 | msgstr "2023.1 Series Release Notes" 24 | 25 | msgid "3.10.0" 26 | msgstr "3.10.0" 27 | 28 | msgid "3.25.0" 29 | msgstr "3.25.0" 30 | 31 | msgid "3.29.0" 32 | msgstr "3.29.0" 33 | 34 | msgid "4.0.0" 35 | msgstr "4.0.0" 36 | 37 | msgid "4.2.0" 38 | msgstr "4.2.0" 39 | 40 | msgid "5.0.0" 41 | msgstr "5.0.0" 42 | 43 | msgid "" 44 | "A new kwarg, ``python_exec`` is added to the execute() function in the " 45 | "processutils module. This option is used to specify the path to the python " 46 | "executable to use for prlimits enforcement." 47 | msgstr "" 48 | "A new kwarg, ``python_exec`` is added to the execute() function in the " 49 | "processutils module. This option is used to specify the path to the Python " 50 | "executable to use for prlimits enforcement." 51 | 52 | msgid "" 53 | "Adds a new ``timeout`` argument to ``processutils.execute``. If set, the " 54 | "process will be aborted if it runs more than ``timeout`` seconds." 55 | msgstr "" 56 | "Adds a new ``timeout`` argument to ``processutils.execute``. If set, the " 57 | "process will be aborted if it runs more than ``timeout`` seconds." 58 | 59 | msgid "" 60 | "Log before trying to acquire a lock to help detect deadlocks and long waits " 61 | "to acquire locks." 62 | msgstr "" 63 | "Log before trying to acquire a lock to help detect deadlocks and long waits " 64 | "to acquire locks." 
65 | 66 | msgid "New Features" 67 | msgstr "New Features" 68 | 69 | msgid "Newton Series Release Notes" 70 | msgstr "Newton Series Release Notes" 71 | 72 | msgid "Ocata Series Release Notes" 73 | msgstr "Ocata Series Release Notes" 74 | 75 | msgid "Other Notes" 76 | msgstr "Other Notes" 77 | 78 | msgid "Pike Series Release Notes" 79 | msgstr "Pike Series Release Notes" 80 | 81 | msgid "Prelude" 82 | msgstr "Prelude" 83 | 84 | msgid "" 85 | "Python 2.7 is no longer supported. The minimum supported version of Python " 86 | "is now Python 3.6." 87 | msgstr "" 88 | "Python 2.7 is no longer supported. The minimum supported version of Python " 89 | "is now Python 3.6." 90 | 91 | msgid "Queens Series Release Notes" 92 | msgstr "Queens Series Release Notes" 93 | 94 | msgid "Rocky Series Release Notes" 95 | msgstr "Rocky Series Release Notes" 96 | 97 | msgid "Stein Series Release Notes" 98 | msgstr "Stein Series Release Notes" 99 | 100 | msgid "Switch to reno for managing release notes." 101 | msgstr "Switch to reno for managing release notes." 102 | 103 | msgid "" 104 | "This release includes optional support for fair locks. When fair locks are " 105 | "specified, blocking waiters will acquire the lock in the order that they " 106 | "blocked." 107 | msgstr "" 108 | "This release includes optional support for fair locks. When fair locks are " 109 | "specified, blocking waiters will acquire the lock in the order that they " 110 | "blocked." 111 | 112 | msgid "Train Series Release Notes" 113 | msgstr "Train Series Release Notes" 114 | 115 | msgid "Unreleased Release Notes" 116 | msgstr "Unreleased Release Notes" 117 | 118 | msgid "Upgrade Notes" 119 | msgstr "Upgrade Notes" 120 | 121 | msgid "Ussuri Series Release Notes" 122 | msgstr "Ussuri Series Release Notes" 123 | 124 | msgid "Victoria Series Release Notes" 125 | msgstr "Victoria Series Release Notes" 126 | 127 | msgid "" 128 | "We now have optional support for ``fair`` locks. 
When fair locks are " 129 | "specified, blocking waiters will acquire the lock in the order that they " 130 | "blocked. This can be useful to ensure that existing blocked waiters do not " 131 | "wait indefinitely in the face of large numbers of new attempts to acquire " 132 | "the lock. When specifying locks as both ``external`` and ``fair``, the " 133 | "ordering *within* a given process will be fair, but the ordering *between* " 134 | "processes will be determined by the behaviour of the underlying OS." 135 | msgstr "" 136 | "We now have optional support for ``fair`` locks. When fair locks are " 137 | "specified, blocking waiters will acquire the lock in the order that they " 138 | "blocked. This can be useful to ensure that existing blocked waiters do not " 139 | "wait indefinitely in the face of large numbers of new attempts to acquire " 140 | "the lock. When specifying locks as both ``external`` and ``fair``, the " 141 | "ordering *within* a given process will be fair, but the ordering *between* " 142 | "processes will be determined by the behaviour of the underlying OS." 143 | 144 | msgid "oslo.concurrency Release Notes" 145 | msgstr "oslo.concurrency Release Notes" 146 | -------------------------------------------------------------------------------- /releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po: -------------------------------------------------------------------------------- 1 | # Gérald LONLAS , 2016. 
#zanata 2 | msgid "" 3 | msgstr "" 4 | "Project-Id-Version: oslo.concurrency Release Notes 3.15.1\n" 5 | "Report-Msgid-Bugs-To: \n" 6 | "POT-Creation-Date: 2016-10-25 16:33+0000\n" 7 | "MIME-Version: 1.0\n" 8 | "Content-Type: text/plain; charset=UTF-8\n" 9 | "Content-Transfer-Encoding: 8bit\n" 10 | "PO-Revision-Date: 2016-10-22 05:58+0000\n" 11 | "Last-Translator: Gérald LONLAS \n" 12 | "Language-Team: French\n" 13 | "Language: fr\n" 14 | "X-Generator: Zanata 3.7.3\n" 15 | "Plural-Forms: nplurals=2; plural=(n > 1)\n" 16 | 17 | msgid "3.10.0" 18 | msgstr "3.10.0" 19 | 20 | msgid "Newton Series Release Notes" 21 | msgstr "Note de release pour Newton" 22 | 23 | msgid "Other Notes" 24 | msgstr "Autres notes" 25 | 26 | msgid "Switch to reno for managing release notes." 27 | msgstr "Commence à utiliser reno pour la gestion des notes de release" 28 | 29 | msgid "Unreleased Release Notes" 30 | msgstr "Note de release pour les changements non déployées" 31 | 32 | msgid "oslo.concurrency Release Notes" 33 | msgstr "Note de release pour oslo.concurrency" 34 | -------------------------------------------------------------------------------- /releasenotes/source/newton.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Newton Series Release Notes 3 | ============================= 4 | 5 | .. release-notes:: 6 | :branch: origin/stable/newton 7 | -------------------------------------------------------------------------------- /releasenotes/source/ocata.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Ocata Series Release Notes 3 | =================================== 4 | 5 | .. 
release-notes:: 6 | :branch: origin/stable/ocata 7 | -------------------------------------------------------------------------------- /releasenotes/source/pike.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Pike Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/pike 7 | -------------------------------------------------------------------------------- /releasenotes/source/queens.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Queens Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/queens 7 | -------------------------------------------------------------------------------- /releasenotes/source/rocky.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Rocky Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/rocky 7 | -------------------------------------------------------------------------------- /releasenotes/source/stein.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Stein Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/stein 7 | -------------------------------------------------------------------------------- /releasenotes/source/train.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Train Series Release Notes 3 | ========================== 4 | 5 | .. 
release-notes:: 6 | :branch: stable/train 7 | -------------------------------------------------------------------------------- /releasenotes/source/unreleased.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Unreleased Release Notes 3 | ========================== 4 | 5 | .. release-notes:: 6 | -------------------------------------------------------------------------------- /releasenotes/source/ussuri.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Ussuri Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/ussuri 7 | -------------------------------------------------------------------------------- /releasenotes/source/victoria.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Victoria Series Release Notes 3 | ============================= 4 | 5 | .. release-notes:: 6 | :branch: unmaintained/victoria 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Requirements lower bounds listed here are our best effort to keep them up to 2 | # date but we do not test them so no guarantee of having them all correct. If 3 | # you find any incorrect lower bounds, let us know or propose a fix. 
4 | 5 | pbr>=2.0.0 # Apache-2.0 6 | oslo.config>=5.2.0 # Apache-2.0 7 | oslo.i18n>=3.15.3 # Apache-2.0 8 | oslo.utils>=3.33.0 # Apache-2.0 9 | fasteners>=0.7.0 # Apache-2.0 10 | debtcollector>=3.0.0 # Apache-2.0 11 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = oslo.concurrency 3 | summary = Oslo Concurrency library 4 | description_file = 5 | README.rst 6 | author = OpenStack 7 | author_email = openstack-discuss@lists.openstack.org 8 | home_page = https://docs.openstack.org/oslo.concurrency/latest/ 9 | python_requires = >=3.9 10 | classifier = 11 | Environment :: OpenStack 12 | Intended Audience :: Information Technology 13 | Intended Audience :: System Administrators 14 | License :: OSI Approved :: Apache Software License 15 | Operating System :: POSIX :: Linux 16 | Programming Language :: Python 17 | Programming Language :: Python :: 3 18 | Programming Language :: Python :: 3.9 19 | Programming Language :: Python :: 3.10 20 | Programming Language :: Python :: 3.11 21 | Programming Language :: Python :: 3.12 22 | Programming Language :: Python :: 3 :: Only 23 | Programming Language :: Python :: Implementation :: CPython 24 | 25 | [files] 26 | packages = 27 | oslo_concurrency 28 | 29 | [entry_points] 30 | oslo.config.opts = 31 | oslo.concurrency = oslo_concurrency.opts:list_opts 32 | console_scripts = 33 | lockutils-wrapper = oslo_concurrency.lockutils:main 34 | 35 | [extras] 36 | eventlet = 37 | eventlet>=0.35.2 # MIT 38 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import setuptools 17 | 18 | setuptools.setup( 19 | setup_requires=['pbr>=2.0.0'], 20 | pbr=True) 21 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | oslotest>=3.2.0 # Apache-2.0 2 | coverage>=4.0 # Apache-2.0 3 | fixtures>=3.0.0 # Apache-2.0/BSD 4 | stestr>=2.0.0 # Apache-2.0 5 | eventlet>=0.35.2 # MIT 6 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 3.18.0 3 | envlist = py3,pep8 4 | 5 | [testenv] 6 | deps = 7 | -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} 8 | -r{toxinidir}/test-requirements.txt 9 | # We want to support both vanilla stdlib and eventlet monkey patched 10 | allowlist_externals = env 11 | commands = 12 | env TEST_EVENTLET=0 lockutils-wrapper stestr run --slowest {posargs} 13 | env TEST_EVENTLET=1 lockutils-wrapper stestr run --slowest {posargs} 14 | 15 | [testenv:pep8] 16 | skip_install = true 17 | deps = 18 | pre-commit 19 | commands = 20 | pre-commit run -a 21 | 22 | [testenv:venv] 23 | commands = {posargs} 24 | 25 | [testenv:docs] 26 | allowlist_externals = rm 27 | deps = 28 | -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} 29 | -r{toxinidir}/doc/requirements.txt 30 | commands = 31 | rm -fr doc/build 32 | sphinx-build -W 
--keep-going -b html doc/source doc/build/html {posargs} 33 | 34 | [testenv:cover] 35 | setenv = 36 | PYTHON=coverage run --source oslo_concurrency --parallel-mode 37 | commands = 38 | coverage erase 39 | env TEST_EVENTLET=0 lockutils-wrapper stestr run {posargs} 40 | coverage combine 41 | coverage html -d cover 42 | coverage xml -o cover/coverage.xml 43 | coverage report --show-missing 44 | 45 | [flake8] 46 | show-source = True 47 | ignore = H405,W504 48 | exclude=.venv,.git,.tox,dist,*lib/python*,*egg,build 49 | 50 | [hacking] 51 | import_exceptions = 52 | oslo_concurrency._i18n 53 | 54 | [testenv:releasenotes] 55 | deps = {[testenv:docs]deps} 56 | commands = 57 | sphinx-build -a -E -W -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html 58 | --------------------------------------------------------------------------------