├── t ├── __init__.py ├── integration │ ├── setup.cfg │ ├── __init__.py │ ├── tests │ │ └── __init__.py │ └── setup.py ├── unit │ ├── test_package.py │ ├── __init__.py │ ├── test_dummy.py │ ├── test_spawn.py │ ├── test_values.py │ ├── test_win32.py │ ├── test_common.py │ ├── test_einfo.py │ └── test_pool.py └── skip.py ├── Doc ├── includes │ ├── __init__.py │ ├── mp_webserver.py │ ├── mp_workers.py │ ├── mp_newtype.py │ ├── mp_benchmarks.py │ ├── mp_synchronize.py │ └── mp_pool.py ├── index.rst ├── glossary.rst └── conf.py ├── requirements ├── test-ci.txt ├── test.txt └── pkgutils.txt ├── .editorconfig ├── SECURITY.md ├── .gitignore ├── .cookiecutterrc ├── MANIFEST.in ├── .pre-commit-config.yaml ├── tox.ini ├── setup.cfg ├── .github ├── workflows │ ├── codespell.yml │ ├── ci.yaml │ └── codeql-analysis.yml └── dependabot.yml ├── billiard ├── _ext.py ├── exceptions.py ├── __init__.py ├── popen_forkserver.py ├── popen_spawn_posix.py ├── popen_fork.py ├── dummy │ ├── connection.py │ └── __init__.py ├── _win.py ├── popen_spawn_win32.py ├── common.py ├── semaphore_tracker.py ├── einfo.py ├── resource_sharer.py ├── util.py ├── sharedctypes.py ├── compat.py ├── forkserver.py ├── heap.py ├── reduction.py └── process.py ├── LICENSE.txt ├── INSTALL.txt ├── README.rst ├── Makefile ├── Modules └── _billiard │ ├── multiprocessing.h │ └── multiprocessing.c └── setup.py /t/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Doc/includes/__init__.py: -------------------------------------------------------------------------------- 1 | # package 2 | 3 | -------------------------------------------------------------------------------- /requirements/test-ci.txt: -------------------------------------------------------------------------------- 1 | pytest-cov 2 | -------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | pytest>=6.2 2 | psutil>=5.9.0 3 | -------------------------------------------------------------------------------- /requirements/pkgutils.txt: -------------------------------------------------------------------------------- 1 | setuptools>=59.2.0 2 | wheel>=0.37.0 3 | flake8>=4.0.1 4 | tox>=3.24.4 5 | -------------------------------------------------------------------------------- /t/integration/setup.cfg: -------------------------------------------------------------------------------- 1 | [nosetests] 2 | verbosity = 1 3 | detailed-errors = 1 4 | where = tests 5 | -------------------------------------------------------------------------------- /t/integration/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.insert(0, os.pardir) 5 | sys.path.insert(0, os.getcwd()) 6 | -------------------------------------------------------------------------------- /t/unit/test_package.py: -------------------------------------------------------------------------------- 1 | import billiard 2 | 3 | 4 | def test_has_version(): 5 | assert billiard.__version__ 6 | assert isinstance(billiard.__version__, str) 7 | -------------------------------------------------------------------------------- /t/integration/tests/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | sys.path.insert(0, 
os.path.join(os.getcwd(), os.pardir))
5 | print(sys.path[0])
6 | sys.path.insert(0, os.getcwd())
7 | print(sys.path[0])
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # https://editorconfig.org/
2 | 
3 | root = true
4 | 
5 | [*]
6 | indent_style = space
7 | indent_size = 4
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 | charset = utf-8
11 | end_of_line = lf
12 | 
13 | [Makefile]
14 | indent_style = tab
--------------------------------------------------------------------------------
/t/skip.py:
--------------------------------------------------------------------------------
1 | import sys
2 | 
3 | import pytest
4 | 
5 | if_win32 = pytest.mark.skipif(
6 |     sys.platform.startswith('win32'),
7 |     reason='Does not work on Windows'
8 | )
9 | 
10 | unless_win32 = pytest.mark.skipif(
11 |     not sys.platform.startswith('win32'),
12 |     reason='Requires Windows to work'
13 | )
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | 
3 | ## Supported Versions
4 | 
5 | 
6 | | Version | Supported          |
7 | | ------- | ------------------ |
8 | | 3.6.x   | :white_check_mark: |
9 | | 3.5.x   | :x:                |
10 | | 4.0.x   | :white_check_mark: |
11 | | < 3.6   | :x:                |
12 | 
13 | ## Reporting a Vulnerability
14 | 
15 | Please contact auvipy@gmail.com to report vulnerability issues.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | *.pyc
3 | *~
4 | *.sqlite
5 | *.sqlite-journal
6 | settings_local.py
7 | local_settings.py
8 | .*.sw[po]
9 | dist/
10 | *.egg-info
11 | doc/__build/*
12 | build/
13 | locale/
14 | pip-log.txt
15 | devdatabase.db
16 | .directory
17 | bundle_version.gen
18 | celeryd.log
19 | celeryd.pid
20 | coverage.xml
21 | cover/
22 | *.so
23 | .tox/
24 | .eggs/
25 | htmlcov/
26 | .cache/
27 | .coverage
28 | .vscode/
29 | .python-version
--------------------------------------------------------------------------------
/.cookiecutterrc:
--------------------------------------------------------------------------------
1 | default_context:
2 | 
3 |   email: 'ask@celeryproject.org'
4 |   full_name: 'Ask Solem'
5 |   github_username: 'celery'
6 |   project_name: 'Billiard'
7 |   project_short_description: 'Python multiprocessing fork with improvements and bugfixes'
8 |   project_slug: 'billiard'
9 |   version: '1.0.0'
10 |   year: '2009-2016'
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.py
2 | include *.txt
3 | include *.rst
4 | include Makefile
5 | recursive-include Lib *.py
6 | recursive-include Modules *.c *.h
7 | recursive-include Doc *.rst *.py
8 | recursive-include funtests *.py
9 | recursive-include requirements *.txt
10 | recursive-include billiard *.py
11 | recursive-include t *.py
12 | 
13 | recursive-exclude docs/_build *
14 | recursive-exclude * __pycache__
15 | recursive-exclude * *.py[co]
16 | recursive-exclude * .*.sw*
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # Learn more 
about this config here: https://pre-commit.com/
2 | 
3 | # To enable these pre-commit hooks run: `uv tool install pre-commit`
4 | # or `pipx install pre-commit` or `brew install pre-commit`
5 | # Then in the project root directory run `pre-commit install`
6 | 
7 | repos:
8 |   - repo: https://github.com/codespell-project/codespell
9 |     rev: v2.4.1
10 |     hooks:
11 |       - id: codespell  # See pyproject.toml for args
12 |         additional_dependencies:
13 |           - tomli
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | requires =
3 |     tox-gh-actions
4 | envlist = {3.8,3.9,3.10,3.11,3.12,3.13,3.14}-unit
5 | skip_missing_interpreters = True
6 | 
7 | [gh-actions]
8 | python =
9 |     3.8: 3.8-unit
10 |     3.9: 3.9-unit
11 |     3.10: 3.10-unit
12 |     3.11: 3.11-unit
13 |     3.12: 3.12-unit
14 |     3.13: 3.13-unit
15 |     3.14: 3.14-unit
16 | 
17 | [testenv]
18 | distribute = True
19 | sitepackages = False
20 | commands = py.test -xv
21 | deps=
22 |     -r{toxinidir}/requirements/test-ci.txt
23 |     -r{toxinidir}/requirements/test.txt
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | testpaths = t/unit/
3 | python_classes = test_*
4 | 
5 | [flake8]
6 | # classes can be lowercase, arguments and variables can be uppercase
7 | # whenever it makes the code more readable.
8 | ignore = N806, N802, N801, N803, E305
9 | 
10 | [pep257]
11 | ignore = D102,D104,D203,D105,D213
12 | 
13 | [metadata]
14 | license_file = LICENSE.txt
15 | 
16 | [codespell]
17 | # Ref: https://github.com/codespell-project/codespell#using-a-config-file
18 | skip = .git*
19 | check-hidden = true
20 | # ignore-regex =
21 | ignore-words-list = assertin
--------------------------------------------------------------------------------
/Doc/index.rst:
--------------------------------------------------------------------------------
1 | .. multiprocessing documentation master file, created by sphinx-quickstart on Wed Nov 26 12:47:00 2008.
2 |    You can adapt this file completely to your liking, but it should at least
3 |    contain the root `toctree` directive.
4 | 
5 | Welcome to multiprocessing's documentation!
6 | ===========================================
7 | 
8 | Contents:
9 | 
10 | .. toctree::
11 | 
12 |    library/multiprocessing.rst
13 |    glossary.rst
14 | 
15 | 
16 | Indices and tables
17 | ==================
18 | 
19 | * :ref:`genindex`
20 | * :ref:`modindex`
21 | * :ref:`search`
22 | 
--------------------------------------------------------------------------------
/t/unit/__init__.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | 
3 | 
4 | def teardown():
5 |     # Workaround for a multiprocessing bug where logging
6 |     # is attempted after globals have already been collected at shutdown.
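    # (The rebuild below relies on the CPython detail that each entry in
    # atexit._exithandlers is a (func, args, kwargs) tuple, so e[0] is the
    # registered callable being filtered out.)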
7 | cancelled = set() 8 | try: 9 | import multiprocessing.util 10 | cancelled.add(multiprocessing.util._exit_function) 11 | except (AttributeError, ImportError): 12 | pass 13 | 14 | try: 15 | atexit._exithandlers[:] = [ 16 | e for e in atexit._exithandlers if e[0] not in cancelled 17 | ] 18 | except AttributeError: 19 | pass 20 | -------------------------------------------------------------------------------- /t/unit/test_dummy.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import billiard.dummy 3 | 4 | 5 | class test_restart_state: 6 | def test_raises(self): 7 | class Thread(threading.Thread): 8 | exception = None 9 | 10 | def run(self): 11 | try: 12 | billiard.dummy.Process().start() 13 | except BaseException as e: 14 | self.exception = e 15 | 16 | thread = Thread() 17 | thread.start() 18 | thread.join(0.1) 19 | assert not thread.is_alive() 20 | assert thread.exception is None 21 | -------------------------------------------------------------------------------- /.github/workflows/codespell.yml: -------------------------------------------------------------------------------- 1 | # Codespell configuration is within setup.cfg 2 | --- 3 | name: Codespell 4 | 5 | on: 6 | push: 7 | branches: [main] 8 | pull_request: 9 | branches: [main] 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | codespell: 16 | name: Check for spelling errors 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v6 22 | - name: Annotate locations with typos 23 | uses: codespell-project/codespell-problem-matcher@v1 24 | - name: Codespell 25 | uses: codespell-project/actions-codespell@v2 26 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 
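# (The "groups" entry below batches all Actions version bumps into a single
# weekly pull request instead of one PR per action.)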
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /billiard/_ext.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | supports_exec = True 4 | 5 | from .compat import _winapi as win32 # noqa 6 | 7 | if sys.platform.startswith("java"): 8 | _billiard = None 9 | else: 10 | try: 11 | import _billiard # noqa 12 | except ImportError: 13 | import _multiprocessing as _billiard # noqa 14 | supports_exec = False 15 | 16 | 17 | def ensure_multiprocessing(): 18 | if _billiard is None: 19 | raise NotImplementedError("multiprocessing not supported") 20 | 21 | 22 | def ensure_SemLock(): 23 | try: 24 | from _billiard import SemLock # noqa 25 | except ImportError: 26 | try: 27 | from _multiprocessing import SemLock # noqa 28 | except ImportError: 29 | raise ImportError("""\ 30 | This platform lacks a functioning sem_open implementation, therefore, 31 | the required synchronization primitives needed will not function, 32 | see issue 3770.""") 33 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: [pull_request, push] 3 | jobs: 4 | #################### Unittests #################### 5 | unittest: 6 | runs-on: blacksmith-4vcpu-ubuntu-2204 7 | strategy: 8 | fail-fast: false 9 | matrix: 10 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] 11 | steps: 12 | - name: Install system packages 13 | run: sudo apt update && sudo apt-get install libcurl4-openssl-dev libssl-dev 14 | - name: Check out code from GitHub 15 | uses: actions/checkout@v6 16 | - name: Set up Python ${{ matrix.python-version }} 17 | id: python 18 | uses: actions/setup-python@main 19 | with: 20 | python-version: ${{ matrix.python-version }} 21 | allow-prereleases: true 22 | - name: Install dependencies 23 | run: pip install --upgrade pip setuptools wheel tox tox-gh-actions tox-docker 24 | - name: Run unittest 25 | run: tox -v -e ${{ matrix.python-version }}-unit -- -v 26 | -------------------------------------------------------------------------------- /billiard/exceptions.py: -------------------------------------------------------------------------------- 1 | try: 2 | from multiprocessing import ( 3 | ProcessError, 4 | BufferTooShort, 5 | TimeoutError, 6 | AuthenticationError, 7 | ) 8 | except ImportError: 9 | class ProcessError(Exception): # noqa 10 | pass 11 | 12 | class BufferTooShort(ProcessError): # noqa 13 | pass 14 | 15 | class TimeoutError(ProcessError): # noqa 16 | pass 17 | 18 | class AuthenticationError(ProcessError): # noqa 19 | pass 20 | 21 | 22 | class TimeLimitExceeded(Exception): 23 | """The time limit has been exceeded and the job has been terminated.""" 24 | 25 | def __str__(self): 26 | return "TimeLimitExceeded%s" % (self.args, ) 27 | 28 | 29 | class SoftTimeLimitExceeded(Exception): 30 | """The soft time limit has been exceeded. 
This exception is raised 31 | to give the task a chance to clean up.""" 32 | 33 | def __str__(self): 34 | return "SoftTimeLimitExceeded%s" % (self.args, ) 35 | 36 | 37 | class WorkerLostError(Exception): 38 | """The worker processing a job has exited prematurely.""" 39 | 40 | 41 | class Terminated(Exception): 42 | """The worker processing a job has been terminated by user request.""" 43 | 44 | 45 | class RestartFreqExceeded(Exception): 46 | """Restarts too fast.""" 47 | 48 | 49 | class CoroStop(Exception): 50 | """Coroutine exit, as opposed to StopIteration which may 51 | mean it should be restarted.""" 52 | pass 53 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2006-2008, R Oudkerk and Contributors 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 2. Redistributions in binary form must reproduce the above copyright 12 | notice, this list of conditions and the following disclaimer in the 13 | documentation and/or other materials provided with the distribution. 14 | 3. Neither the name of author nor the names of any contributors may be 15 | used to endorse or promote products derived from this software 16 | without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND 19 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 | ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 | OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 | LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 | OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 | SUCH DAMAGE. 29 | 30 | -------------------------------------------------------------------------------- /t/integration/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | try: 4 | from setuptools import setup 5 | from setuptools.command.install import install 6 | except ImportError: 7 | from ez_setup import use_setuptools 8 | use_setuptools() 9 | from setuptools import setup # noqa 10 | from setuptools.command.install import install # noqa 11 | 12 | 13 | class no_install(install): 14 | 15 | def run(self, *args, **kwargs): 16 | import sys 17 | sys.stderr.write(""" 18 | ------------------------------------------------------- 19 | The billiard functional test suite cannot be installed. 
20 | -------------------------------------------------------
21 | 
22 | 
23 | But you can execute the tests by running the command:
24 | 
25 |     $ tox -e py
26 | 
27 | 
28 | """)
29 | 
30 | 
31 | setup(
32 |     name='billiard-funtests',
33 |     version='DEV',
34 |     description='Functional test suite for billiard',
35 |     author='Ask Solem',
36 |     author_email='ask@celeryproject.org',
37 |     url='https://github.com/celery/billiard',
38 |     platforms=['any'],
39 |     packages=[],
40 |     data_files=[],
41 |     zip_safe=False,
42 |     cmdclass={'install': no_install},
43 |     test_suite='pytest',
44 |     build_requires=[
45 |         'pytest',
46 |         'coverage>=3.0',
47 |     ],
48 |     classifiers=[
49 |         'Operating System :: OS Independent',
50 |         'Programming Language :: Python',
51 |         'Programming Language :: C',
52 |         'License :: OSI Approved :: BSD License',
53 |         'Intended Audience :: Developers',
54 |     ],
55 |     long_description='Do not install this package',
56 | )
--------------------------------------------------------------------------------
/billiard/__init__.py:
--------------------------------------------------------------------------------
1 | """Python multiprocessing fork with improvements and bugfixes"""
2 | #
3 | # Package analogous to 'threading.py' but using processes
4 | #
5 | # multiprocessing/__init__.py
6 | #
7 | # This package is intended to duplicate the functionality (and much of
8 | # the API) of threading.py but uses processes instead of threads.  A
9 | # subpackage 'multiprocessing.dummy' has the same API but is a simple
10 | # wrapper for 'threading'.
11 | #
12 | # Try calling `multiprocessing.doc.main()` to read the html
13 | # documentation in a webbrowser.
14 | #
15 | #
16 | # Copyright (c) 2006-2008, R Oudkerk
17 | # Licensed to PSF under a Contributor Agreement.
18 | #
19 | 
20 | 
21 | import sys
22 | 
23 | from . import context
24 | 
25 | VERSION = (4, 2, 4)
26 | __version__ = '.'.join(map(str, VERSION[0:4])) + "".join(VERSION[4:])
27 | __author__ = 'R Oudkerk / Python Software Foundation'
28 | __author_email__ = 'python-dev@python.org'
29 | __maintainer__ = 'Asif Saif Uddin'
30 | __contact__ = "auvipy@gmail.com"
31 | __homepage__ = "https://github.com/celery/billiard"
32 | __docformat__ = "restructuredtext"
33 | 
34 | # -eof meta-
35 | 
36 | #
37 | # Copy stuff from default context
38 | #
39 | 
40 | globals().update((name, getattr(context._default_context, name))
41 |                  for name in context._default_context.__all__)
42 | __all__ = context._default_context.__all__
43 | 
44 | #
45 | # XXX These should not really be documented or public.
46 | #
47 | 
48 | SUBDEBUG = 5
49 | SUBWARNING = 25
50 | 
51 | #
52 | # Alias for main module -- will be reset by bootstrapping child processes
53 | #
54 | 
55 | if '__main__' in sys.modules:
56 |     sys.modules['__mp_main__'] = sys.modules['__main__']
57 | 
58 | 
59 | def ensure_multiprocessing():
60 |     from ._ext import ensure_multiprocessing
61 |     return ensure_multiprocessing()
--------------------------------------------------------------------------------
/Doc/glossary.rst:
--------------------------------------------------------------------------------
1 | .. _glossary:
2 | 
3 | ********
4 | Glossary
5 | ********
6 | 
7 | .. glossary::
8 | 
9 |    bytecode
10 |       Python source code is compiled into bytecode, the internal representation
11 |       of a Python program in the interpreter. The bytecode is also cached in
12 |       ``.pyc`` and ``.pyo`` files so that executing the same file is faster the
13 |       second time (recompilation from source to bytecode can be avoided).
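      For example, ``dis.dis(compile("x + 1", "<string>", "eval"))`` prints
      the ``LOAD_NAME``, ``LOAD_CONST`` and ``BINARY_ADD`` instructions that
      the expression compiles down to.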
This 14 | "intermediate language" is said to run on a :term:`virtual machine` 15 | that executes the machine code corresponding to each bytecode. 16 | 17 | CPython 18 | The canonical implementation of the Python programming language. The 19 | term "CPython" is used in contexts when necessary to distinguish this 20 | implementation from others such as Jython or IronPython. 21 | 22 | GIL 23 | See :term:`global interpreter lock`. 24 | 25 | global interpreter lock 26 | The lock used by Python threads to assure that only one thread 27 | executes in the :term:`CPython` :term:`virtual machine` at a time. 28 | This simplifies the CPython implementation by assuring that no two 29 | processes can access the same memory at the same time. Locking the 30 | entire interpreter makes it easier for the interpreter to be 31 | multi-threaded, at the expense of much of the parallelism afforded by 32 | multi-processor machines. Efforts have been made in the past to 33 | create a "free-threaded" interpreter (one which locks shared data at a 34 | much finer granularity), but so far none have been successful because 35 | performance suffered in the common single-processor case. 36 | 37 | virtual machine 38 | A computer defined entirely in software. Python's virtual machine 39 | executes the :term:`bytecode` emitted by the bytecode compiler. 40 | 41 | -------------------------------------------------------------------------------- /t/unit/test_spawn.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | from billiard import get_context, Process, Queue 4 | from billiard.util import set_pdeathsig, get_pdeathsig 5 | import pytest 6 | import psutil 7 | import signal 8 | from time import sleep 9 | 10 | class test_spawn: 11 | def test_start(self): 12 | ctx = get_context('spawn') 13 | 14 | p = ctx.Process(target=task_from_process, args=('opa',)) 15 | p.start() 16 | p.join() 17 | return p.exitcode 18 | 19 | @pytest.mark.skipif(not sys.platform.startswith('linux'), 20 | reason='set_pdeathsig() is supported only in Linux') 21 | def test_set_pdeathsig(self): 22 | success = "done" 23 | q = Queue() 24 | p = Process(target=parent_task, args=(q, success)) 25 | p.start() 26 | child_proc = psutil.Process(q.get(timeout=3)) 27 | try: 28 | p.terminate() 29 | assert q.get(timeout=3) == success 30 | finally: 31 | child_proc.terminate() 32 | 33 | @pytest.mark.skipif(not sys.platform.startswith('linux'), 34 | reason='get_pdeathsig() is supported only in Linux') 35 | def test_set_get_pdeathsig(self): 36 | sig = get_pdeathsig() 37 | assert sig == 0 38 | set_pdeathsig(signal.SIGTERM) 39 | sig = get_pdeathsig() 40 | assert sig == signal.SIGTERM 41 | 42 | def child_process(q, success): 43 | sig = signal.SIGUSR1 44 | class ParentDeathError(Exception): 45 | pass 46 | 47 | def handler(*args): 48 | raise ParentDeathError() 49 | 50 | signal.signal(sig, handler) 51 | set_pdeathsig(sig) 52 | q.put(os.getpid()) 53 | try: 54 | while True: 55 | sleep(1) 56 | except ParentDeathError: 57 | q.put(success) 58 | sys.exit(0) 59 | 60 | def parent_task(q, success): 61 | p = Process(target=child_process, args=(q, success)) 62 | p.start() 63 | p.join() 64 | 65 | def task_from_process(name): 66 | print('proc:', name) 67 | 68 | -------------------------------------------------------------------------------- /billiard/popen_forkserver.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | 4 | from . import reduction 5 | from . 
import context 6 | from . import forkserver 7 | from . import popen_fork 8 | from . import spawn 9 | 10 | __all__ = ['Popen'] 11 | 12 | # 13 | # Wrapper for an fd used while launching a process 14 | # 15 | 16 | 17 | class _DupFd: 18 | 19 | def __init__(self, ind): 20 | self.ind = ind 21 | 22 | def detach(self): 23 | return forkserver.get_inherited_fds()[self.ind] 24 | 25 | # 26 | # Start child process using a server process 27 | # 28 | 29 | 30 | class Popen(popen_fork.Popen): 31 | method = 'forkserver' 32 | DupFd = _DupFd 33 | 34 | def __init__(self, process_obj): 35 | self._fds = [] 36 | super().__init__(process_obj) 37 | 38 | def duplicate_for_child(self, fd): 39 | self._fds.append(fd) 40 | return len(self._fds) - 1 41 | 42 | def _launch(self, process_obj): 43 | prep_data = spawn.get_preparation_data(process_obj._name) 44 | buf = io.BytesIO() 45 | context.set_spawning_popen(self) 46 | try: 47 | reduction.dump(prep_data, buf) 48 | reduction.dump(process_obj, buf) 49 | finally: 50 | context.set_spawning_popen(None) 51 | 52 | self.sentinel, w = forkserver.connect_to_new_process(self._fds) 53 | with io.open(w, 'wb', closefd=True) as f: 54 | f.write(buf.getbuffer()) 55 | self.pid = forkserver.read_unsigned(self.sentinel) 56 | 57 | def poll(self, flag=os.WNOHANG): 58 | if self.returncode is None: 59 | from .connection import wait 60 | timeout = 0 if flag == os.WNOHANG else None 61 | if not wait([self.sentinel], timeout): 62 | return None 63 | try: 64 | self.returncode = forkserver.read_unsigned(self.sentinel) 65 | except (OSError, EOFError): 66 | # The process ended abnormally perhaps because of a signal 67 | self.returncode = 255 68 | return self.returncode 69 | -------------------------------------------------------------------------------- /billiard/popen_spawn_posix.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | 4 | from . import context 5 | from . import popen_fork 6 | from . import reduction 7 | from . import spawn 8 | 9 | from .compat import spawnv_passfds 10 | 11 | __all__ = ['Popen'] 12 | 13 | 14 | # 15 | # Wrapper for an fd used while launching a process 16 | # 17 | 18 | class _DupFd: 19 | 20 | def __init__(self, fd): 21 | self.fd = fd 22 | 23 | def detach(self): 24 | return self.fd 25 | 26 | # 27 | # Start child process using a fresh interpreter 28 | # 29 | 30 | 31 | class Popen(popen_fork.Popen): 32 | method = 'spawn' 33 | DupFd = _DupFd 34 | 35 | def __init__(self, process_obj): 36 | self._fds = [] 37 | super().__init__(process_obj) 38 | 39 | def duplicate_for_child(self, fd): 40 | self._fds.append(fd) 41 | return fd 42 | 43 | def _launch(self, process_obj): 44 | os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" 45 | spawn._Django_old_layout_hack__save() 46 | from . 
import semaphore_tracker 47 | tracker_fd = semaphore_tracker.getfd() 48 | self._fds.append(tracker_fd) 49 | prep_data = spawn.get_preparation_data(process_obj._name) 50 | fp = io.BytesIO() 51 | context.set_spawning_popen(self) 52 | try: 53 | reduction.dump(prep_data, fp) 54 | reduction.dump(process_obj, fp) 55 | finally: 56 | context.set_spawning_popen(None) 57 | 58 | parent_r = child_w = child_r = parent_w = None 59 | try: 60 | parent_r, child_w = os.pipe() 61 | child_r, parent_w = os.pipe() 62 | cmd = spawn.get_command_line(tracker_fd=tracker_fd, 63 | pipe_handle=child_r) 64 | self._fds.extend([child_r, child_w]) 65 | self.pid = spawnv_passfds( 66 | spawn.get_executable(), cmd, self._fds, 67 | ) 68 | self.sentinel = parent_r 69 | with io.open(parent_w, 'wb', closefd=False) as f: 70 | f.write(fp.getvalue()) 71 | finally: 72 | for fd in (child_r, child_w, parent_w): 73 | if fd is not None: 74 | os.close(fd) 75 | -------------------------------------------------------------------------------- /t/unit/test_values.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from billiard import Value, RawValue, Lock, Process 4 | 5 | 6 | class test_values: 7 | 8 | codes_values = [ 9 | ('i', 4343, 24234), 10 | ('d', 3.625, -4.25), 11 | ('h', -232, 234), 12 | ('c', 'x'.encode('latin'), 'y'.encode('latin')) 13 | ] 14 | 15 | def test_issue_229(self): 16 | """Test fix for issue #229""" 17 | 18 | a = Value('i', 0) 19 | b = Value('i', 0) 20 | 21 | a.value = 5 22 | assert a.value == 5 23 | assert b.value == 0 24 | 25 | @classmethod 26 | def _test(cls, values): 27 | for sv, cv in zip(values, cls.codes_values): 28 | sv.value = cv[2] 29 | 30 | def test_value(self, raw=False): 31 | if raw: 32 | values = [RawValue(code, value) 33 | for code, value, _ in self.codes_values] 34 | else: 35 | values = [Value(code, value) 36 | for code, value, _ in self.codes_values] 37 | 38 | for sv, cv in zip(values, self.codes_values): 39 | assert sv.value == cv[1] 40 | 41 | proc = Process(target=self._test, args=(values,)) 42 | proc.daemon = True 43 | proc.start() 44 | proc.join() 45 | 46 | for sv, cv in zip(values, self.codes_values): 47 | assert sv.value == cv[2] 48 | 49 | def test_rawvalue(self): 50 | self.test_value(raw=True) 51 | 52 | def test_getobj_getlock(self): 53 | val1 = Value('i', 5) 54 | lock1 = val1.get_lock() 55 | obj1 = val1.get_obj() 56 | 57 | val2 = Value('i', 5, lock=None) 58 | lock2 = val2.get_lock() 59 | obj2 = val2.get_obj() 60 | 61 | lock = Lock() 62 | val3 = Value('i', 5, lock=lock) 63 | lock3 = val3.get_lock() 64 | obj3 = val3.get_obj() 65 | assert lock == lock3 66 | 67 | arr4 = Value('i', 5, lock=False) 68 | assert not hasattr(arr4, 'get_lock') 69 | assert not hasattr(arr4, 'get_obj') 70 | 71 | with pytest.raises(AttributeError): 72 | Value('i', 5, lock='navalue') 73 | 74 | arr5 = RawValue('i', 5) 75 | assert not hasattr(arr5, 'get_lock') 76 | assert not hasattr(arr5, 'get_obj') 77 | -------------------------------------------------------------------------------- /t/unit/test_win32.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import signal 3 | 4 | from billiard.util import set_pdeathsig, get_pdeathsig 5 | 6 | from billiard.compat import _winapi 7 | from t import skip 8 | 9 | 10 | @skip.unless_win32() 11 | class test_win32_module: 12 | 13 | @pytest.mark.parametrize('name', [ 14 | 'NULL', 15 | 'ERROR_ALREADY_EXISTS', 16 | 'ERROR_PIPE_BUSY', 17 | 'ERROR_PIPE_CONNECTED', 18 | 
'ERROR_SEM_TIMEOUT', 19 | 'ERROR_MORE_DATA', 20 | 'ERROR_BROKEN_PIPE', 21 | 'ERROR_IO_PENDING', 22 | 'ERROR_NETNAME_DELETED', 23 | 'GENERIC_READ', 24 | 'GENERIC_WRITE', 25 | 'DUPLICATE_SAME_ACCESS', 26 | 'DUPLICATE_CLOSE_SOURCE', 27 | 'INFINITE', 28 | 'NMPWAIT_WAIT_FOREVER', 29 | 'OPEN_EXISTING', 30 | 'PIPE_ACCESS_DUPLEX', 31 | 'PIPE_ACCESS_INBOUND', 32 | 'PIPE_READMODE_MESSAGE', 33 | 'PIPE_TYPE_MESSAGE', 34 | 'PIPE_UNLIMITED_INSTANCES', 35 | 'PIPE_WAIT', 36 | 'PROCESS_ALL_ACCESS', 37 | 'PROCESS_DUP_HANDLE', 38 | 'WAIT_OBJECT_0', 39 | 'WAIT_ABANDONED_0', 40 | 'WAIT_TIMEOUT', 41 | 'FILE_FLAG_FIRST_PIPE_INSTANCE', 42 | 'FILE_FLAG_OVERLAPPED', 43 | ]) 44 | def test_constants(self, name): 45 | assert getattr(_winapi, name) is not None 46 | 47 | @pytest.mark.parametrize('name', [ 48 | 'Overlapped', 49 | 'CloseHandle', 50 | 'GetLastError', 51 | 'OpenProcess', 52 | 'ExitProcess', 53 | 'ConnectNamedPipe', 54 | 'CreateFile', 55 | 'WriteFile', 56 | 'ReadFile', 57 | 'CreateNamedPipe', 58 | 'SetNamedPipeHandleState', 59 | 'WaitNamedPipe', 60 | 'PeekNamedPipe', 61 | 'WaitForMultipleObjects', 62 | 'WaitForSingleObject', 63 | 'GetCurrentProcess', 64 | 'GetExitCodeProcess', 65 | 'TerminateProcess', 66 | 'DuplicateHandle', 67 | 'CreatePipe', 68 | ]) 69 | def test_functions(self, name): 70 | assert getattr(_winapi, name) 71 | 72 | def test_set_pdeathsig(self): 73 | with pytest.raises(OSError): 74 | set_pdeathsig(signal.SIGTERM) 75 | 76 | def test_get_pdeathsig(self): 77 | with pytest.raises(OSError): 78 | get_pdeathsig() 79 | -------------------------------------------------------------------------------- /Doc/includes/mp_webserver.py: -------------------------------------------------------------------------------- 1 | # 2 | # Example where a pool of http servers share a single listening socket 3 | # 4 | # On Windows this module depends on the ability to pickle a socket 5 | # object so that the worker processes can inherit a copy of the server 6 | # object. (We import `multiprocessing.reduction` to enable this pickling.) 7 | # 8 | # Not sure if we should synchronize access to `socket.accept()` method by 9 | # using a process-shared lock -- does not seem to be necessary. 10 | # 11 | # Copyright (c) 2006-2008, R Oudkerk 12 | # All rights reserved. 
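# (Note: this example targets Python 2: it imports BaseHTTPServer and uses
# print statements, so it will not run unmodified on Python 3.)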
13 | #
14 | 
15 | import os
16 | import sys
17 | 
18 | from multiprocessing import Process, current_process, freeze_support
19 | from BaseHTTPServer import HTTPServer
20 | from SimpleHTTPServer import SimpleHTTPRequestHandler
21 | 
22 | if sys.platform == 'win32':
23 |     import multiprocessing.reduction    # make sockets picklable/inheritable
24 | 
25 | 
26 | def note(format, *args):
27 |     sys.stderr.write('[%s]\t%s\n' % (current_process().name, format%args))
28 | 
29 | 
30 | class RequestHandler(SimpleHTTPRequestHandler):
31 |     # we override log_message() to show which process is handling the request
32 |     def log_message(self, format, *args):
33 |         note(format, *args)
34 | 
35 | def serve_forever(server):
36 |     note('starting server')
37 |     try:
38 |         server.serve_forever()
39 |     except KeyboardInterrupt:
40 |         pass
41 | 
42 | 
43 | def runpool(address, number_of_processes):
44 |     # create a single server object -- children will each inherit a copy
45 |     server = HTTPServer(address, RequestHandler)
46 | 
47 |     # create child processes to act as workers
48 |     for i in range(number_of_processes-1):
49 |         Process(target=serve_forever, args=(server,)).start()
50 | 
51 |     # main process also acts as a worker
52 |     serve_forever(server)
53 | 
54 | 
55 | def test():
56 |     DIR = os.path.join(os.path.dirname(__file__), '..')
57 |     ADDRESS = ('localhost', 8000)
58 |     NUMBER_OF_PROCESSES = 4
59 | 
60 |     print 'Serving at http://%s:%d using %d worker processes' % \
61 |           (ADDRESS[0], ADDRESS[1], NUMBER_OF_PROCESSES)
62 |     print 'To exit press Ctrl-' + ['C', 'Break'][sys.platform=='win32']
63 | 
64 |     os.chdir(DIR)
65 |     runpool(ADDRESS, NUMBER_OF_PROCESSES)
66 | 
67 | 
68 | if __name__ == '__main__':
69 |     freeze_support()
70 |     test()
--------------------------------------------------------------------------------
/Doc/includes/mp_workers.py:
--------------------------------------------------------------------------------
1 | #
2 | # Simple example which uses a pool of workers to carry out some tasks.
3 | #
4 | # Notice that the results will probably not come out of the output
5 | # queue in the same order as the corresponding tasks were
6 | # put on the input queue. If it is important to get the results back
7 | # in the original order then consider using `Pool.map()` or
8 | # `Pool.imap()` (which will save on the amount of code needed anyway).
9 | #
10 | # Copyright (c) 2006-2008, R Oudkerk
11 | # All rights reserved.
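# (Each worker's iter(input.get, 'STOP') loop below treats the 'STOP'
# sentinel as end-of-stream, which is how test() shuts the workers down.)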
12 | # 13 | 14 | import time 15 | import random 16 | 17 | from multiprocessing import Process, Queue, current_process, freeze_support 18 | 19 | # 20 | # Function run by worker processes 21 | # 22 | 23 | def worker(input, output): 24 | for func, args in iter(input.get, 'STOP'): 25 | result = calculate(func, args) 26 | output.put(result) 27 | 28 | # 29 | # Function used to calculate result 30 | # 31 | 32 | def calculate(func, args): 33 | result = func(*args) 34 | return '%s says that %s%s = %s' % \ 35 | (current_process().name, func.__name__, args, result) 36 | 37 | # 38 | # Functions referenced by tasks 39 | # 40 | 41 | def mul(a, b): 42 | time.sleep(0.5*random.random()) 43 | return a * b 44 | 45 | def plus(a, b): 46 | time.sleep(0.5*random.random()) 47 | return a + b 48 | 49 | # 50 | # 51 | # 52 | 53 | def test(): 54 | NUMBER_OF_PROCESSES = 4 55 | TASKS1 = [(mul, (i, 7)) for i in range(20)] 56 | TASKS2 = [(plus, (i, 8)) for i in range(10)] 57 | 58 | # Create queues 59 | task_queue = Queue() 60 | done_queue = Queue() 61 | 62 | # Submit tasks 63 | for task in TASKS1: 64 | task_queue.put(task) 65 | 66 | # Start worker processes 67 | for i in range(NUMBER_OF_PROCESSES): 68 | Process(target=worker, args=(task_queue, done_queue)).start() 69 | 70 | # Get and print results 71 | print 'Unordered results:' 72 | for i in range(len(TASKS1)): 73 | print '\t', done_queue.get() 74 | 75 | # Add more tasks using `put()` 76 | for task in TASKS2: 77 | task_queue.put(task) 78 | 79 | # Get and print some more results 80 | for i in range(len(TASKS2)): 81 | print '\t', done_queue.get() 82 | 83 | # Tell child processes to stop 84 | for i in range(NUMBER_OF_PROCESSES): 85 | task_queue.put('STOP') 86 | 87 | 88 | if __name__ == '__main__': 89 | freeze_support() 90 | test() 91 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '40 0 * * 5' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: blacksmith-4vcpu-ubuntu-2204 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://git.io/codeql-language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v6 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v4 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 
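# (Only Python is analyzed by the matrix above; to scan the C extension
# under Modules/_billiard, 'cpp' would have to be added to the language list.)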
49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 52 | 53 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 54 | # If this step fails, then you should remove it and run the build manually (see below) 55 | - name: Autobuild 56 | uses: github/codeql-action/autobuild@v4 57 | 58 | # ℹ️ Command-line programs to run using the OS shell. 59 | # 📚 https://git.io/JvXDl 60 | 61 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 62 | # and modify them (or add more) to build your code if your project 63 | # uses a compiled language 64 | 65 | #- run: | 66 | # make bootstrap 67 | # make release 68 | 69 | - name: Perform CodeQL Analysis 70 | uses: github/codeql-action/analyze@v4 71 | -------------------------------------------------------------------------------- /Doc/includes/mp_newtype.py: -------------------------------------------------------------------------------- 1 | # 2 | # This module shows how to use arbitrary callables with a subclass of 3 | # `BaseManager`. 4 | # 5 | # Copyright (c) 2006-2008, R Oudkerk 6 | # All rights reserved. 7 | # 8 | 9 | from multiprocessing import freeze_support 10 | from multiprocessing.managers import BaseManager, BaseProxy 11 | import operator 12 | 13 | ## 14 | 15 | class Foo(object): 16 | def f(self): 17 | print 'you called Foo.f()' 18 | def g(self): 19 | print 'you called Foo.g()' 20 | def _h(self): 21 | print 'you called Foo._h()' 22 | 23 | # A simple generator function 24 | def baz(): 25 | for i in xrange(10): 26 | yield i*i 27 | 28 | # Proxy type for generator objects 29 | class GeneratorProxy(BaseProxy): 30 | _exposed_ = ('next', '__next__') 31 | def __iter__(self): 32 | return self 33 | def next(self): 34 | return self._callmethod('next') 35 | def __next__(self): 36 | return self._callmethod('__next__') 37 | 38 | # Function to return the operator module 39 | def get_operator_module(): 40 | return operator 41 | 42 | ## 43 | 44 | class MyManager(BaseManager): 45 | pass 46 | 47 | # register the Foo class; make `f()` and `g()` accessible via proxy 48 | MyManager.register('Foo1', Foo) 49 | 50 | # register the Foo class; make `g()` and `_h()` accessible via proxy 51 | MyManager.register('Foo2', Foo, exposed=('g', '_h')) 52 | 53 | # register the generator function baz; use `GeneratorProxy` to make proxies 54 | MyManager.register('baz', baz, proxytype=GeneratorProxy) 55 | 56 | # register get_operator_module(); make public functions accessible via proxy 57 | MyManager.register('operator', get_operator_module) 58 | 59 | ## 60 | 61 | def test(): 62 | manager = MyManager() 63 | manager.start() 64 | 65 | print '-' * 20 66 | 67 | f1 = manager.Foo1() 68 | f1.f() 69 | f1.g() 70 | assert not hasattr(f1, '_h') 71 | assert sorted(f1._exposed_) == sorted(['f', 'g']) 72 | 73 | print '-' * 20 74 | 75 | f2 = manager.Foo2() 76 | f2.g() 77 | f2._h() 78 | assert not hasattr(f2, 'f') 79 | assert sorted(f2._exposed_) == sorted(['g', '_h']) 80 | 81 | print '-' * 20 82 | 83 | it = manager.baz() 84 | for i in it: 85 | print '<%d>' % i, 86 | print 87 | 88 | print '-' * 20 89 | 90 | op = manager.operator() 91 | print 'op.add(23, 45) =', op.add(23, 45) 92 | print 'op.pow(2, 94) =', op.pow(2, 94) 93 | print 'op.getslice(range(10), 2, 6) =', op.getslice(range(10), 2, 6) 94 | print 'op.repeat(range(5), 3) =', op.repeat(range(5), 3) 95 | print 
'op._exposed_ =', op._exposed_
96 | 
97 | ##
98 | 
99 | if __name__ == '__main__':
100 |     freeze_support()
101 |     test()
--------------------------------------------------------------------------------
/billiard/popen_fork.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import errno
4 | 
5 | from .common import TERM_SIGNAL
6 | 
7 | __all__ = ['Popen']
8 | 
9 | #
10 | # Start child process using fork
11 | #
12 | 
13 | 
14 | class Popen:
15 |     method = 'fork'
16 |     sentinel = None
17 | 
18 |     def __init__(self, process_obj):
19 |         sys.stdout.flush()
20 |         sys.stderr.flush()
21 |         self.returncode = None
22 |         self._launch(process_obj)
23 | 
24 |     def duplicate_for_child(self, fd):
25 |         return fd
26 | 
27 |     def poll(self, flag=os.WNOHANG):
28 |         if self.returncode is None:
29 |             while True:
30 |                 try:
31 |                     pid, sts = os.waitpid(self.pid, flag)
32 |                 except OSError as e:
33 |                     if e.errno == errno.EINTR:
34 |                         continue
35 |                     # Child process not yet created. See #1731717
36 |                     # e.errno == errno.ECHILD == 10
37 |                     return None
38 |                 else:
39 |                     break
40 |             if pid == self.pid:
41 |                 if os.WIFSIGNALED(sts):
42 |                     self.returncode = -os.WTERMSIG(sts)
43 |                 else:
44 |                     assert os.WIFEXITED(sts)
45 |                     self.returncode = os.WEXITSTATUS(sts)
46 |         return self.returncode
47 | 
48 |     def wait(self, timeout=None):
49 |         if self.returncode is None:
50 |             if timeout is not None:
51 |                 from .connection import wait
52 |                 if not wait([self.sentinel], timeout):
53 |                     return None
54 |             # This shouldn't block if wait() returned successfully.
55 |             return self.poll(os.WNOHANG if timeout == 0.0 else 0)
56 |         return self.returncode
57 | 
58 |     def terminate(self):
59 |         if self.returncode is None:
60 |             try:
61 |                 os.kill(self.pid, TERM_SIGNAL)
62 |             except OSError as exc:
63 |                 if getattr(exc, 'errno', None) != errno.ESRCH:
64 |                     if self.wait(timeout=0.1) is None:
65 |                         raise
66 | 
67 |     def _launch(self, process_obj):
68 |         code = 1
69 |         parent_r, child_w = os.pipe()
70 |         self.pid = os.fork()
71 |         if self.pid == 0:
72 |             try:
73 |                 os.close(parent_r)
74 |                 if 'random' in sys.modules:
75 |                     import random
76 |                     random.seed()
77 |                 code = process_obj._bootstrap()
78 |             finally:
79 |                 os._exit(code)
80 |         else:
81 |             os.close(child_w)
82 |             self.sentinel = parent_r
83 | 
84 |     def close(self):
85 |         if self.sentinel is not None:
86 |             try:
87 |                 os.close(self.sentinel)
88 |             finally:
89 |                 self.sentinel = None
--------------------------------------------------------------------------------
/INSTALL.txt:
--------------------------------------------------------------------------------
1 | .. default-role:: literal
2 | 
3 | ================================
4 | Installation of multiprocessing
5 | ================================
6 | 
7 | Versions earlier than Python 2.4 are not supported. If you are using
8 | Python 2.4 then you must install the `ctypes` package (which comes
9 | automatically with Python 2.5). Users of Python 2.4 on Windows
10 | also need to install the `pywin32` package.
11 | 
12 | On Unix it's highly recommended to use Python 2.5.3 (not yet released) or
13 | apply the ``fork-thread-patch-2`` patch from `Issue 1683
14 | <https://bugs.python.org/issue1683>`_.
15 | 
16 | Windows binary builds for Python 2.4 and Python 2.5 are available at
17 | 
18 |   https://pypi.org/project/multiprocessing/
19 | 
20 | Python 2.6 and newer versions already come with multiprocessing. Although
21 | the stand-alone variant of the multiprocessing package is kept compatible
22 | with 2.6, you mustn't install it with Python 2.6.
23 | 
24 | Otherwise, if you have the correct C compiler setup then the source
25 | distribution can be installed the usual way::
26 | 
27 |   python setup.py install
28 | 
29 | It should not be necessary to do any editing of `setup.py` if you are
30 | using Windows, macOS or Linux. On other unices it may be necessary
31 | to modify the values of the `macros` dictionary or `libraries` list.
32 | The section to modify reads ::
33 | 
34 |   else:
35 |       macros = dict(
36 |           HAVE_SEM_OPEN=1,
37 |           HAVE_SEM_TIMEDWAIT=1,
38 |           HAVE_FD_TRANSFER=1
39 |       )
40 |       libraries = ['rt']
41 | 
42 | More details can be found in the comments in `setup.py`.
43 | 
44 | Note that if you use `HAVE_SEM_OPEN=0` then support for posix
45 | semaphores will not be compiled in, and then many of the functions
46 | in the `processing` namespace like `Lock()` or `Queue()` will not be
47 | available. However, one can still create a manager using `manager =
48 | processing.Manager()` and then do `lock = manager.Lock()` etc.
49 | 
50 | 
51 | Running tests
52 | -------------
53 | 
54 | To run the test scripts using Python 2.5 do ::
55 | 
56 |   python -m multiprocessing.tests
57 | 
58 | and on Python 2.4 do ::
59 | 
60 |   python -c "from multiprocessing.tests import main; main()"
61 | 
62 | The sources also come with a Makefile. To run the unit tests with the
63 | Makefile using Python 2.5 do ::
64 | 
65 |   make test
66 | 
67 | using another version of Python do ::
68 | 
69 |   make test PYTHON=python2.4
70 | 
71 | This will run a number of test scripts using both processes and threads.
72 | 
73 | 
74 | Running examples
75 | ----------------
76 | 
77 | The make target `examples` runs several example scripts.
78 | 
79 | 
80 | Building docs
81 | -------------
82 | 
83 | To build the standalone documentation you need Sphinx 0.5 and setuptools
84 | 0.6c9 or newer. Both are available at https://pypi.org/. With
85 | setuptools installed, do ::
86 | 
87 |   sudo easy_install-2.5 "Sphinx>=0.5"
88 |   make doc
89 | 
90 | The docs end up in ``build/sphinx/builder_name``.
--------------------------------------------------------------------------------
/billiard/dummy/connection.py:
--------------------------------------------------------------------------------
1 | #
2 | # Analogue of `multiprocessing.connection` which uses queues instead of sockets
3 | #
4 | # multiprocessing/dummy/connection.py
5 | #
6 | # Copyright (c) 2006-2008, R Oudkerk
7 | # All rights reserved.
8 | #
9 | # Redistribution and use in source and binary forms, with or without
10 | # modification, are permitted provided that the following conditions
11 | # are met:
12 | #
13 | # 1. Redistributions of source code must retain the above copyright
14 | #    notice, this list of conditions and the following disclaimer.
15 | # 2. Redistributions in binary form must reproduce the above copyright
16 | #    notice, this list of conditions and the following disclaimer in the
17 | #    documentation and/or other materials provided with the distribution.
18 | # 3. Neither the name of author nor the names of any contributors may be
19 | #    used to endorse or promote products derived from this software
20 | #    without specific prior written permission.
21 | #
22 | # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
23 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 | # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 | # SUCH DAMAGE. 33 | # 34 | 35 | from queue import Queue 36 | 37 | __all__ = ['Client', 'Listener', 'Pipe'] 38 | 39 | families = [None] 40 | 41 | 42 | class Listener(object): 43 | 44 | def __init__(self, address=None, family=None, backlog=1): 45 | self._backlog_queue = Queue(backlog) 46 | 47 | def accept(self): 48 | return Connection(*self._backlog_queue.get()) 49 | 50 | def close(self): 51 | self._backlog_queue = None 52 | 53 | address = property(lambda self: self._backlog_queue) 54 | 55 | def __enter__(self): 56 | return self 57 | 58 | def __exit__(self, *exc_info): 59 | self.close() 60 | 61 | 62 | def Client(address): 63 | _in, _out = Queue(), Queue() 64 | address.put((_out, _in)) 65 | return Connection(_in, _out) 66 | 67 | 68 | def Pipe(duplex=True): 69 | a, b = Queue(), Queue() 70 | return Connection(a, b), Connection(b, a) 71 | 72 | 73 | class Connection(object): 74 | 75 | def __init__(self, _in, _out): 76 | self._out = _out 77 | self._in = _in 78 | self.send = self.send_bytes = _out.put 79 | self.recv = self.recv_bytes = _in.get 80 | 81 | def poll(self, timeout=0.0): 82 | if self._in.qsize() > 0: 83 | return True 84 | if timeout <= 0.0: 85 | return False 86 | self._in.not_empty.acquire() 87 | self._in.not_empty.wait(timeout) 88 | self._in.not_empty.release() 89 | return self._in.qsize() > 0 90 | 91 | def close(self): 92 | pass 93 | -------------------------------------------------------------------------------- /t/unit/test_common.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | import signal 4 | 5 | from contextlib import contextmanager 6 | from time import time 7 | from unittest.mock import patch, Mock, call 8 | 9 | from t import skip 10 | 11 | from billiard.common import ( 12 | _shutdown_cleanup, 13 | reset_signals, 14 | restart_state, 15 | ) 16 | 17 | 18 | def signo(name): 19 | return getattr(signal, name) 20 | 21 | 22 | @contextmanager 23 | def termsigs(default, full): 24 | from billiard import common 25 | prev_def, common.TERMSIGS_DEFAULT = common.TERMSIGS_DEFAULT, default 26 | prev_full, common.TERMSIGS_FULL = common.TERMSIGS_FULL, full 27 | try: 28 | yield 29 | finally: 30 | common.TERMSIGS_DEFAULT, common.TERMSIGS_FULL = prev_def, prev_full 31 | 32 | 33 | @skip.if_win32() 34 | class test_reset_signals: 35 | 36 | def test_shutdown_handler(self): 37 | with patch('sys.exit') as exit: 38 | _shutdown_cleanup(15, Mock()) 39 | exit.assert_called() 40 | assert os.WTERMSIG(exit.call_args[0][0]) == 15 41 | 42 | def test_does_not_reset_ignored_signal(self, sigs=['SIGTERM']): 43 | with self.assert_context(sigs, [], signal.SIG_IGN) as (_, SET): 44 | SET.assert_not_called() 45 | 46 | def test_does_not_reset_if_current_is_None(self, sigs=['SIGTERM']): 47 | with self.assert_context(sigs, [], None) as (_, SET): 48 | SET.assert_not_called() 49 | 50 | def test_resets_for_SIG_DFL(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): 51 | with self.assert_context(sigs, [], signal.SIG_DFL) as (_, SET): 52 
| SET.assert_has_calls([ 53 | call(signo(sig), _shutdown_cleanup) for sig in sigs 54 | ]) 55 | 56 | def test_resets_for_obj(self, sigs=['SIGTERM', 'SIGINT', 'SIGUSR1']): 57 | with self.assert_context(sigs, [], object()) as (_, SET): 58 | SET.assert_has_calls([ 59 | call(signo(sig), _shutdown_cleanup) for sig in sigs 60 | ]) 61 | 62 | def test_handles_errors(self, sigs=['SIGTERM']): 63 | for exc in (OSError(), AttributeError(), 64 | ValueError(), RuntimeError()): 65 | with self.assert_context(sigs, [], signal.SIG_DFL, exc) as (_, S): 66 | S.assert_called() 67 | 68 | @contextmanager 69 | def assert_context(self, default, full, get_returns=None, set_effect=None): 70 | with termsigs(default, full): 71 | with patch('signal.getsignal') as GET: 72 | with patch('signal.signal') as SET: 73 | GET.return_value = get_returns 74 | SET.side_effect = set_effect 75 | reset_signals() 76 | GET.assert_has_calls([ 77 | call(signo(sig)) for sig in default 78 | ]) 79 | yield GET, SET 80 | 81 | 82 | class test_restart_state: 83 | 84 | def test_raises(self): 85 | s = restart_state(100, 1) # max 100 restarts in 1 second. 86 | s.R = 99 87 | s.step() 88 | with pytest.raises(s.RestartFreqExceeded): 89 | s.step() 90 | 91 | def test_time_passed_resets_counter(self): 92 | s = restart_state(100, 10) 93 | s.R, s.T = 100, time() 94 | with pytest.raises(s.RestartFreqExceeded): 95 | s.step() 96 | s.R, s.T = 100, time() 97 | s.step(time() + 20) 98 | assert s.R == 1 99 | -------------------------------------------------------------------------------- /billiard/_win.py: -------------------------------------------------------------------------------- 1 | """ 2 | billiard._win 3 | ~~~~~~~~~~~~~ 4 | 5 | Windows utilities to terminate process groups. 6 | 7 | """ 8 | 9 | import os 10 | 11 | # psutil is painfully slow in win32. 
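# (The Toolhelp32 snapshot functions used below live in kernel32 and are
# reachable with plain ctypes.)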
So to avoid adding big 12 | # dependencies like pywin32 a ctypes based solution is preferred 13 | 14 | # Code based on the winappdbg project http://winappdbg.sourceforge.net/ 15 | # (BSD License) 16 | from ctypes import ( 17 | byref, sizeof, windll, 18 | Structure, WinError, POINTER, 19 | c_size_t, c_char, c_void_p, 20 | ) 21 | from ctypes.wintypes import DWORD, LONG 22 | 23 | ERROR_NO_MORE_FILES = 18 24 | INVALID_HANDLE_VALUE = c_void_p(-1).value 25 | 26 | 27 | class PROCESSENTRY32(Structure): 28 | _fields_ = [ 29 | ('dwSize', DWORD), 30 | ('cntUsage', DWORD), 31 | ('th32ProcessID', DWORD), 32 | ('th32DefaultHeapID', c_size_t), 33 | ('th32ModuleID', DWORD), 34 | ('cntThreads', DWORD), 35 | ('th32ParentProcessID', DWORD), 36 | ('pcPriClassBase', LONG), 37 | ('dwFlags', DWORD), 38 | ('szExeFile', c_char * 260), 39 | ] 40 | LPPROCESSENTRY32 = POINTER(PROCESSENTRY32) 41 | 42 | 43 | def CreateToolhelp32Snapshot(dwFlags=2, th32ProcessID=0): 44 | hSnapshot = windll.kernel32.CreateToolhelp32Snapshot(dwFlags, 45 | th32ProcessID) 46 | if hSnapshot == INVALID_HANDLE_VALUE: 47 | raise WinError() 48 | return hSnapshot 49 | 50 | 51 | def Process32First(hSnapshot, pe=None): 52 | return _Process32n(windll.kernel32.Process32First, hSnapshot, pe) 53 | 54 | 55 | def Process32Next(hSnapshot, pe=None): 56 | return _Process32n(windll.kernel32.Process32Next, hSnapshot, pe) 57 | 58 | 59 | def _Process32n(fun, hSnapshot, pe=None): 60 | if pe is None: 61 | pe = PROCESSENTRY32() 62 | pe.dwSize = sizeof(PROCESSENTRY32) 63 | success = fun(hSnapshot, byref(pe)) 64 | if not success: 65 | if windll.kernel32.GetLastError() == ERROR_NO_MORE_FILES: 66 | return 67 | raise WinError() 68 | return pe 69 | 70 | 71 | def get_all_processes_pids(): 72 | """Return a dictionary with all processes pids as keys and their 73 | parents as value. Ignore processes with no parents. 74 | """ 75 | h = CreateToolhelp32Snapshot() 76 | parents = {} 77 | pe = Process32First(h) 78 | while pe: 79 | if pe.th32ParentProcessID: 80 | parents[pe.th32ProcessID] = pe.th32ParentProcessID 81 | pe = Process32Next(h, pe) 82 | 83 | return parents 84 | 85 | 86 | def get_processtree_pids(pid, include_parent=True): 87 | """Return a list with all the pids of a process tree""" 88 | parents = get_all_processes_pids() 89 | all_pids = list(parents.keys()) 90 | pids = {pid} 91 | while 1: 92 | pids_new = pids.copy() 93 | 94 | for _pid in all_pids: 95 | if parents[_pid] in pids: 96 | pids_new.add(_pid) 97 | 98 | if pids_new == pids: 99 | break 100 | 101 | pids = pids_new.copy() 102 | 103 | if not include_parent: 104 | pids.remove(pid) 105 | 106 | return list(pids) 107 | 108 | 109 | def kill_processtree(pid, signum): 110 | """Kill a process and all its descendants""" 111 | family_pids = get_processtree_pids(pid) 112 | 113 | for _pid in family_pids: 114 | os.kill(_pid, signum) 115 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | billiard 3 | ======== 4 | 5 | |build-status-lin| |build-status-win| |license| |wheel| |pyversion| |pyimp| 6 | 7 | :Version: 4.2.4 8 | :Web: https://billiard.readthedocs.io 9 | :Download: https://pypi.org/project/billiard/ 10 | :Source: https://github.com/celery/billiard/ 11 | :DeepWiki: |deepwiki| 12 | 13 | .. 
|build-status-lin| image:: https://github.com/celery/billiard/actions/workflows/ci.yaml/badge.svg 14 | :alt: Build status on Linux 15 | :target: https://github.com/celery/billiard/actions/workflows/ci.yaml 16 | 17 | .. |build-status-win| image:: https://ci.appveyor.com/api/projects/status/github/celery/billiard?png=true&branch=main 18 | :alt: Build status on Windows 19 | :target: https://ci.appveyor.com/project/ask/billiard 20 | 21 | .. |license| image:: https://img.shields.io/pypi/l/billiard.svg 22 | :alt: BSD License 23 | :target: https://opensource.org/licenses/BSD-3-Clause 24 | 25 | .. |wheel| image:: https://img.shields.io/pypi/wheel/billiard.svg 26 | :alt: Billiard can be installed via wheel 27 | :target: https://pypi.org/project/billiard/ 28 | 29 | .. |pyversion| image:: https://img.shields.io/pypi/pyversions/billiard.svg 30 | :alt: Supported Python versions. 31 | :target: https://pypi.org/project/billiard/ 32 | 33 | .. |pyimp| image:: https://img.shields.io/pypi/implementation/billiard.svg 34 | :alt: Supported Python implementations. 35 | :target: https://pypi.org/project/billiard/ 36 | 37 | .. |deepwiki| image:: https://devin.ai/assets/deepwiki-badge.png 38 | :alt: Ask http://DeepWiki.com 39 | :target: https://deepwiki.com/celery/billiard 40 | :width: 125px 41 | 42 | About 43 | ----- 44 | 45 | ``billiard`` is a fork of the Python 2.7 `multiprocessing `_ 46 | package. The multiprocessing package itself is a renamed and updated version of 47 | R Oudkerk's `pyprocessing `_ package. 48 | This standalone variant draws its fixes/improvements from python-trunk and provides 49 | additional bug fixes and improvements. 50 | 51 | - This package would not be possible if not for the contributions of not only 52 | the current maintainers but all of the contributors to the original pyprocessing 53 | package listed `here `_. 54 | 55 | - It is also a fork of the multiprocessing backport package by Christian Heimes. 56 | 57 | - It includes the no-execv patch contributed by R. Oudkerk. 58 | 59 | - And the Pool improvements previously located in `Celery`_. 60 | 61 | - Billiard is used in and is a dependency for `Celery`_ and is maintained by the 62 | Celery team. 63 | 64 | .. _`Celery`: http://celeryproject.org 65 | 66 | Documentation 67 | ------------- 68 | 69 | The documentation for ``billiard`` is available on `Read the Docs <https://billiard.readthedocs.io>`_. 70 | 71 | Bug reporting 72 | ------------- 73 | 74 | Please report bugs related to multiprocessing at the 75 | `Python bug tracker `_. Issues related to billiard 76 | should be reported at https://github.com/celery/billiard/issues. 77 | 78 | billiard is part of the Tidelift Subscription 79 | --------------------------------------------- 80 | 81 | The maintainers of ``billiard`` and thousands of other packages are working 82 | with Tidelift to deliver commercial support and maintenance for the open source 83 | dependencies you use to build your applications. Save time, reduce risk, and 84 | improve code health, while paying the maintainers of the exact dependencies you 85 | use. `Learn more`_. 86 | 87 | ..
_`Learn more`: https://tidelift.com/subscription/pkg/pypi-billiard?utm_source=pypi-billiard&utm_medium=referral&utm_campaign=readme&utm_term=repo 88 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PROJ=billiard 2 | PYTHON=python 3 | GIT=git 4 | TOX=tox 5 | NOSETESTS=nosetests 6 | ICONV=iconv 7 | FLAKE8=flake8 8 | FLAKEPLUS=flakeplus 9 | SPHINX2RST=sphinx2rst 10 | 11 | SPHINX_DIR=docs/ 12 | SPHINX_BUILDDIR="${SPHINX_DIR}/_build" 13 | README=README.rst 14 | README_SRC="docs/templates/readme.txt" 15 | CONTRIBUTING=CONTRIBUTING.rst 16 | CONTRIBUTING_SRC="docs/contributing.rst" 17 | SPHINX_HTMLDIR="${SPHINX_BUILDDIR}/html" 18 | DOCUMENTATION=Documentation 19 | FLAKEPLUSTARGET=2.7 20 | 21 | all: help 22 | 23 | help: 24 | @echo "docs - Build documentation." 25 | @echo "test-all - Run tests for all supported python versions." 26 | @echo "distcheck ---------- - Check distribution for problems." 27 | @echo " test - Run unittests using current python." 28 | @echo " lint ------------ - Check codebase for problems." 29 | @echo " apicheck - Check API reference coverage." 30 | @echo " configcheck - Check configuration reference coverage." 31 | @echo " readmecheck - Check README.rst encoding." 32 | @echo " contribcheck - Check CONTRIBUTING.rst encoding" 33 | @echo " flakes -------- - Check code for syntax and style errors." 34 | @echo " flakecheck - Run flake8 on the source code." 35 | @echo " flakepluscheck - Run flakeplus on the source code." 36 | @echo "readme - Regenerate README.rst file." 37 | @echo "contrib - Regenerate CONTRIBUTING.rst file" 38 | @echo "clean-dist --------- - Clean all distribution build artifacts." 39 | @echo " clean-git-force - Remove all uncommitted files." 40 | @echo " clean ------------ - Non-destructive clean" 41 | @echo " clean-pyc - Remove .pyc/__pycache__ files" 42 | @echo " clean-docs - Remove documentation build artifacts." 43 | @echo " clean-build - Remove setup artifacts." 44 | 45 | clean: clean-docs clean-pyc clean-build 46 | 47 | clean-dist: clean clean-git-force 48 | 49 | Documentation: 50 | (cd "$(SPHINX_DIR)"; $(MAKE) html) 51 | mv "$(SPHINX_HTMLDIR)" $(DOCUMENTATION) 52 | 53 | docs: Documentation 54 | 55 | clean-docs: 56 | -rm -rf "$(SPHINX_BUILDDIR)" 57 | 58 | lint: flakecheck apicheck configcheck readmecheck 59 | 60 | apicheck: 61 | (cd "$(SPHINX_DIR)"; $(MAKE) apicheck) 62 | 63 | configcheck: 64 | (cd "$(SPHINX_DIR)"; $(MAKE) configcheck) 65 | 66 | flakecheck: 67 | $(FLAKE8) "$(PROJ)" 68 | 69 | flakediag: 70 | -$(MAKE) flakecheck 71 | 72 | flakepluscheck: 73 | $(FLAKEPLUS) --$(FLAKEPLUSTARGET) "$(PROJ)" 74 | 75 | flakeplusdiag: 76 | -$(MAKE) flakepluscheck 77 | 78 | flakes: flakediag flakeplusdiag 79 | 80 | clean-readme: 81 | -rm -f $(README) 82 | 83 | readmecheck: 84 | $(ICONV) -f ascii -t ascii $(README) >/dev/null 85 | 86 | $(README): 87 | $(SPHINX2RST) "$(README_SRC)" --ascii > $@ 88 | 89 | readme: clean-readme $(README) readmecheck 90 | 91 | clean-contrib: 92 | -rm -f "$(CONTRIBUTING)" 93 | 94 | $(CONTRIBUTING): 95 | $(SPHINX2RST) "$(CONTRIBUTING_SRC)" > $@ 96 | 97 | contrib: clean-contrib $(CONTRIBUTING) 98 | 99 | clean-pyc: 100 | -find . -type f -a \( -name "*.pyc" -o -name "*$$py.class" \) | xargs rm 101 | -find . 
-type d -name "__pycache__" | xargs rm -r 102 | 103 | removepyc: clean-pyc 104 | 105 | clean-build: 106 | rm -rf build/ dist/ .eggs/ *.egg-info/ .tox/ .coverage cover/ 107 | 108 | clean-git: 109 | $(GIT) clean -xdn 110 | 111 | clean-git-force: 112 | $(GIT) clean -xdf 113 | 114 | test-all: clean-pyc 115 | $(TOX) 116 | 117 | test: 118 | tox -e py 119 | 120 | cov: 121 | $(NOSETESTS) -xv --with-coverage --cover-html --cover-branch 122 | 123 | build: 124 | $(PYTHON) setup.py sdist bdist_wheel 125 | 126 | distcheck: lint test clean 127 | 128 | dist: readme contrib clean-dist build 129 | -------------------------------------------------------------------------------- /t/unit/test_einfo.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import pickle 3 | import sys 4 | 5 | from billiard.einfo import _Code # noqa 6 | from billiard.einfo import _Frame # noqa 7 | from billiard.einfo import _Object # noqa 8 | from billiard.einfo import _Truncated # noqa 9 | from billiard.einfo import ExceptionInfo, Traceback 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | def test_exception_info_log_before_pickle(caplog): 15 | try: 16 | raise RuntimeError("some message") 17 | except Exception: 18 | exception = ExceptionInfo().exception 19 | 20 | logger.exception("failed", exc_info=exception) 21 | assert ' raise RuntimeError("some message")' in caplog.text 22 | assert "RuntimeError: some message" in caplog.text 23 | 24 | 25 | def test_exception_info_log_after_pickle(caplog): 26 | try: 27 | raise RuntimeError("some message") 28 | except Exception: 29 | exception = ExceptionInfo().exception 30 | exception = pickle.loads(pickle.dumps(exception)) 31 | 32 | logger.exception("failed", exc_info=exception) 33 | assert ' raise RuntimeError("some message")' in caplog.text 34 | assert "RuntimeError: some message" in caplog.text 35 | 36 | 37 | def test_exception_info(): 38 | try: 39 | raise ValueError("Test Exception") 40 | except ValueError: 41 | exc_info = sys.exc_info() 42 | e_info = ExceptionInfo(exc_info) 43 | assert isinstance(e_info.type, type) 44 | assert isinstance(e_info.exception, Exception) 45 | assert isinstance(e_info.tb, Traceback) 46 | assert isinstance(e_info.traceback, str) 47 | 48 | 49 | def test_traceback(): 50 | try: 51 | raise ValueError("Test Exception") 52 | except ValueError: 53 | tb = sys.exc_info()[2] 54 | trace = Traceback(tb) 55 | assert isinstance(trace.tb_frame, _Frame) 56 | assert isinstance(trace.tb_lineno, int) 57 | assert isinstance(trace.tb_lasti, int) 58 | assert trace.tb_next is None or isinstance(trace.tb_next, Traceback) 59 | 60 | 61 | def test_frame(): 62 | try: 63 | raise ValueError("Test Exception") 64 | except ValueError: 65 | tb = sys.exc_info()[2] 66 | frame = _Frame(tb.tb_frame) 67 | assert isinstance(frame.f_code, _Code) 68 | assert isinstance(frame.f_lineno, int) 69 | assert isinstance(frame.f_lasti, int) 70 | assert frame.f_globals == { 71 | "__file__": frame.f_globals.get("__file__", "__main__"), 72 | "__name__": frame.f_globals.get("__name__"), 73 | "__loader__": None, 74 | } 75 | 76 | 77 | def test_code(): 78 | try: 79 | raise ValueError("Test Exception") 80 | except ValueError: 81 | tb = sys.exc_info()[2] 82 | frame = tb.tb_frame 83 | code = _Code(frame.f_code) 84 | assert isinstance(code.co_filename, str) 85 | assert isinstance(code.co_name, str) 86 | assert isinstance(code.co_argcount, int) 87 | if sys.version_info >= (3, 11): 88 | assert callable(code.co_positions) 89 | assert next(code.co_positions()) == 
(77, 77, 0, 0) 90 | 91 | 92 | def test_object_init(): 93 | obj = _Object(a=1, b=2, c=3) 94 | assert obj.a == 1 95 | assert obj.b == 2 96 | assert obj.c == 3 97 | 98 | 99 | if sys.version_info >= (3, 11): 100 | 101 | def test_object_co_positions(): 102 | obj = _Object() 103 | 104 | default = ((None, None, None, None),) 105 | # Test that it returns the default co_positions 106 | assert list(iter(obj.co_positions())) == list(default) 107 | 108 | # Test setting co_positions 109 | new_value = ((1, 2, 3, 4),) 110 | obj.co_positions = new_value 111 | assert list(iter(obj.co_positions())) == list(new_value) 112 | 113 | def test_truncated_co_positions(): 114 | truncated = _Truncated() 115 | 116 | assert list(iter(truncated.co_positions())) == list( 117 | iter(truncated.tb_frame.co_positions()) 118 | ) 119 | -------------------------------------------------------------------------------- /billiard/popen_spawn_win32.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | import msvcrt 4 | import signal 5 | import sys 6 | 7 | from . import context 8 | from . import spawn 9 | from . import reduction 10 | 11 | from .compat import _winapi 12 | 13 | __all__ = ['Popen'] 14 | 15 | # 16 | # 17 | # 18 | 19 | TERMINATE = 0x10000 20 | WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) 21 | WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") 22 | 23 | # 24 | # We define a Popen class similar to the one from subprocess, but 25 | # whose constructor takes a process object as its argument. 26 | # 27 | 28 | 29 | if sys.platform == 'win32': 30 | try: 31 | from _winapi import CreateProcess, GetExitCodeProcess 32 | close_thread_handle = _winapi.CloseHandle 33 | except ImportError: # Py2.7 34 | from _subprocess import CreateProcess, GetExitCodeProcess 35 | 36 | def close_thread_handle(handle): 37 | handle.Close() 38 | 39 | 40 | class Popen: 41 | ''' 42 | Start a subprocess to run the code of a process object 43 | ''' 44 | method = 'spawn' 45 | sentinel = None 46 | 47 | def __init__(self, process_obj): 48 | os.environ["MULTIPROCESSING_FORKING_DISABLE"] = "1" 49 | spawn._Django_old_layout_hack__save() 50 | prep_data = spawn.get_preparation_data(process_obj._name) 51 | 52 | # read end of pipe will be "stolen" by the child process 53 | # -- see spawn_main() in spawn.py. 
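# In outline: the parent keeps the writable end, wraps it in a file
# object, and pickles prep_data plus process_obj into it; only the raw
# read handle travels on the child's command line, where spawn_main()
# rebuilds a file object from it and unpickles both again.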
54 | rhandle, whandle = _winapi.CreatePipe(None, 0) 55 | wfd = msvcrt.open_osfhandle(whandle, 0) 56 | cmd = spawn.get_command_line(parent_pid=os.getpid(), 57 | pipe_handle=rhandle) 58 | cmd = ' '.join('"%s"' % x for x in cmd) 59 | 60 | with io.open(wfd, 'wb', closefd=True) as to_child: 61 | # start process 62 | try: 63 | hp, ht, pid, tid = CreateProcess( 64 | spawn.get_executable(), cmd, 65 | None, None, False, 0, None, None, None) 66 | close_thread_handle(ht) 67 | except: 68 | _winapi.CloseHandle(rhandle) 69 | raise 70 | 71 | # set attributes of self 72 | self.pid = pid 73 | self.returncode = None 74 | self._handle = hp 75 | self.sentinel = int(hp) 76 | 77 | # send information to child 78 | context.set_spawning_popen(self) 79 | try: 80 | reduction.dump(prep_data, to_child) 81 | reduction.dump(process_obj, to_child) 82 | finally: 83 | context.set_spawning_popen(None) 84 | 85 | def close(self): 86 | if self.sentinel is not None: 87 | try: 88 | _winapi.CloseHandle(self.sentinel) 89 | finally: 90 | self.sentinel = None 91 | 92 | def duplicate_for_child(self, handle): 93 | assert self is context.get_spawning_popen() 94 | return reduction.duplicate(handle, self.sentinel) 95 | 96 | def wait(self, timeout=None): 97 | if self.returncode is None: 98 | if timeout is None: 99 | msecs = _winapi.INFINITE 100 | else: 101 | msecs = max(0, int(timeout * 1000 + 0.5)) 102 | 103 | res = _winapi.WaitForSingleObject(int(self._handle), msecs) 104 | if res == _winapi.WAIT_OBJECT_0: 105 | code = GetExitCodeProcess(self._handle) 106 | if code == TERMINATE: 107 | code = -signal.SIGTERM 108 | self.returncode = code 109 | 110 | return self.returncode 111 | 112 | def poll(self): 113 | return self.wait(timeout=0) 114 | 115 | def terminate(self): 116 | if self.returncode is None: 117 | try: 118 | _winapi.TerminateProcess(int(self._handle), TERMINATE) 119 | except OSError: 120 | if self.wait(timeout=1.0) is None: 121 | raise 122 | -------------------------------------------------------------------------------- /t/unit/test_pool.py: -------------------------------------------------------------------------------- 1 | import billiard.pool 2 | from billiard import get_context 3 | import time 4 | import pytest 5 | 6 | 7 | def func(x): 8 | if x == 2: 9 | raise ValueError 10 | return x 11 | 12 | 13 | def get_on_ready_count(): 14 | import inspect 15 | worker = inspect.stack()[1].frame.f_locals['self'] 16 | return worker.on_ready_counter.value 17 | 18 | def simple_task(x): 19 | return x * 2 20 | 21 | class test_pool: 22 | def test_raises(self): 23 | pool = billiard.pool.Pool() 24 | assert pool.did_start_ok() is True 25 | pool.close() 26 | pool.terminate() 27 | 28 | def test_timeout_handler_iterates_with_cache(self): 29 | # Given a pool 30 | pool = billiard.pool.Pool() 31 | # If I have a cache containing async results 32 | cache = {n: pool.apply_async(n) for n in range(4)} 33 | # And a TimeoutHandler with that cache 34 | timeout_handler = pool.TimeoutHandler(pool._pool, cache, 0, 0) 35 | # If I call to handle the timeouts I expect no exception 36 | next(timeout_handler.handle_timeouts()) 37 | 38 | def test_exception_traceback_present(self): 39 | pool = billiard.pool.Pool(1) 40 | results = [pool.apply_async(func, (i,)) for i in range(3)] 41 | 42 | time.sleep(1) 43 | pool.close() 44 | pool.join() 45 | pool.terminate() 46 | 47 | for i, res in enumerate(results): 48 | if i == 2: 49 | with pytest.raises(ValueError): 50 | res.get() 51 | 52 | def test_on_ready_counter_is_synchronized(self): 53 | for ctx in ('spawn', 'fork', 
'forkserver'): 54 | pool = billiard.pool.Pool(processes=1, context=get_context(ctx)) 55 | pool.apply_async(func, (1,)).get(1) 56 | on_ready_counter = pool.apply_async(get_on_ready_count, ).get(1) 57 | assert on_ready_counter == 1 58 | pool.close() 59 | pool.join() 60 | pool.terminate() 61 | 62 | def test_graceful_shutdown_delivers_results(self): 63 | """Test that queued results are delivered during pool shutdown. 64 | 65 | Specifically, this test verifies that when _terminate_pool() is called, 66 | the ResultHandler.finish_at_shutdown() continues processing results 67 | that workers have placed in the outqueue. 68 | """ 69 | 70 | # Create pool with threads=False so that the result handler thread does 71 | # not start and the task results are allowed to build up in the queue. 72 | pool = billiard.pool.Pool(processes=2, threads=False) 73 | 74 | # Submit tasks so that results are queued but not processed. 75 | results = [pool.apply_async(simple_task, (i,)) for i in range(8)] 76 | 77 | # Allow a small amount of time for tasks to complete. 78 | time.sleep(0.5) 79 | 80 | # Close and join the pool to ensure workers stop. 81 | pool.close() 82 | pool.join() 83 | 84 | # Call the _terminate_pool() class method to trigger the finish_at_shutdown() 85 | # function that will process results in the queue. Normally _terminate_pool() 86 | # is called by a Finalize object when the Pool object is destroyed. We cannot 87 | # call pool.terminate() here because it will call the Finalize object, which 88 | # won't do anything until the Pool object is destroyed at the end of this test. 89 | # We can simulate the shutdown behaviour by calling _terminate_pool() directly. 90 | billiard.pool.Pool._terminate_pool( 91 | pool._taskqueue, 92 | pool._inqueue, 93 | pool._outqueue, 94 | pool._pool, 95 | pool._worker_handler, 96 | pool._task_handler, 97 | pool._result_handler, 98 | pool._cache, 99 | pool._timeout_handler, 100 | pool._help_stuff_finish_args() 101 | ) 102 | 103 | # Cancel the Finalize object to prevent _terminate_pool() from being called 104 | # a second time when the Pool object is destroyed. 105 | pool._terminate.cancel() 106 | 107 | # Verify that all results were delivered by finish_at_shutdown() and can be 108 | # retrieved. 109 | for i, result in enumerate(results): 110 | assert result.get() == i * 2 111 | -------------------------------------------------------------------------------- /billiard/common.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains utilities added by billiard, to keep 3 | "non-core" functionality out of ``.util``.""" 4 | 5 | import os 6 | import signal 7 | import sys 8 | 9 | import pickle 10 | 11 | from .exceptions import RestartFreqExceeded 12 | from time import monotonic 13 | 14 | pickle_load = pickle.load 15 | pickle_loads = pickle.loads 16 | 17 | # cPickle.loads does not support buffer() objects, 18 | # but we can just create a StringIO and use load. 
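# A minimal sketch of that workaround (``obj`` stands for any picklable
# value; this mirrors the pickle_loads() defined further down):
#
#     raw = memoryview(pickle.dumps(obj))    # a buffer, not bytes
#     pickle_load(BytesIO(raw))              # round-trips back to obj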
19 | from io import BytesIO 20 | 21 | 22 | SIGMAP = dict( 23 | (getattr(signal, n), n) for n in dir(signal) if n.startswith('SIG') 24 | ) 25 | for _alias_sig in ('SIGHUP', 'SIGABRT'): 26 | try: 27 | # Alias for deprecated signal overwrites the name we want 28 | SIGMAP[getattr(signal, _alias_sig)] = _alias_sig 29 | except AttributeError: 30 | pass 31 | 32 | 33 | TERM_SIGNAL, TERM_SIGNAME = signal.SIGTERM, 'SIGTERM' 34 | REMAP_SIGTERM = os.environ.get('REMAP_SIGTERM') 35 | if REMAP_SIGTERM: 36 | TERM_SIGNAL, TERM_SIGNAME = ( 37 | getattr(signal, REMAP_SIGTERM), REMAP_SIGTERM) 38 | 39 | 40 | TERMSIGS_IGNORE = {'SIGTERM'} if REMAP_SIGTERM else set() 41 | TERMSIGS_FORCE = {'SIGQUIT'} if REMAP_SIGTERM else set() 42 | 43 | EX_SOFTWARE = 70 44 | 45 | TERMSIGS_DEFAULT = { 46 | 'SIGHUP', 47 | 'SIGQUIT', 48 | TERM_SIGNAME, 49 | 'SIGUSR1', 50 | } 51 | 52 | TERMSIGS_FULL = { 53 | 'SIGHUP', 54 | 'SIGQUIT', 55 | 'SIGTRAP', 56 | 'SIGABRT', 57 | 'SIGEMT', 58 | 'SIGSYS', 59 | 'SIGPIPE', 60 | 'SIGALRM', 61 | TERM_SIGNAME, 62 | 'SIGXCPU', 63 | 'SIGXFSZ', 64 | 'SIGVTALRM', 65 | 'SIGPROF', 66 | 'SIGUSR1', 67 | 'SIGUSR2', 68 | } 69 | 70 | #: set by signal handlers just before calling exit. 71 | #: if this is true after the sighandler returns it means that something 72 | #: went wrong while terminating the process, and :func:`os._exit` 73 | #: must be called ASAP. 74 | _should_have_exited = [False] 75 | 76 | 77 | def human_status(status): 78 | if (status or 0) < 0: 79 | try: 80 | return 'signal {0} ({1})'.format(-status, SIGMAP[-status]) 81 | except KeyError: 82 | return 'signal {0}'.format(-status) 83 | return 'exitcode {0}'.format(status) 84 | 85 | 86 | def pickle_loads(s, load=pickle_load): 87 | # used to support buffer objects 88 | return load(BytesIO(s)) 89 | 90 | 91 | def maybe_setsignal(signum, handler): 92 | try: 93 | signal.signal(signum, handler) 94 | except (OSError, AttributeError, ValueError, RuntimeError): 95 | pass 96 | 97 | 98 | def _shutdown_cleanup(signum, frame): 99 | # we will exit here so if the signal is received a second time 100 | # we can be sure that something is very wrong and we may be in 101 | # a crashing loop. 102 | if _should_have_exited[0]: 103 | os._exit(EX_SOFTWARE) 104 | maybe_setsignal(signum, signal.SIG_DFL) 105 | _should_have_exited[0] = True 106 | sys.exit(-(256 - signum)) 107 | 108 | 109 | def signum(sig): 110 | return getattr(signal, sig, None) 111 | 112 | 113 | def _should_override_term_signal(sig, current): 114 | return ( 115 | sig in TERMSIGS_FORCE or 116 | (current is not None and current != signal.SIG_IGN) 117 | ) 118 | 119 | 120 | def reset_signals(handler=_shutdown_cleanup, full=False): 121 | for sig in TERMSIGS_FULL if full else TERMSIGS_DEFAULT: 122 | num = signum(sig) 123 | if num: 124 | if _should_override_term_signal(sig, signal.getsignal(num)): 125 | maybe_setsignal(num, handler) 126 | for sig in TERMSIGS_IGNORE: 127 | num = signum(sig) 128 | if num: 129 | maybe_setsignal(num, signal.SIG_IGN) 130 | 131 | 132 | class restart_state: 133 | RestartFreqExceeded = RestartFreqExceeded 134 | 135 | def __init__(self, maxR, maxT): 136 | self.maxR, self.maxT = maxR, maxT 137 | self.R, self.T = 0, None 138 | 139 | def step(self, now=None): 140 | now = monotonic() if now is None else now 141 | R = self.R 142 | if self.T and now - self.T >= self.maxT: 143 | # maxT passed, reset counter and time passed. 
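# Worked example (mirrors test_time_passed_resets_counter in the test
# suite): with restart_state(100, 10) and R == 100, a step() at T + 20
# takes this branch, zeroes R, then counts itself below, so R ends up
# as 1 instead of raising RestartFreqExceeded.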
144 | self.T, self.R = now, 0 145 | elif self.maxR and self.R >= self.maxR: 146 | # verify that R has a value as the result handler 147 | # resets this when a job is accepted. If a job is accepted 148 | # the startup probably went fine (startup restart burst 149 | # protection) 150 | if self.R: # pragma: no cover 151 | self.R = 0 # reset in case someone catches the error 152 | raise self.RestartFreqExceeded("%r in %rs" % (R, self.maxT)) 153 | # first run sets T 154 | if self.T is None: 155 | self.T = now 156 | self.R += 1 157 | -------------------------------------------------------------------------------- /Modules/_billiard/multiprocessing.h: -------------------------------------------------------------------------------- 1 | #ifndef MULTIPROCESSING_H 2 | #define MULTIPROCESSING_H 3 | 4 | #define PY_SSIZE_T_CLEAN 5 | 6 | #ifdef __sun 7 | /* The control message API is only available on Solaris 8 | if XPG 4.2 or later is requested. */ 9 | #define _XOPEN_SOURCE 500 10 | #endif 11 | 12 | #include "Python.h" 13 | #include "structmember.h" 14 | #include "pythread.h" 15 | 16 | /* 17 | * Platform includes and definitions 18 | */ 19 | 20 | #ifdef MS_WINDOWS 21 | # define WIN32_LEAN_AND_MEAN 22 | # include <windows.h> 23 | # include <winsock2.h> 24 | # include <process.h> /* getpid() */ 25 | # ifdef Py_DEBUG 26 | # include <crtdbg.h> 27 | # endif 28 | # define SEM_HANDLE HANDLE 29 | # define SEM_VALUE_MAX LONG_MAX 30 | #else 31 | # include <fcntl.h> /* O_CREAT and O_EXCL */ 32 | # include <netinet/in.h> 33 | # include <sys/socket.h> 34 | # include <sys/uio.h> 35 | # include <arpa/inet.h> /* htonl() and ntohl() */ 36 | # if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) 37 | # include <semaphore.h> 38 | typedef sem_t *SEM_HANDLE; 39 | # endif 40 | # define HANDLE long 41 | # define SOCKET int 42 | # define BOOL int 43 | # define UINT32 uint32_t 44 | # define INT32 int32_t 45 | # define TRUE 1 46 | # define FALSE 0 47 | # define INVALID_HANDLE_VALUE (-1) 48 | #endif 49 | 50 | /* 51 | * Issue 3110 - Solaris does not define SEM_VALUE_MAX 52 | */ 53 | #ifndef SEM_VALUE_MAX 54 | #if defined(HAVE_SYSCONF) && defined(_SC_SEM_VALUE_MAX) 55 | # define SEM_VALUE_MAX sysconf(_SC_SEM_VALUE_MAX) 56 | #elif defined(_SEM_VALUE_MAX) 57 | # define SEM_VALUE_MAX _SEM_VALUE_MAX 58 | #elif defined(_POSIX_SEM_VALUE_MAX) 59 | # define SEM_VALUE_MAX _POSIX_SEM_VALUE_MAX 60 | #else 61 | # define SEM_VALUE_MAX INT_MAX 62 | #endif 63 | #endif 64 | 65 | 66 | /* 67 | * Make sure Py_ssize_t available 68 | */ 69 | 70 | #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) 71 | typedef int Py_ssize_t; 72 | # define PY_SSIZE_T_MAX INT_MAX 73 | # define PY_SSIZE_T_MIN INT_MIN 74 | # define F_PY_SSIZE_T "i" 75 | # define PyInt_FromSsize_t(n) PyInt_FromLong((long)n) 76 | #else 77 | # define F_PY_SSIZE_T "n" 78 | #endif 79 | 80 | /* 81 | * Format codes 82 | */ 83 | 84 | #if SIZEOF_VOID_P == SIZEOF_LONG 85 | # define F_POINTER "k" 86 | # define T_POINTER T_ULONG 87 | #elif defined(HAVE_LONG_LONG) && (SIZEOF_VOID_P == SIZEOF_LONG_LONG) 88 | # define F_POINTER "K" 89 | # define T_POINTER T_ULONGLONG 90 | #else 91 | # error "can't find format code for unsigned integer of same size as void*" 92 | #endif 93 | 94 | #ifdef MS_WINDOWS 95 | # define F_HANDLE F_POINTER 96 | # define T_HANDLE T_POINTER 97 | # define F_SEM_HANDLE F_HANDLE 98 | # define T_SEM_HANDLE T_HANDLE 99 | # define F_DWORD "k" 100 | # define T_DWORD T_ULONG 101 | #else 102 | # define F_HANDLE "i" 103 | # define T_HANDLE T_INT 104 | # define F_SEM_HANDLE F_POINTER 105 | # define T_SEM_HANDLE T_POINTER 106 | #endif 107 | 108 | #if PY_VERSION_HEX >= 0x03000000 109 | # define
F_RBUFFER "y" 110 | #else 111 | # define F_RBUFFER "s" 112 | #endif 113 | 114 | /* 115 | * Error codes which can be returned by functions called without GIL 116 | */ 117 | 118 | #define MP_SUCCESS (0) 119 | #define MP_STANDARD_ERROR (-1) 120 | #define MP_MEMORY_ERROR (-1001) 121 | #define MP_END_OF_FILE (-1002) 122 | #define MP_EARLY_END_OF_FILE (-1003) 123 | #define MP_BAD_MESSAGE_LENGTH (-1004) 124 | #define MP_SOCKET_ERROR (-1005) 125 | #define MP_EXCEPTION_HAS_BEEN_SET (-1006) 126 | 127 | PyObject *Billiard_SetError(PyObject *Type, int num); 128 | 129 | /* 130 | * Externs - not all will really exist on all platforms 131 | */ 132 | 133 | extern PyObject *Billiard_BufferTooShort; 134 | extern PyTypeObject BilliardSemLockType; 135 | extern PyObject *Billiard_semlock_unlink(PyObject *ignore, PyObject *args); 136 | extern HANDLE sigint_event; 137 | 138 | /* 139 | * Py3k compatibility 140 | */ 141 | 142 | #if PY_VERSION_HEX >= 0x03000000 143 | # define PICKLE_MODULE "pickle" 144 | # define FROM_FORMAT PyUnicode_FromFormat 145 | # define PyInt_FromLong PyLong_FromLong 146 | # define PyInt_FromSsize_t PyLong_FromSsize_t 147 | #else 148 | # define PICKLE_MODULE "cPickle" 149 | # define FROM_FORMAT PyString_FromFormat 150 | #endif 151 | 152 | #ifndef PyVarObject_HEAD_INIT 153 | # define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, 154 | #endif 155 | 156 | #ifndef Py_TPFLAGS_HAVE_WEAKREFS 157 | # define Py_TPFLAGS_HAVE_WEAKREFS 0 158 | #endif 159 | 160 | /* 161 | * Connection definition 162 | */ 163 | 164 | #define CONNECTION_BUFFER_SIZE 131072 165 | 166 | typedef struct { 167 | PyObject_HEAD 168 | HANDLE handle; 169 | int flags; 170 | PyObject *weakreflist; 171 | char buffer[CONNECTION_BUFFER_SIZE]; 172 | } BilliardConnectionObject; 173 | 174 | /* 175 | * Miscellaneous 176 | */ 177 | 178 | #define MAX_MESSAGE_LENGTH 0x7fffffff 179 | 180 | #ifndef Py_MIN 181 | # define Py_MIN(x, y) (((x) > (y)) ? (y) : (x)) 182 | #endif 183 | 184 | #endif /* MULTIPROCESSING_H */ 185 | -------------------------------------------------------------------------------- /billiard/dummy/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Support for the API of the multiprocessing package using threads 3 | # 4 | # multiprocessing/dummy/__init__.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk 7 | # All rights reserved. 8 | # 9 | # Redistribution and use in source and binary forms, with or without 10 | # modification, are permitted provided that the following conditions 11 | # are met: 12 | # 13 | # 1. Redistributions of source code must retain the above copyright 14 | # notice, this list of conditions and the following disclaimer. 15 | # 2. Redistributions in binary form must reproduce the above copyright 16 | # notice, this list of conditions and the following disclaimer in the 17 | # documentation and/or other materials provided with the distribution. 18 | # 3. Neither the name of author nor the names of any contributors may be 19 | # used to endorse or promote products derived from this software 20 | # without specific prior written permission. 21 | # 22 | # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND 23 | # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 | # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 | # ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 26 | # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 | # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 | # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 | # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 | # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 | # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 | # SUCH DAMAGE. 33 | # 34 | 35 | # 36 | # Imports 37 | # 38 | 39 | import threading 40 | import sys 41 | import weakref 42 | import array 43 | 44 | from threading import Lock, RLock, Semaphore, BoundedSemaphore 45 | from threading import Event 46 | 47 | from queue import Queue 48 | 49 | from billiard.connection import Pipe 50 | 51 | __all__ = [ 52 | 'Process', 'current_process', 'active_children', 'freeze_support', 53 | 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 54 | 'Event', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' 55 | ] 56 | 57 | 58 | class DummyProcess(threading.Thread): 59 | 60 | def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): 61 | threading.Thread.__init__(self, group, target, name, args, kwargs) 62 | self._pid = None 63 | self._children = weakref.WeakKeyDictionary() 64 | self._start_called = False 65 | self._parent = current_process() 66 | 67 | def start(self): 68 | assert self._parent is current_process() 69 | self._start_called = True 70 | if hasattr(self._parent, '_children'): 71 | self._parent._children[self] = None 72 | threading.Thread.start(self) 73 | 74 | @property 75 | def exitcode(self): 76 | if self._start_called and not self.is_alive(): 77 | return 0 78 | else: 79 | return None 80 | 81 | 82 | try: 83 | _Condition = threading._Condition 84 | except AttributeError: # Py3 85 | _Condition = threading.Condition # noqa 86 | 87 | 88 | class Condition(_Condition): 89 | if sys.version_info[0] == 3: 90 | notify_all = _Condition.notifyAll 91 | else: 92 | notify_all = _Condition.notifyAll.__func__ 93 | 94 | 95 | Process = DummyProcess 96 | current_process = threading.current_thread 97 | current_process()._children = weakref.WeakKeyDictionary() 98 | 99 | 100 | def active_children(): 101 | children = current_process()._children 102 | for p in list(children): 103 | if not p.is_alive(): 104 | children.pop(p, None) 105 | return list(children) 106 | 107 | 108 | def freeze_support(): 109 | pass 110 | 111 | 112 | class Namespace(object): 113 | 114 | def __init__(self, **kwds): 115 | self.__dict__.update(kwds) 116 | 117 | def __repr__(self): 118 | items = list(self.__dict__.items()) 119 | temp = [] 120 | for name, value in items: 121 | if not name.startswith('_'): 122 | temp.append('%s=%r' % (name, value)) 123 | temp.sort() 124 | return '%s(%s)' % (self.__class__.__name__, str.join(', ', temp)) 125 | 126 | 127 | dict = dict 128 | list = list 129 | 130 | 131 | def Array(typecode, sequence, lock=True): 132 | return array.array(typecode, sequence) 133 | 134 | 135 | class Value(object): 136 | 137 | def __init__(self, typecode, value, lock=True): 138 | self._typecode = typecode 139 | self._value = value 140 | 141 | def _get(self): 142 | return self._value 143 | 144 | def _set(self, value): 145 | self._value = value 146 | value = property(_get, _set) 147 | 148 | def __repr__(self): 149 | return '<%r(%r, %r)>' % (type(self).__name__, 150 | self._typecode, self._value) 151 | 152 | 153 | def Manager(): 154 | return 
sys.modules[__name__] 155 | 156 | 157 | def shutdown(): 158 | pass 159 | 160 | 161 | def Pool(processes=None, initializer=None, initargs=()): 162 | from billiard.pool import ThreadPool 163 | return ThreadPool(processes, initializer, initargs) 164 | 165 | 166 | JoinableQueue = Queue 167 | -------------------------------------------------------------------------------- /billiard/semaphore_tracker.py: -------------------------------------------------------------------------------- 1 | # 2 | # On Unix we run a server process which keeps track of unlinked 3 | # semaphores. The server ignores SIGINT and SIGTERM and reads from a 4 | # pipe. Every other process of the program has a copy of the writable 5 | # end of the pipe, so we get EOF when all other processes have exited. 6 | # Then the server process unlinks any remaining semaphore names. 7 | # 8 | # This is important because the system only supports a limited number 9 | # of named semaphores, and they will not be automatically removed till 10 | # the next reboot. Without this semaphore tracker process, "killall 11 | # python" would probably leave unlinked semaphores. 12 | # 13 | 14 | import io 15 | import os 16 | import signal 17 | import sys 18 | import threading 19 | import warnings 20 | from ._ext import _billiard 21 | 22 | from . import spawn 23 | from . import util 24 | 25 | from .compat import spawnv_passfds 26 | 27 | __all__ = ['ensure_running', 'register', 'unregister'] 28 | 29 | 30 | class SemaphoreTracker: 31 | 32 | def __init__(self): 33 | self._lock = threading.Lock() 34 | self._fd = None 35 | 36 | def getfd(self): 37 | self.ensure_running() 38 | return self._fd 39 | 40 | def ensure_running(self): 41 | '''Make sure that semaphore tracker process is running. 42 | 43 | This can be run from any process. 
Usually a child process will use 44 | the semaphore created by its parent.''' 45 | with self._lock: 46 | if self._fd is not None: 47 | return 48 | fds_to_pass = [] 49 | try: 50 | fds_to_pass.append(sys.stderr.fileno()) 51 | except Exception: 52 | pass 53 | cmd = 'from billiard.semaphore_tracker import main;main(%d)' 54 | r, w = os.pipe() 55 | try: 56 | fds_to_pass.append(r) 57 | # process will out live us, so no need to wait on pid 58 | exe = spawn.get_executable() 59 | args = [exe] + util._args_from_interpreter_flags() 60 | args += ['-c', cmd % r] 61 | spawnv_passfds(exe, args, fds_to_pass) 62 | except: 63 | os.close(w) 64 | raise 65 | else: 66 | self._fd = w 67 | finally: 68 | os.close(r) 69 | 70 | def register(self, name): 71 | '''Register name of semaphore with semaphore tracker.''' 72 | self._send('REGISTER', name) 73 | 74 | def unregister(self, name): 75 | '''Unregister name of semaphore with semaphore tracker.''' 76 | self._send('UNREGISTER', name) 77 | 78 | def _send(self, cmd, name): 79 | self.ensure_running() 80 | msg = '{0}:{1}\n'.format(cmd, name).encode('ascii') 81 | if len(name) > 512: 82 | # posix guarantees that writes to a pipe of less than PIPE_BUF 83 | # bytes are atomic, and that PIPE_BUF >= 512 84 | raise ValueError('name too long') 85 | nbytes = os.write(self._fd, msg) 86 | assert nbytes == len(msg) 87 | 88 | 89 | _semaphore_tracker = SemaphoreTracker() 90 | ensure_running = _semaphore_tracker.ensure_running 91 | register = _semaphore_tracker.register 92 | unregister = _semaphore_tracker.unregister 93 | getfd = _semaphore_tracker.getfd 94 | 95 | 96 | def main(fd): 97 | '''Run semaphore tracker.''' 98 | # protect the process from ^C and "killall python" etc 99 | signal.signal(signal.SIGINT, signal.SIG_IGN) 100 | signal.signal(signal.SIGTERM, signal.SIG_IGN) 101 | 102 | for f in (sys.stdin, sys.stdout): 103 | try: 104 | f.close() 105 | except Exception: 106 | pass 107 | 108 | cache = set() 109 | try: 110 | # keep track of registered/unregistered semaphores 111 | with io.open(fd, 'rb') as f: 112 | for line in f: 113 | try: 114 | cmd, name = line.strip().split(b':') 115 | if cmd == b'REGISTER': 116 | cache.add(name) 117 | elif cmd == b'UNREGISTER': 118 | cache.remove(name) 119 | else: 120 | raise RuntimeError('unrecognized command %r' % cmd) 121 | except Exception: 122 | try: 123 | sys.excepthook(*sys.exc_info()) 124 | except: 125 | pass 126 | finally: 127 | # all processes have terminated; cleanup any remaining semaphores 128 | if cache: 129 | try: 130 | warnings.warn('semaphore_tracker: There appear to be %d ' 131 | 'leaked semaphores to clean up at shutdown' % 132 | len(cache)) 133 | except Exception: 134 | pass 135 | for name in cache: 136 | # For some reason the process which created and registered this 137 | # semaphore has failed to unregister it. Presumably it has died. 138 | # We therefore unlink it. 
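# (POSIX detail: sem_unlink() only removes the name; any process that
# still has the semaphore open keeps a valid handle until it closes
# it, so unlinking here cannot break a straggler mid-shutdown.)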
139 | try: 140 | name = name.decode('ascii') 141 | try: 142 | _billiard.sem_unlink(name) 143 | except Exception as e: 144 | warnings.warn('semaphore_tracker: %r: %s' % (name, e)) 145 | finally: 146 | pass 147 | -------------------------------------------------------------------------------- /billiard/einfo.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import traceback 3 | 4 | __all__ = ['ExceptionInfo', 'Traceback'] 5 | 6 | DEFAULT_MAX_FRAMES = sys.getrecursionlimit() // 8 7 | 8 | 9 | class _Code: 10 | 11 | def __init__(self, code): 12 | self.co_filename = code.co_filename 13 | self.co_name = code.co_name 14 | self.co_argcount = code.co_argcount 15 | self.co_cellvars = () 16 | self.co_firstlineno = code.co_firstlineno 17 | self.co_flags = code.co_flags 18 | self.co_freevars = () 19 | self.co_code = b'' 20 | self.co_lnotab = b'' 21 | self.co_names = code.co_names 22 | self.co_nlocals = code.co_nlocals 23 | self.co_stacksize = code.co_stacksize 24 | self.co_varnames = () 25 | if sys.version_info >= (3, 11): 26 | self.co_qualname = code.co_qualname 27 | self._co_positions = list(code.co_positions()) 28 | 29 | if sys.version_info >= (3, 11): 30 | @property 31 | def co_positions(self): 32 | return self._co_positions.__iter__ 33 | 34 | 35 | class _Frame: 36 | Code = _Code 37 | 38 | def __init__(self, frame): 39 | self.f_builtins = {} 40 | self.f_globals = { 41 | "__file__": frame.f_globals.get("__file__", "__main__"), 42 | "__name__": frame.f_globals.get("__name__"), 43 | "__loader__": None, 44 | } 45 | self.f_locals = fl = {} 46 | try: 47 | fl["__traceback_hide__"] = frame.f_locals["__traceback_hide__"] 48 | except KeyError: 49 | pass 50 | self.f_back = None 51 | self.f_trace = None 52 | self.f_exc_traceback = None 53 | self.f_exc_type = None 54 | self.f_exc_value = None 55 | self.f_code = self.Code(frame.f_code) 56 | self.f_lineno = frame.f_lineno 57 | self.f_lasti = frame.f_lasti 58 | # don't want to hit https://bugs.python.org/issue21967 59 | self.f_restricted = False 60 | 61 | if sys.version_info >= (3, 11): 62 | @property 63 | def co_positions(self): 64 | return self.f_code.co_positions 65 | 66 | 67 | class _Object: 68 | 69 | def __init__(self, **kw): 70 | [setattr(self, k, v) for k, v in kw.items()] 71 | 72 | if sys.version_info >= (3, 11): 73 | __default_co_positions__ = ((None, None, None, None),) 74 | 75 | @property 76 | def co_positions(self): 77 | return getattr( 78 | self, 79 | "_co_positions", 80 | self.__default_co_positions__ 81 | ).__iter__ 82 | 83 | @co_positions.setter 84 | def co_positions(self, value): 85 | self._co_positions = value # noqa 86 | 87 | 88 | class _Truncated: 89 | 90 | def __init__(self): 91 | self.tb_lineno = -1 92 | self.tb_frame = _Object( 93 | f_globals={"__file__": "", 94 | "__name__": "", 95 | "__loader__": None}, 96 | f_fileno=None, 97 | f_code=_Object(co_filename="...", 98 | co_name="[rest of traceback truncated]"), 99 | ) 100 | self.tb_next = None 101 | self.tb_lasti = 0 102 | 103 | if sys.version_info >= (3, 11): 104 | @property 105 | def co_positions(self): 106 | return self.tb_frame.co_positions 107 | 108 | 109 | class Traceback: 110 | Frame = _Frame 111 | 112 | def __init__(self, tb, max_frames=DEFAULT_MAX_FRAMES, depth=0): 113 | self.tb_frame = self.Frame(tb.tb_frame) 114 | self.tb_lineno = tb.tb_lineno 115 | self.tb_lasti = tb.tb_lasti 116 | self.tb_next = None 117 | if tb.tb_next is not None: 118 | if depth <= max_frames: 119 | self.tb_next = Traceback(tb.tb_next, max_frames, depth + 1) 
120 | else: 121 | self.tb_next = _Truncated() 122 | 123 | 124 | class RemoteTraceback(Exception): 125 | def __init__(self, tb): 126 | self.tb = tb 127 | 128 | def __str__(self): 129 | return self.tb 130 | 131 | 132 | class ExceptionWithTraceback(Exception): 133 | def __init__(self, exc, tb): 134 | self.exc = exc 135 | self.tb = '\n"""\n%s"""' % tb 136 | super().__init__() 137 | 138 | def __str__(self): 139 | return self.tb 140 | 141 | def __reduce__(self): 142 | return rebuild_exc, (self.exc, self.tb) 143 | 144 | 145 | def rebuild_exc(exc, tb): 146 | exc.__cause__ = RemoteTraceback(tb) 147 | return exc 148 | 149 | 150 | class ExceptionInfo: 151 | """Exception wrapping an exception and its traceback. 152 | 153 | :param exc_info: The exception info tuple as returned by 154 | :func:`sys.exc_info`. 155 | 156 | """ 157 | 158 | #: Exception type. 159 | type = None 160 | 161 | #: Exception instance. 162 | exception = None 163 | 164 | #: Pickleable traceback instance for use with :mod:`traceback` 165 | tb = None 166 | 167 | #: String representation of the traceback. 168 | traceback = None 169 | 170 | #: Set to true if this is an internal error. 171 | internal = False 172 | 173 | def __init__(self, exc_info=None, internal=False): 174 | self.type, exception, tb = exc_info or sys.exc_info() 175 | try: 176 | self.tb = Traceback(tb) 177 | self.traceback = ''.join( 178 | traceback.format_exception(self.type, exception, tb), 179 | ) 180 | self.internal = internal 181 | finally: 182 | del tb 183 | self.exception = ExceptionWithTraceback(exception, self.traceback) 184 | 185 | def __str__(self): 186 | return self.traceback 187 | 188 | def __repr__(self): 189 | return "<%s: %r>" % (self.__class__.__name__, self.exception, ) 190 | 191 | @property 192 | def exc_info(self): 193 | return self.type, self.exception, self.tb 194 | -------------------------------------------------------------------------------- /billiard/resource_sharer.py: -------------------------------------------------------------------------------- 1 | # 2 | # We use a background thread for sharing fds on Unix, and for sharing 3 | # sockets on Windows. 4 | # 5 | # A client which wants to pickle a resource registers it with the resource 6 | # sharer and gets an identifier in return. The unpickling process will connect 7 | # to the resource sharer, sends the identifier and its pid, and then receives 8 | # the resource. 9 | # 10 | 11 | import os 12 | import signal 13 | import socket 14 | import sys 15 | import threading 16 | 17 | from . import process 18 | from . import reduction 19 | from . import util 20 | 21 | __all__ = ['stop'] 22 | 23 | 24 | if sys.platform == 'win32': 25 | __all__ += ['DupSocket'] 26 | 27 | class DupSocket: 28 | '''Picklable wrapper for a socket.''' 29 | 30 | def __init__(self, sock): 31 | new_sock = sock.dup() 32 | 33 | def send(conn, pid): 34 | share = new_sock.share(pid) 35 | conn.send_bytes(share) 36 | self._id = _resource_sharer.register(send, new_sock.close) 37 | 38 | def detach(self): 39 | '''Get the socket. 
This should only be called once.''' 40 | with _resource_sharer.get_connection(self._id) as conn: 41 | share = conn.recv_bytes() 42 | return socket.fromshare(share) 43 | 44 | else: 45 | __all__ += ['DupFd'] 46 | 47 | class DupFd: 48 | '''Wrapper for fd which can be used at any time.''' 49 | def __init__(self, fd): 50 | new_fd = os.dup(fd) 51 | 52 | def send(conn, pid): 53 | reduction.send_handle(conn, new_fd, pid) 54 | 55 | def close(): 56 | os.close(new_fd) 57 | self._id = _resource_sharer.register(send, close) 58 | 59 | def detach(self): 60 | '''Get the fd. This should only be called once.''' 61 | with _resource_sharer.get_connection(self._id) as conn: 62 | return reduction.recv_handle(conn) 63 | 64 | 65 | class _ResourceSharer: 66 | '''Manager for resources using background thread.''' 67 | def __init__(self): 68 | self._key = 0 69 | self._cache = {} 70 | self._old_locks = [] 71 | self._lock = threading.Lock() 72 | self._listener = None 73 | self._address = None 74 | self._thread = None 75 | util.register_after_fork(self, _ResourceSharer._afterfork) 76 | 77 | def register(self, send, close): 78 | '''Register resource, returning an identifier.''' 79 | with self._lock: 80 | if self._address is None: 81 | self._start() 82 | self._key += 1 83 | self._cache[self._key] = (send, close) 84 | return (self._address, self._key) 85 | 86 | @staticmethod 87 | def get_connection(ident): 88 | '''Return connection from which to receive identified resource.''' 89 | from .connection import Client 90 | address, key = ident 91 | c = Client(address, authkey=process.current_process().authkey) 92 | c.send((key, os.getpid())) 93 | return c 94 | 95 | def stop(self, timeout=None): 96 | '''Stop the background thread and clear registered resources.''' 97 | from .connection import Client 98 | with self._lock: 99 | if self._address is not None: 100 | c = Client(self._address, 101 | authkey=process.current_process().authkey) 102 | c.send(None) 103 | c.close() 104 | self._thread.join(timeout) 105 | if self._thread.is_alive(): 106 | util.sub_warning('_ResourceSharer thread did ' 107 | 'not stop when asked') 108 | self._listener.close() 109 | self._thread = None 110 | self._address = None 111 | self._listener = None 112 | for key, (send, close) in self._cache.items(): 113 | close() 114 | self._cache.clear() 115 | 116 | def _afterfork(self): 117 | for key, (send, close) in self._cache.items(): 118 | close() 119 | self._cache.clear() 120 | # If self._lock was locked at the time of the fork, it may be broken 121 | # -- see issue 6721. Replace it without letting it be gc'ed. 
122 | self._old_locks.append(self._lock) 123 | self._lock = threading.Lock() 124 | if self._listener is not None: 125 | self._listener.close() 126 | self._listener = None 127 | self._address = None 128 | self._thread = None 129 | 130 | def _start(self): 131 | from .connection import Listener 132 | assert self._listener is None 133 | util.debug('starting listener and thread for sending handles') 134 | self._listener = Listener(authkey=process.current_process().authkey) 135 | self._address = self._listener.address 136 | t = threading.Thread(target=self._serve) 137 | t.daemon = True 138 | t.start() 139 | self._thread = t 140 | 141 | def _serve(self): 142 | if hasattr(signal, 'pthread_sigmask'): 143 | signal.pthread_sigmask(signal.SIG_BLOCK, range(1, signal.NSIG)) 144 | while 1: 145 | try: 146 | with self._listener.accept() as conn: 147 | msg = conn.recv() 148 | if msg is None: 149 | break 150 | key, destination_pid = msg 151 | send, close = self._cache.pop(key) 152 | try: 153 | send(conn, destination_pid) 154 | finally: 155 | close() 156 | except: 157 | if not util.is_exiting(): 158 | sys.excepthook(*sys.exc_info()) 159 | 160 | 161 | _resource_sharer = _ResourceSharer() 162 | stop = _resource_sharer.stop 163 | -------------------------------------------------------------------------------- /Doc/includes/mp_benchmarks.py: -------------------------------------------------------------------------------- 1 | # 2 | # Simple benchmarks for the multiprocessing package 3 | # 4 | # Copyright (c) 2006-2008, R Oudkerk 5 | # All rights reserved. 6 | # 7 | 8 | import time, sys, multiprocessing, threading, Queue, gc 9 | 10 | if sys.platform == 'win32': 11 | _timer = time.clock 12 | else: 13 | _timer = time.time 14 | 15 | delta = 1 16 | 17 | 18 | #### TEST_QUEUESPEED 19 | 20 | def queuespeed_func(q, c, iterations): 21 | a = '0' * 256 22 | c.acquire() 23 | c.notify() 24 | c.release() 25 | 26 | for i in xrange(iterations): 27 | q.put(a) 28 | 29 | q.put('STOP') 30 | 31 | def test_queuespeed(Process, q, c): 32 | elapsed = 0 33 | iterations = 1 34 | 35 | while elapsed < delta: 36 | iterations *= 2 37 | 38 | p = Process(target=queuespeed_func, args=(q, c, iterations)) 39 | c.acquire() 40 | p.start() 41 | c.wait() 42 | c.release() 43 | 44 | result = None 45 | t = _timer() 46 | 47 | while result != 'STOP': 48 | result = q.get() 49 | 50 | elapsed = _timer() - t 51 | 52 | p.join() 53 | 54 | print iterations, 'objects passed through the queue in', elapsed, 'seconds' 55 | print 'average number/sec:', iterations/elapsed 56 | 57 | 58 | #### TEST_PIPESPEED 59 | 60 | def pipe_func(c, cond, iterations): 61 | a = '0' * 256 62 | cond.acquire() 63 | cond.notify() 64 | cond.release() 65 | 66 | for i in xrange(iterations): 67 | c.send(a) 68 | 69 | c.send('STOP') 70 | 71 | def test_pipespeed(): 72 | c, d = multiprocessing.Pipe() 73 | cond = multiprocessing.Condition() 74 | elapsed = 0 75 | iterations = 1 76 | 77 | while elapsed < delta: 78 | iterations *= 2 79 | 80 | p = multiprocessing.Process(target=pipe_func, 81 | args=(d, cond, iterations)) 82 | cond.acquire() 83 | p.start() 84 | cond.wait() 85 | cond.release() 86 | 87 | result = None 88 | t = _timer() 89 | 90 | while result != 'STOP': 91 | result = c.recv() 92 | 93 | elapsed = _timer() - t 94 | p.join() 95 | 96 | print iterations, 'objects passed through connection in',elapsed,'seconds' 97 | print 'average number/sec:', iterations/elapsed 98 | 99 | 100 | #### TEST_SEQSPEED 101 | 102 | def test_seqspeed(seq): 103 | elapsed = 0 104 | iterations = 1 105 | 106 | while elapsed < 
delta: 107 | iterations *= 2 108 | 109 | t = _timer() 110 | 111 | for i in xrange(iterations): 112 | a = seq[5] 113 | 114 | elapsed = _timer()-t 115 | 116 | print iterations, 'iterations in', elapsed, 'seconds' 117 | print 'average number/sec:', iterations/elapsed 118 | 119 | 120 | #### TEST_LOCK 121 | 122 | def test_lockspeed(l): 123 | elapsed = 0 124 | iterations = 1 125 | 126 | while elapsed < delta: 127 | iterations *= 2 128 | 129 | t = _timer() 130 | 131 | for i in xrange(iterations): 132 | l.acquire() 133 | l.release() 134 | 135 | elapsed = _timer()-t 136 | 137 | print iterations, 'iterations in', elapsed, 'seconds' 138 | print 'average number/sec:', iterations/elapsed 139 | 140 | 141 | #### TEST_CONDITION 142 | 143 | def conditionspeed_func(c, N): 144 | c.acquire() 145 | c.notify() 146 | 147 | for i in xrange(N): 148 | c.wait() 149 | c.notify() 150 | 151 | c.release() 152 | 153 | def test_conditionspeed(Process, c): 154 | elapsed = 0 155 | iterations = 1 156 | 157 | while elapsed < delta: 158 | iterations *= 2 159 | 160 | c.acquire() 161 | p = Process(target=conditionspeed_func, args=(c, iterations)) 162 | p.start() 163 | 164 | c.wait() 165 | 166 | t = _timer() 167 | 168 | for i in xrange(iterations): 169 | c.notify() 170 | c.wait() 171 | 172 | elapsed = _timer()-t 173 | 174 | c.release() 175 | p.join() 176 | 177 | print iterations * 2, 'waits in', elapsed, 'seconds' 178 | print 'average number/sec:', iterations * 2 / elapsed 179 | 180 | #### 181 | 182 | def test(): 183 | manager = multiprocessing.Manager() 184 | 185 | gc.disable() 186 | 187 | print '\n\t######## testing Queue.Queue\n' 188 | test_queuespeed(threading.Thread, Queue.Queue(), 189 | threading.Condition()) 190 | print '\n\t######## testing multiprocessing.Queue\n' 191 | test_queuespeed(multiprocessing.Process, multiprocessing.Queue(), 192 | multiprocessing.Condition()) 193 | print '\n\t######## testing Queue managed by server process\n' 194 | test_queuespeed(multiprocessing.Process, manager.Queue(), 195 | manager.Condition()) 196 | print '\n\t######## testing multiprocessing.Pipe\n' 197 | test_pipespeed() 198 | 199 | print 200 | 201 | print '\n\t######## testing list\n' 202 | test_seqspeed(range(10)) 203 | print '\n\t######## testing list managed by server process\n' 204 | test_seqspeed(manager.list(range(10))) 205 | print '\n\t######## testing Array("i", ..., lock=False)\n' 206 | test_seqspeed(multiprocessing.Array('i', range(10), lock=False)) 207 | print '\n\t######## testing Array("i", ..., lock=True)\n' 208 | test_seqspeed(multiprocessing.Array('i', range(10), lock=True)) 209 | 210 | print 211 | 212 | print '\n\t######## testing threading.Lock\n' 213 | test_lockspeed(threading.Lock()) 214 | print '\n\t######## testing threading.RLock\n' 215 | test_lockspeed(threading.RLock()) 216 | print '\n\t######## testing multiprocessing.Lock\n' 217 | test_lockspeed(multiprocessing.Lock()) 218 | print '\n\t######## testing multiprocessing.RLock\n' 219 | test_lockspeed(multiprocessing.RLock()) 220 | print '\n\t######## testing lock managed by server process\n' 221 | test_lockspeed(manager.Lock()) 222 | print '\n\t######## testing rlock managed by server process\n' 223 | test_lockspeed(manager.RLock()) 224 | 225 | print 226 | 227 | print '\n\t######## testing threading.Condition\n' 228 | test_conditionspeed(threading.Thread, threading.Condition()) 229 | print '\n\t######## testing multiprocessing.Condition\n' 230 | test_conditionspeed(multiprocessing.Process, multiprocessing.Condition()) 231 | print '\n\t######## testing condition 
managed by a server process\n' 232 | test_conditionspeed(multiprocessing.Process, manager.Condition()) 233 | 234 | gc.enable() 235 | 236 | if __name__ == '__main__': 237 | multiprocessing.freeze_support() 238 | test() 239 | -------------------------------------------------------------------------------- /Doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # multiprocessing documentation build configuration file, created by 4 | # sphinx-quickstart on Wed Nov 26 12:47:00 2008. 5 | # 6 | # This file is execfile()d with the current directory set to its containing dir. 7 | # 8 | # The contents of this file are pickled, so don't put values in the namespace 9 | # that aren't pickleable (module imports are okay, they're removed automatically). 10 | # 11 | # All configuration values have a default; values that are commented out 12 | # serve to show the default. 13 | 14 | import sys, os 15 | 16 | # If your extensions are in another directory, add it here. If the directory 17 | # is relative to the documentation root, use os.path.abspath to make it 18 | # absolute, like shown here. 19 | #sys.path.append(os.path.abspath('.')) 20 | 21 | # General configuration 22 | # --------------------- 23 | 24 | # Add any Sphinx extension module names here, as strings. They can be extensions 25 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 26 | extensions = ['sphinx.ext.autodoc'] 27 | 28 | # Add any paths that contain templates here, relative to this directory. 29 | templates_path = ['templates'] 30 | 31 | # The suffix of source filenames. 32 | source_suffix = '.rst' 33 | 34 | # The encoding of source files. 35 | #source_encoding = 'utf-8' 36 | 37 | # The master toctree document. 38 | master_doc = 'index' 39 | 40 | # General information about the project. 41 | project = u'multiprocessing' 42 | copyright = u'2008, Python Software Foundation' 43 | 44 | # The version info for the project you're documenting, acts as replacement for 45 | # |version| and |release|, also used in various other places throughout the 46 | # built documents. 47 | 48 | import os 49 | import sys 50 | sys.path.insert(0, os.path.join(os.getcwd(), os.pardir)) 51 | import billiard 52 | # 53 | # The short X.Y version. 54 | version = billiard.__version__ 55 | # The full version, including alpha/beta/rc tags. 56 | release = billiard.__version__ 57 | 58 | # The language for content autogenerated by Sphinx. Refer to documentation 59 | # for a list of supported languages. 60 | #language = None 61 | 62 | # There are two options for replacing |today|: either, you set today to some 63 | # non-false value, then it is used: 64 | #today = '' 65 | # Else, today_fmt is used as the format for a strftime call. 66 | #today_fmt = '%B %d, %Y' 67 | 68 | # List of documents that shouldn't be included in the build. 69 | #unused_docs = [] 70 | 71 | # List of directories, relative to source directory, that shouldn't be searched 72 | # for source files. 73 | exclude_trees = ['build'] 74 | 75 | # The reST default role (used for this markup: `text`) to use for all documents. 76 | #default_role = None 77 | 78 | # If true, '()' will be appended to :func: etc. cross-reference text. 79 | #add_function_parentheses = True 80 | 81 | # If true, the current module name will be prepended to all description 82 | # unit titles (such as .. function::). 83 | #add_module_names = True 84 | 85 | # If true, sectionauthor and moduleauthor directives will be shown in the 86 | # output. 
They are ignored by default. 87 | #show_authors = False 88 | 89 | # The name of the Pygments (syntax highlighting) style to use. 90 | pygments_style = 'sphinx' 91 | 92 | 93 | # Options for HTML output 94 | # ----------------------- 95 | 96 | # The style sheet to use for HTML and HTML Help pages. A file of that name 97 | # must exist either in Sphinx' static/ path, or in one of the custom paths 98 | # given in html_static_path. 99 | html_style = 'default.css' 100 | 101 | # The name for this set of Sphinx documents. If None, it defaults to 102 | # " v documentation". 103 | #html_title = None 104 | 105 | # A shorter title for the navigation bar. Default is the same as html_title. 106 | #html_short_title = None 107 | 108 | # The name of an image file (relative to this directory) to place at the top 109 | # of the sidebar. 110 | #html_logo = None 111 | 112 | # The name of an image file (within the static path) to use as favicon of the 113 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 114 | # pixels large. 115 | #html_favicon = None 116 | 117 | # Add any paths that contain custom static files (such as style sheets) here, 118 | # relative to this directory. They are copied after the builtin static files, 119 | # so a file named "default.css" will overwrite the builtin "default.css". 120 | html_static_path = ['static'] 121 | 122 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 123 | # using the given strftime format. 124 | #html_last_updated_fmt = '%b %d, %Y' 125 | 126 | # If true, SmartyPants will be used to convert quotes and dashes to 127 | # typographically correct entities. 128 | #html_use_smartypants = True 129 | 130 | # Custom sidebar templates, maps document names to template names. 131 | #html_sidebars = {} 132 | 133 | # Additional templates that should be rendered to pages, maps page names to 134 | # template names. 135 | #html_additional_pages = {} 136 | 137 | # If false, no module index is generated. 138 | #html_use_modindex = True 139 | 140 | # If false, no index is generated. 141 | #html_use_index = True 142 | 143 | # If true, the index is split into individual pages for each letter. 144 | #html_split_index = False 145 | 146 | # If true, the reST sources are included in the HTML build as _sources/. 147 | #html_copy_source = True 148 | 149 | # If true, an OpenSearch description file will be output, and all pages will 150 | # contain a tag referring to it. The value of this option must be the 151 | # base URL from which the finished HTML is served. 152 | #html_use_opensearch = '' 153 | 154 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 155 | #html_file_suffix = '' 156 | 157 | # Output file base name for HTML help builder. 158 | htmlhelp_basename = 'multiprocessingdoc' 159 | 160 | 161 | # Options for LaTeX output 162 | # ------------------------ 163 | 164 | # The paper size ('letter' or 'a4'). 165 | #latex_paper_size = 'letter' 166 | 167 | # The font size ('10pt', '11pt' or '12pt'). 168 | #latex_font_size = '10pt' 169 | 170 | # Grouping the document tree into LaTeX files. List of tuples 171 | # (source start file, target name, title, author, document class [howto/manual]). 172 | latex_documents = [ 173 | ('index', 'multiprocessing.tex', 'multiprocessing Documentation', 174 | 'Python Software Foundation', 'manual'), 175 | ] 176 | 177 | # The name of an image file (relative to this directory) to place at the top of 178 | # the title page. 
179 | #latex_logo = None 180 | 181 | # For "manual" documents, if this is true, then toplevel headings are parts, 182 | # not chapters. 183 | #latex_use_parts = False 184 | 185 | # Additional stuff for the LaTeX preamble. 186 | #latex_preamble = '' 187 | 188 | # Documents to append as an appendix to all manuals. 189 | #latex_appendices = [] 190 | 191 | # If false, no module index is generated. 192 | #latex_use_modindex = True 193 | -------------------------------------------------------------------------------- /billiard/util.py: -------------------------------------------------------------------------------- 1 | # 2 | # Module providing various facilities to other parts of the package 3 | # 4 | # billiard/util.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt 7 | # Licensed to PSF under a Contributor Agreement. 8 | # 9 | 10 | import sys 11 | import errno 12 | import functools 13 | import atexit 14 | 15 | try: 16 | import cffi 17 | except ImportError: 18 | import ctypes 19 | 20 | try: 21 | from subprocess import _args_from_interpreter_flags # noqa 22 | except ImportError: # pragma: no cover 23 | def _args_from_interpreter_flags(): # noqa 24 | """Return a list of command-line arguments reproducing the current 25 | settings in sys.flags and sys.warnoptions.""" 26 | flag_opt_map = { 27 | 'debug': 'd', 28 | 'optimize': 'O', 29 | 'dont_write_bytecode': 'B', 30 | 'no_user_site': 's', 31 | 'no_site': 'S', 32 | 'ignore_environment': 'E', 33 | 'verbose': 'v', 34 | 'bytes_warning': 'b', 35 | 'hash_randomization': 'R', 36 | 'py3k_warning': '3', 37 | } 38 | args = [] 39 | for flag, opt in flag_opt_map.items(): 40 | v = getattr(sys.flags, flag) 41 | if v > 0: 42 | args.append('-' + opt * v) 43 | for opt in sys.warnoptions: 44 | args.append('-W' + opt) 45 | return args 46 | 47 | from multiprocessing.util import ( # noqa 48 | _afterfork_registry, 49 | _afterfork_counter, 50 | _exit_function, 51 | _finalizer_registry, 52 | _finalizer_counter, 53 | Finalize, 54 | ForkAwareLocal, 55 | ForkAwareThreadLock, 56 | get_temp_dir, 57 | is_exiting, 58 | register_after_fork, 59 | _run_after_forkers, 60 | _run_finalizers, 61 | ) 62 | 63 | from .compat import get_errno 64 | 65 | __all__ = [ 66 | 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', 67 | 'log_to_stderr', 'get_temp_dir', 'register_after_fork', 68 | 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', 69 | 'SUBDEBUG', 'SUBWARNING', 70 | ] 71 | 72 | 73 | # Constants from prctl.h 74 | PR_GET_PDEATHSIG = 2 75 | PR_SET_PDEATHSIG = 1 76 | 77 | # 78 | # Logging 79 | # 80 | 81 | NOTSET = 0 82 | SUBDEBUG = 5 83 | DEBUG = 10 84 | INFO = 20 85 | SUBWARNING = 25 86 | WARNING = 30 87 | ERROR = 40 88 | 89 | LOGGER_NAME = 'multiprocessing' 90 | DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' 91 | 92 | _logger = None 93 | _log_to_stderr = False 94 | 95 | 96 | def sub_debug(msg, *args, **kwargs): 97 | if _logger: 98 | _logger.log(SUBDEBUG, msg, *args, **kwargs) 99 | 100 | 101 | def debug(msg, *args, **kwargs): 102 | if _logger: 103 | _logger.log(DEBUG, msg, *args, **kwargs) 104 | 105 | 106 | def info(msg, *args, **kwargs): 107 | if _logger: 108 | _logger.log(INFO, msg, *args, **kwargs) 109 | 110 | 111 | def sub_warning(msg, *args, **kwargs): 112 | if _logger: 113 | _logger.log(SUBWARNING, msg, *args, **kwargs) 114 | 115 | def warning(msg, *args, **kwargs): 116 | if _logger: 117 | _logger.log(WARNING, msg, *args, **kwargs) 118 | 119 | def error(msg, *args, **kwargs): 120 | if _logger: 121 | _logger.log(ERROR, 
msg, *args, **kwargs) 122 | 123 | 124 | def get_logger(): 125 | ''' 126 | Returns logger used by multiprocessing 127 | ''' 128 | global _logger 129 | import logging 130 | 131 | try: 132 | # Python 3.13+ 133 | acquire, release = logging._prepareFork, logging._afterFork 134 | except AttributeError: 135 | acquire, release = logging._acquireLock, logging._releaseLock 136 | acquire() 137 | try: 138 | if not _logger: 139 | 140 | _logger = logging.getLogger(LOGGER_NAME) 141 | _logger.propagate = 0 142 | logging.addLevelName(SUBDEBUG, 'SUBDEBUG') 143 | logging.addLevelName(SUBWARNING, 'SUBWARNING') 144 | 145 | # XXX multiprocessing should cleanup before logging 146 | if hasattr(atexit, 'unregister'): 147 | atexit.unregister(_exit_function) 148 | atexit.register(_exit_function) 149 | else: 150 | atexit._exithandlers.remove((_exit_function, (), {})) 151 | atexit._exithandlers.append((_exit_function, (), {})) 152 | finally: 153 | release() 154 | 155 | return _logger 156 | 157 | 158 | def log_to_stderr(level=None): 159 | ''' 160 | Turn on logging and add a handler which prints to stderr 161 | ''' 162 | global _log_to_stderr 163 | import logging 164 | 165 | logger = get_logger() 166 | formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) 167 | handler = logging.StreamHandler() 168 | handler.setFormatter(formatter) 169 | logger.addHandler(handler) 170 | 171 | if level: 172 | logger.setLevel(level) 173 | _log_to_stderr = True 174 | return _logger 175 | 176 | 177 | def get_pdeathsig(): 178 | """ 179 | Return the current value of the parent process death signal 180 | """ 181 | if not sys.platform.startswith('linux'): 182 | # currently we support only linux platform. 183 | raise OSError() 184 | try: 185 | if 'cffi' in sys.modules: 186 | ffi = cffi.FFI() 187 | ffi.cdef("int prctl (int __option, ...);") 188 | arg = ffi.new("int *") 189 | C = ffi.dlopen(None) 190 | C.prctl(PR_GET_PDEATHSIG, arg) 191 | return arg[0] 192 | else: 193 | sig = ctypes.c_int() 194 | libc = ctypes.cdll.LoadLibrary("libc.so.6") 195 | libc.prctl(PR_GET_PDEATHSIG, ctypes.byref(sig)) 196 | return sig.value 197 | except Exception: 198 | raise OSError() 199 | 200 | 201 | def set_pdeathsig(sig): 202 | """ 203 | Set the parent process death signal of the calling process to sig 204 | (either a signal value in the range 1..maxsig, or 0 to clear). 205 | This is the signal that the calling process will get when its parent dies. 206 | This value is cleared for the child of a fork(2) and 207 | (since Linux 2.4.36 / 2.6.23) when executing a set-user-ID or set-group-ID binary. 208 | """ 209 | if not sys.platform.startswith('linux'): 210 | # currently we support only linux platform. 211 | raise OSError("pdeathsig is only supported on linux") 212 | try: 213 | if 'cffi' in sys.modules: 214 | ffi = cffi.FFI() 215 | ffi.cdef("int prctl (int __option, ...);") 216 | C = ffi.dlopen(None) 217 | C.prctl(PR_SET_PDEATHSIG, ffi.cast("int", sig)) 218 | else: 219 | libc = ctypes.cdll.LoadLibrary("libc.so.6") 220 | libc.prctl(PR_SET_PDEATHSIG, ctypes.c_int(sig)) 221 | except Exception as e: 222 | raise OSError("An error occurred while setting pdeathsig") from e 223 | 224 | def _eintr_retry(func): 225 | ''' 226 | Automatic retry after EINTR. 
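
    A decorator: wraps ``func`` so that calls failing with ``EINTR``
    (a system call interrupted by a signal) are transparently retried.
    Largely redundant on Python 3.5+, where PEP 475 makes the
    interpreter retry interrupted system calls itself, but kept for
    older call sites.  Usage::

        read = _eintr_retry(os.read)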
227 |     '''
228 | 
229 |     @functools.wraps(func)
230 |     def wrapped(*args, **kwargs):
231 |         while 1:
232 |             try:
233 |                 return func(*args, **kwargs)
234 |             except OSError as exc:
235 |                 if get_errno(exc) != errno.EINTR:
236 |                     raise
237 |     return wrapped
238 | 
--------------------------------------------------------------------------------
/Doc/includes/mp_synchronize.py:
--------------------------------------------------------------------------------
1 | #
2 | # A test file for the `multiprocessing` package
3 | #
4 | # Copyright (c) 2006-2008, R Oudkerk
5 | # All rights reserved.
6 | #
7 | 
8 | import time, sys, random
9 | from queue import Empty
10 | 
11 | import multiprocessing  # may get overwritten
12 | 
13 | 
14 | #### TEST_VALUE
15 | 
16 | def value_func(running, mutex):
17 |     random.seed()
18 |     time.sleep(random.random()*4)
19 | 
20 |     mutex.acquire()
21 |     print('\n\t\t\t' + str(multiprocessing.current_process()) + ' has finished')
22 |     running.value -= 1
23 |     mutex.release()
24 | 
25 | def test_value():
26 |     TASKS = 10
27 |     running = multiprocessing.Value('i', TASKS)
28 |     mutex = multiprocessing.Lock()
29 | 
30 |     for i in range(TASKS):
31 |         p = multiprocessing.Process(target=value_func, args=(running, mutex))
32 |         p.start()
33 | 
34 |     while running.value > 0:
35 |         time.sleep(0.08)
36 |         mutex.acquire()
37 |         print(running.value, end=' ')
38 |         sys.stdout.flush()
39 |         mutex.release()
40 | 
41 |     print()
42 |     print('No more running processes')
43 | 
44 | 
45 | #### TEST_QUEUE
46 | 
47 | def queue_func(queue):
48 |     for i in range(30):
49 |         time.sleep(0.5 * random.random())
50 |         queue.put(i*i)
51 |     queue.put('STOP')
52 | 
53 | def test_queue():
54 |     q = multiprocessing.Queue()
55 | 
56 |     p = multiprocessing.Process(target=queue_func, args=(q,))
57 |     p.start()
58 | 
59 |     o = None
60 |     while o != 'STOP':
61 |         try:
62 |             o = q.get(timeout=0.3)
63 |             print(o, end=' ')
64 |             sys.stdout.flush()
65 |         except Empty:
66 |             print('TIMEOUT')
67 | 
68 |     print()
69 | 
70 | 
71 | #### TEST_CONDITION
72 | 
73 | def condition_func(cond):
74 |     cond.acquire()
75 |     print('\t' + str(cond))
76 |     time.sleep(2)
77 |     print('\tchild is notifying')
78 |     print('\t' + str(cond))
79 |     cond.notify()
80 |     cond.release()
81 | 
82 | def test_condition():
83 |     cond = multiprocessing.Condition()
84 | 
85 |     p = multiprocessing.Process(target=condition_func, args=(cond,))
86 |     print(cond)
87 | 
88 |     cond.acquire()
89 |     print(cond)
90 |     cond.acquire()
91 |     print(cond)
92 | 
93 |     p.start()
94 | 
95 |     print('main is waiting')
96 |     cond.wait()
97 |     print('main has woken up')
98 | 
99 |     print(cond)
100 |     cond.release()
101 |     print(cond)
102 |     cond.release()
103 | 
104 |     p.join()
105 |     print(cond)
106 | 
107 | 
108 | #### TEST_SEMAPHORE
109 | 
110 | def semaphore_func(sema, mutex, running):
111 |     sema.acquire()
112 | 
113 |     mutex.acquire()
114 |     running.value += 1
115 |     print(running.value, 'tasks are running')
116 |     mutex.release()
117 | 
118 |     random.seed()
119 |     time.sleep(random.random()*2)
120 | 
121 |     mutex.acquire()
122 |     running.value -= 1
123 |     print('%s has finished' % multiprocessing.current_process())
124 |     mutex.release()
125 | 
126 |     sema.release()
127 | 
128 | def test_semaphore():
129 |     sema = multiprocessing.Semaphore(3)
130 |     mutex = multiprocessing.RLock()
131 |     running = multiprocessing.Value('i', 0)
132 | 
133 |     processes = [
134 |         multiprocessing.Process(target=semaphore_func,
135 |                                 args=(sema, mutex, running))
136 |         for i in range(10)
137 |     ]
138 | 
139 |     for p in processes:
140 |         p.start()
141 | 
142 |     for p in processes:
143 |         p.join()
144 | 
145 | 
146 | #### TEST_JOIN_TIMEOUT
147 | 
148 | def join_timeout_func():
149 |     print('\tchild sleeping')
150 |     time.sleep(5.5)
151 |     print('\n\tchild terminating')
152 | 
153 | def test_join_timeout():
154 |     p = multiprocessing.Process(target=join_timeout_func)
155 |     p.start()
156 | 
157 |     print('waiting for process to finish')
158 | 
159 |     while 1:
160 |         p.join(timeout=1)
161 |         if not p.is_alive():
162 |             break
163 |         print('.', end='')
164 |         sys.stdout.flush()
165 | 
166 | 
167 | #### TEST_EVENT
168 | 
169 | def event_func(event):
170 |     print('\t%r is waiting' % multiprocessing.current_process())
171 |     event.wait()
172 |     print('\t%r has woken up' % multiprocessing.current_process())
173 | 
174 | def test_event():
175 |     event = multiprocessing.Event()
176 | 
177 |     processes = [multiprocessing.Process(target=event_func, args=(event,))
178 |                  for i in range(5)]
179 | 
180 |     for p in processes:
181 |         p.start()
182 | 
183 |     print('main is sleeping')
184 |     time.sleep(2)
185 | 
186 |     print('main is setting event')
187 |     event.set()
188 | 
189 |     for p in processes:
190 |         p.join()
191 | 
192 | 
193 | #### TEST_SHAREDVALUES
194 | 
195 | def sharedvalues_func(values, arrays, shared_values, shared_arrays):
196 |     for i in range(len(values)):
197 |         v = values[i][1]
198 |         sv = shared_values[i].value
199 |         assert v == sv
200 | 
201 |     for i in range(len(arrays)):
202 |         a = arrays[i][1]
203 |         sa = list(shared_arrays[i][:])
204 |         assert list(a) == sa
205 | 
206 |     print('Tests passed')
207 | 
208 | def test_sharedvalues():
209 |     values = [
210 |         ('i', 10),
211 |         ('h', -2),
212 |         ('d', 1.25)
213 |     ]
214 |     arrays = [
215 |         ('i', range(100)),
216 |         ('d', [0.25 * i for i in range(100)]),
217 |         ('H', range(1000))
218 |     ]
219 | 
220 |     shared_values = [multiprocessing.Value(id, v) for id, v in values]
221 |     shared_arrays = [multiprocessing.Array(id, a) for id, a in arrays]
222 | 
223 |     p = multiprocessing.Process(
224 |         target=sharedvalues_func,
225 |         args=(values, arrays, shared_values, shared_arrays)
226 |         )
227 |     p.start()
228 |     p.join()
229 | 
230 |     assert p.exitcode == 0
231 | 
232 | 
233 | ####
234 | 
235 | def test(namespace=multiprocessing):
236 |     global multiprocessing
237 | 
238 |     multiprocessing = namespace
239 | 
240 |     for func in [ test_value, test_queue, test_condition,
241 |                   test_semaphore, test_join_timeout, test_event,
242 |                   test_sharedvalues ]:
243 | 
244 |         print('\n\t######## %s\n' % func.__name__)
245 |         func()
246 | 
247 |     ignore = multiprocessing.active_children()      # cleanup any old processes
248 |     if hasattr(multiprocessing, '_debug_info'):
249 |         info = multiprocessing._debug_info()
250 |         if info:
251 |             print(info)
252 |             raise ValueError('there should be no positive refcounts left')
253 | 
254 | 
255 | if __name__ == '__main__':
256 |     multiprocessing.freeze_support()
257 | 
258 |     assert len(sys.argv) in (1, 2)
259 | 
260 |     if len(sys.argv) == 1 or sys.argv[1] == 'processes':
261 |         print(' Using processes '.center(79, '-'))
262 |         namespace = multiprocessing
263 |     elif sys.argv[1] == 'manager':
264 |         print(' Using processes and a manager '.center(79, '-'))
265 |         namespace = multiprocessing.Manager()
266 |         namespace.Process = multiprocessing.Process
267 |         namespace.current_process = multiprocessing.current_process
268 |         namespace.active_children = multiprocessing.active_children
269 |     elif sys.argv[1] == 'threads':
270 |         print(' Using threads '.center(79, '-'))
271 |         import multiprocessing.dummy as namespace
272 |     else:
273 |         print('Usage:\n\t%s [processes | manager | threads]' % sys.argv[0])
274 |         raise SystemExit(2)
275 | 
276 |     test(namespace)
277 | 
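A minimal companion sketch (assuming the file above is importable as
``mp_synchronize``; the module name is this editor's assumption): because
billiard mirrors the multiprocessing API, the same driver can exercise
billiard's primitives by passing the package in as the test namespace.

    import billiard
    import mp_synchronize

    if __name__ == '__main__':
        billiard.freeze_support()
        # test() rebinds the module-global "multiprocessing" to the
        # namespace it is given, so every test runs against billiard.
        mp_synchronize.test(namespace=billiard)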
-------------------------------------------------------------------------------- /billiard/sharedctypes.py: -------------------------------------------------------------------------------- 1 | # 2 | # Module which supports allocation of ctypes objects from shared memory 3 | # 4 | # multiprocessing/sharedctypes.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk 7 | # Licensed to PSF under a Contributor Agreement. 8 | # 9 | 10 | import ctypes 11 | import sys 12 | import weakref 13 | 14 | from . import heap 15 | from . import get_context 16 | from .context import assert_spawning 17 | from .reduction import ForkingPickler 18 | 19 | __all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] 20 | 21 | PY3 = sys.version_info[0] == 3 22 | 23 | typecode_to_type = { 24 | 'c': ctypes.c_char, 'u': ctypes.c_wchar, 25 | 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, 26 | 'h': ctypes.c_short, 'H': ctypes.c_ushort, 27 | 'i': ctypes.c_int, 'I': ctypes.c_uint, 28 | 'l': ctypes.c_long, 'L': ctypes.c_ulong, 29 | 'f': ctypes.c_float, 'd': ctypes.c_double 30 | } 31 | 32 | 33 | def _new_value(type_): 34 | size = ctypes.sizeof(type_) 35 | wrapper = heap.BufferWrapper(size) 36 | return rebuild_ctype(type_, wrapper, None) 37 | 38 | 39 | def RawValue(typecode_or_type, *args): 40 | ''' 41 | Returns a ctypes object allocated from shared memory 42 | ''' 43 | type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) 44 | obj = _new_value(type_) 45 | ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) 46 | obj.__init__(*args) 47 | return obj 48 | 49 | 50 | def RawArray(typecode_or_type, size_or_initializer): 51 | ''' 52 | Returns a ctypes array allocated from shared memory 53 | ''' 54 | type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) 55 | if isinstance(size_or_initializer, int): 56 | type_ = type_ * size_or_initializer 57 | obj = _new_value(type_) 58 | ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) 59 | return obj 60 | else: 61 | type_ = type_ * len(size_or_initializer) 62 | result = _new_value(type_) 63 | result.__init__(*size_or_initializer) 64 | return result 65 | 66 | 67 | def Value(typecode_or_type, *args, **kwds): 68 | ''' 69 | Return a synchronization wrapper for a Value 70 | ''' 71 | lock = kwds.pop('lock', None) 72 | ctx = kwds.pop('ctx', None) 73 | if kwds: 74 | raise ValueError( 75 | 'unrecognized keyword argument(s): %s' % list(kwds.keys())) 76 | obj = RawValue(typecode_or_type, *args) 77 | if lock is False: 78 | return obj 79 | if lock in (True, None): 80 | ctx = ctx or get_context() 81 | lock = ctx.RLock() 82 | if not hasattr(lock, 'acquire'): 83 | raise AttributeError("'%r' has no method 'acquire'" % lock) 84 | return synchronized(obj, lock, ctx=ctx) 85 | 86 | 87 | def Array(typecode_or_type, size_or_initializer, **kwds): 88 | ''' 89 | Return a synchronization wrapper for a RawArray 90 | ''' 91 | lock = kwds.pop('lock', None) 92 | ctx = kwds.pop('ctx', None) 93 | if kwds: 94 | raise ValueError( 95 | 'unrecognized keyword argument(s): %s' % list(kwds.keys())) 96 | obj = RawArray(typecode_or_type, size_or_initializer) 97 | if lock is False: 98 | return obj 99 | if lock in (True, None): 100 | ctx = ctx or get_context() 101 | lock = ctx.RLock() 102 | if not hasattr(lock, 'acquire'): 103 | raise AttributeError("'%r' has no method 'acquire'" % lock) 104 | return synchronized(obj, lock, ctx=ctx) 105 | 106 | 107 | def copy(obj): 108 | new_obj = _new_value(type(obj)) 109 | ctypes.pointer(new_obj)[0] = obj 110 | return new_obj 111 | 112 | 113 | def 
synchronized(obj, lock=None, ctx=None): 114 | assert not isinstance(obj, SynchronizedBase), 'object already synchronized' 115 | ctx = ctx or get_context() 116 | 117 | if isinstance(obj, ctypes._SimpleCData): 118 | return Synchronized(obj, lock, ctx) 119 | elif isinstance(obj, ctypes.Array): 120 | if obj._type_ is ctypes.c_char: 121 | return SynchronizedString(obj, lock, ctx) 122 | return SynchronizedArray(obj, lock, ctx) 123 | else: 124 | cls = type(obj) 125 | try: 126 | scls = class_cache[cls] 127 | except KeyError: 128 | names = [field[0] for field in cls._fields_] 129 | d = dict((name, make_property(name)) for name in names) 130 | classname = 'Synchronized' + cls.__name__ 131 | scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) 132 | return scls(obj, lock, ctx) 133 | 134 | # 135 | # Functions for pickling/unpickling 136 | # 137 | 138 | 139 | def reduce_ctype(obj): 140 | assert_spawning(obj) 141 | if isinstance(obj, ctypes.Array): 142 | return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) 143 | else: 144 | return rebuild_ctype, (type(obj), obj._wrapper, None) 145 | 146 | 147 | def rebuild_ctype(type_, wrapper, length): 148 | if length is not None: 149 | type_ = type_ * length 150 | ForkingPickler.register(type_, reduce_ctype) 151 | if PY3: 152 | buf = wrapper.create_memoryview() 153 | obj = type_.from_buffer(buf) 154 | else: 155 | obj = type_.from_address(wrapper.get_address()) 156 | obj._wrapper = wrapper 157 | return obj 158 | 159 | # 160 | # Function to create properties 161 | # 162 | 163 | 164 | def make_property(name): 165 | try: 166 | return prop_cache[name] 167 | except KeyError: 168 | d = {} 169 | exec(template % ((name, ) * 7), d) 170 | prop_cache[name] = d[name] 171 | return d[name] 172 | 173 | 174 | template = ''' 175 | def get%s(self): 176 | self.acquire() 177 | try: 178 | return self._obj.%s 179 | finally: 180 | self.release() 181 | def set%s(self, value): 182 | self.acquire() 183 | try: 184 | self._obj.%s = value 185 | finally: 186 | self.release() 187 | %s = property(get%s, set%s) 188 | ''' 189 | 190 | prop_cache = {} 191 | class_cache = weakref.WeakKeyDictionary() 192 | 193 | # 194 | # Synchronized wrappers 195 | # 196 | 197 | 198 | class SynchronizedBase: 199 | 200 | def __init__(self, obj, lock=None, ctx=None): 201 | self._obj = obj 202 | if lock: 203 | self._lock = lock 204 | else: 205 | ctx = ctx or get_context(force=True) 206 | self._lock = ctx.RLock() 207 | self.acquire = self._lock.acquire 208 | self.release = self._lock.release 209 | 210 | def __enter__(self): 211 | return self._lock.__enter__() 212 | 213 | def __exit__(self, *args): 214 | return self._lock.__exit__(*args) 215 | 216 | def __reduce__(self): 217 | assert_spawning(self) 218 | return synchronized, (self._obj, self._lock) 219 | 220 | def get_obj(self): 221 | return self._obj 222 | 223 | def get_lock(self): 224 | return self._lock 225 | 226 | def __repr__(self): 227 | return '<%s wrapper for %s>' % (type(self).__name__, self._obj) 228 | 229 | 230 | class Synchronized(SynchronizedBase): 231 | value = make_property('value') 232 | 233 | 234 | class SynchronizedArray(SynchronizedBase): 235 | 236 | def __len__(self): 237 | return len(self._obj) 238 | 239 | def __getitem__(self, i): 240 | with self: 241 | return self._obj[i] 242 | 243 | def __setitem__(self, i, value): 244 | with self: 245 | self._obj[i] = value 246 | 247 | def __getslice__(self, start, stop): 248 | with self: 249 | return self._obj[start:stop] 250 | 251 | def __setslice__(self, start, stop, values): 252 | with self: 
253 |             self._obj[start:stop] = values
254 | 
255 | 
256 | class SynchronizedString(SynchronizedArray):
257 |     value = make_property('value')
258 |     raw = make_property('raw')
259 | 
--------------------------------------------------------------------------------
/Doc/includes/mp_pool.py:
--------------------------------------------------------------------------------
1 | #
2 | # A test of `multiprocessing.Pool` class
3 | #
4 | # Copyright (c) 2006-2008, R Oudkerk
5 | # All rights reserved.
6 | #
7 | 
8 | import multiprocessing
9 | import time
10 | import random
11 | import sys
12 | 
13 | #
14 | # Functions used by test code
15 | #
16 | 
17 | def calculate(func, args):
18 |     result = func(*args)
19 |     return '%s says that %s%s = %s' % (
20 |         multiprocessing.current_process().name,
21 |         func.__name__, args, result
22 |         )
23 | 
24 | def calculatestar(args):
25 |     return calculate(*args)
26 | 
27 | def mul(a, b):
28 |     time.sleep(0.5*random.random())
29 |     return a * b
30 | 
31 | def plus(a, b):
32 |     time.sleep(0.5*random.random())
33 |     return a + b
34 | 
35 | def f(x):
36 |     return 1.0 / (x-5.0)
37 | 
38 | def pow3(x):
39 |     return x**3
40 | 
41 | def noop(x):
42 |     pass
43 | 
44 | #
45 | # Test code
46 | #
47 | 
48 | def test():
49 |     print('cpu_count() = %d\n' % multiprocessing.cpu_count())
50 | 
51 |     #
52 |     # Create pool
53 |     #
54 | 
55 |     PROCESSES = 4
56 |     print('Creating pool with %d processes\n' % PROCESSES)
57 |     pool = multiprocessing.Pool(PROCESSES)
58 |     print('pool = %s' % pool)
59 |     print()
60 | 
61 |     #
62 |     # Tests
63 |     #
64 | 
65 |     TASKS = [(mul, (i, 7)) for i in range(10)] + \
66 |             [(plus, (i, 8)) for i in range(10)]
67 | 
68 |     results = [pool.apply_async(calculate, t) for t in TASKS]
69 |     imap_it = pool.imap(calculatestar, TASKS)
70 |     imap_unordered_it = pool.imap_unordered(calculatestar, TASKS)
71 | 
72 |     print('Ordered results using pool.apply_async():')
73 |     for r in results:
74 |         print('\t', r.get())
75 |     print()
76 | 
77 |     print('Ordered results using pool.imap():')
78 |     for x in imap_it:
79 |         print('\t', x)
80 |     print()
81 | 
82 |     print('Unordered results using pool.imap_unordered():')
83 |     for x in imap_unordered_it:
84 |         print('\t', x)
85 |     print()
86 | 
87 |     print('Ordered results using pool.map() --- will block till complete:')
88 |     for x in pool.map(calculatestar, TASKS):
89 |         print('\t', x)
90 |     print()
91 | 
92 |     #
93 |     # Simple benchmarks
94 |     #
95 | 
96 |     N = 100000
97 |     print('def pow3(x): return x**3')
98 | 
99 |     t = time.time()
100 |     A = list(map(pow3, range(N)))
101 |     print('\tmap(pow3, range(%d)):\n\t\t%s seconds' %
102 |           (N, time.time() - t))
103 | 
104 |     t = time.time()
105 |     B = pool.map(pow3, range(N))
106 |     print('\tpool.map(pow3, range(%d)):\n\t\t%s seconds' %
107 |           (N, time.time() - t))
108 | 
109 |     t = time.time()
110 |     C = list(pool.imap(pow3, range(N), chunksize=N//8))
111 |     print('\tlist(pool.imap(pow3, range(%d), chunksize=%d)):\n\t\t%s'
112 |           ' seconds' % (N, N//8, time.time() - t))
113 | 
114 |     assert A == B == C, (len(A), len(B), len(C))
115 |     print()
116 | 
117 |     L = [None] * 1000000
118 |     print('def noop(x): pass')
119 |     print('L = [None] * 1000000')
120 | 
121 |     t = time.time()
122 |     A = list(map(noop, L))
123 |     print('\tmap(noop, L):\n\t\t%s seconds' %
124 |           (time.time() - t))
125 | 
126 |     t = time.time()
127 |     B = pool.map(noop, L)
128 |     print('\tpool.map(noop, L):\n\t\t%s seconds' %
129 |           (time.time() - t))
130 | 
131 |     t = time.time()
132 |     C = list(pool.imap(noop, L, chunksize=len(L)//8))
133 |     print('\tlist(pool.imap(noop, L, chunksize=%d)):\n\t\t%s seconds' %
134 |           (len(L)//8, time.time() - t))
135 | 
136 |     assert A == B == C, (len(A), len(B), len(C))
137 |     print()
138 | 
139 |     del A, B, C, L
140 | 
141 |     #
142 |     # Test error handling
143 |     #
144 | 
145 |     print('Testing error handling:')
146 | 
147 |     try:
148 |         print(pool.apply(f, (5,)))
149 |     except ZeroDivisionError:
150 |         print('\tGot ZeroDivisionError as expected from pool.apply()')
151 |     else:
152 |         raise AssertionError('expected ZeroDivisionError')
153 | 
154 |     try:
155 |         print(pool.map(f, range(10)))
156 |     except ZeroDivisionError:
157 |         print('\tGot ZeroDivisionError as expected from pool.map()')
158 |     else:
159 |         raise AssertionError('expected ZeroDivisionError')
160 | 
161 |     try:
162 |         print(list(pool.imap(f, range(10))))
163 |     except ZeroDivisionError:
164 |         print('\tGot ZeroDivisionError as expected from list(pool.imap())')
165 |     else:
166 |         raise AssertionError('expected ZeroDivisionError')
167 | 
168 |     it = pool.imap(f, range(10))
169 |     for i in range(10):
170 |         try:
171 |             x = it.next()
172 |         except ZeroDivisionError:
173 |             if i == 5:
174 |                 pass
175 |         except StopIteration:
176 |             break
177 |         else:
178 |             if i == 5:
179 |                 raise AssertionError('expected ZeroDivisionError')
180 | 
181 |     assert i == 9
182 |     print('\tGot ZeroDivisionError as expected from IMapIterator.next()')
183 |     print()
184 | 
185 |     #
186 |     # Testing timeouts
187 |     #
188 | 
189 |     print('Testing ApplyResult.get() with timeout:', end=' ')
190 |     res = pool.apply_async(calculate, TASKS[0])
191 |     while 1:
192 |         sys.stdout.flush()
193 |         try:
194 |             sys.stdout.write('\n\t%s' % res.get(0.02))
195 |             break
196 |         except multiprocessing.TimeoutError:
197 |             sys.stdout.write('.')
198 |     print()
199 |     print()
200 | 
201 |     print('Testing IMapIterator.next() with timeout:', end=' ')
202 |     it = pool.imap(calculatestar, TASKS)
203 |     while 1:
204 |         sys.stdout.flush()
205 |         try:
206 |             sys.stdout.write('\n\t%s' % it.next(0.02))
207 |         except StopIteration:
208 |             break
209 |         except multiprocessing.TimeoutError:
210 |             sys.stdout.write('.')
211 |     print()
212 |     print()
213 | 
214 |     #
215 |     # Testing callback
216 |     #
217 | 
218 |     print('Testing callback:')
219 | 
220 |     A = []
221 |     B = [56, 0, 1, 8, 27, 64, 125, 216, 343, 512, 729]
222 | 
223 |     r = pool.apply_async(mul, (7, 8), callback=A.append)
224 |     r.wait()
225 | 
226 |     r = pool.map_async(pow3, range(10), callback=A.extend)
227 |     r.wait()
228 | 
229 |     if A == B:
230 |         print('\tcallbacks succeeded\n')
231 |     else:
232 |         print('\t*** callbacks failed\n\t\t%s != %s\n' % (A, B))
233 | 
234 |     #
235 |     # Check there are no outstanding tasks
236 |     #
237 | 
238 |     assert not pool._cache, 'cache = %r' % pool._cache
239 | 
240 |     #
241 |     # Check close() methods
242 |     #
243 | 
244 |     print('Testing close():')
245 | 
246 |     for worker in pool._pool:
247 |         assert worker.is_alive()
248 | 
249 |     result = pool.apply_async(time.sleep, [0.5])
250 |     pool.close()
251 |     pool.join()
252 | 
253 |     assert result.get() is None
254 | 
255 |     for worker in pool._pool:
256 |         assert not worker.is_alive()
257 | 
258 |     print('\tclose() succeeded\n')
259 | 
260 |     #
261 |     # Check terminate() method
262 |     #
263 | 
264 |     print('Testing terminate():')
265 | 
266 |     pool = multiprocessing.Pool(2)
267 |     DELTA = 0.1
268 |     ignore = pool.apply(pow3, [2])
269 |     results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
270 |     pool.terminate()
271 |     pool.join()
272 | 
273 |     for worker in pool._pool:
274 |         assert not worker.is_alive()
275 | 
276 |     print('\tterminate() succeeded\n')
277 | 
278 |     #
279 |     # Check garbage collection
280 |     #
281 | 
282 |     print('Testing garbage collection:')
283 | 
284 |     pool = multiprocessing.Pool(2)
285 |     DELTA = 0.1
286 |     processes = pool._pool
287 |     ignore = pool.apply(pow3, [2])
288 |     results = [pool.apply_async(time.sleep, [DELTA]) for i in range(100)]
289 | 
290 |     results = pool = None
291 | 
292 |     time.sleep(DELTA * 2)
293 | 
294 |     for worker in processes:
295 |         assert not worker.is_alive()
296 | 
297 |     print('\tgarbage collection succeeded\n')
298 | 
299 | 
300 | if __name__ == '__main__':
301 |     multiprocessing.freeze_support()
302 | 
303 |     assert len(sys.argv) in (1, 2)
304 | 
305 |     if len(sys.argv) == 1 or sys.argv[1] == 'processes':
306 |         print(' Using processes '.center(79, '-'))
307 |     elif sys.argv[1] == 'threads':
308 |         print(' Using threads '.center(79, '-'))
309 |         import multiprocessing.dummy as multiprocessing
310 |     else:
311 |         print('Usage:\n\t%s [processes | threads]' % sys.argv[0])
312 |         raise SystemExit(2)
313 | 
314 |     test()
315 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import glob
4 | 
5 | import setuptools
6 | 
7 | from distutils import sysconfig
8 | from distutils.errors import (
9 |     CCompilerError,
10 |     DistutilsExecError,
11 |     DistutilsPlatformError
12 | )
13 | HERE = os.path.dirname(os.path.abspath(__file__))
14 | 
15 | ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
16 | if sys.platform == 'win32':
17 |     # distutils.msvc9compiler can raise IOError if the compiler is missing
18 |     ext_errors += (IOError, )
19 | 
20 | is_pypy = hasattr(sys, 'pypy_version_info')
21 | is_py3k = sys.version_info[0] == 3
22 | 
23 | BUILD_WARNING = """
24 | 
25 | -----------------------------------------------------------------------
26 | WARNING: The C extensions could not be compiled
27 | -----------------------------------------------------------------------
28 | 
29 | Maybe you do not have a C compiler installed on this system?
30 | The reason was:
31 | %s
32 | 
33 | This is just a warning as most of the functionality will work even
34 | without the updated C extension. It will simply fall back to the
35 | built-in _multiprocessing module. Most notably you will not be able to use
36 | FORCE_EXECV on POSIX systems. If this is a problem for you then please
37 | install a C compiler or fix the error(s) above.
38 | ----------------------------------------------------------------------- 39 | 40 | """ 41 | 42 | # -*- py3k -*- 43 | extras = {} 44 | 45 | # -*- Distribution Meta -*- 46 | 47 | import re 48 | re_meta = re.compile(r'__(\w+?)__\s*=\s*(.*)') 49 | re_vers = re.compile(r'VERSION\s*=\s*\((.*?)\)') 50 | re_doc = re.compile(r'^"""(.+?)"""') 51 | rq = lambda s: s.strip("\"'") 52 | 53 | 54 | def add_default(m): 55 | attr_name, attr_value = m.groups() 56 | return ((attr_name, rq(attr_value)), ) 57 | 58 | 59 | def add_version(m): 60 | v = list(map(rq, m.groups()[0].split(', '))) 61 | return (('VERSION', '.'.join(v[0:4]) + ''.join(v[4:])), ) 62 | 63 | 64 | def add_doc(m): 65 | return (('doc', m.groups()[0]), ) 66 | 67 | pats = {re_meta: add_default, 68 | re_vers: add_version, 69 | re_doc: add_doc} 70 | here = os.path.abspath(os.path.dirname(__file__)) 71 | meta_fh = open(os.path.join(here, 'billiard/__init__.py')) 72 | try: 73 | meta = {} 74 | for line in meta_fh: 75 | if line.strip() == '# -eof meta-': 76 | break 77 | for pattern, handler in pats.items(): 78 | m = pattern.match(line.strip()) 79 | if m: 80 | meta.update(handler(m)) 81 | finally: 82 | meta_fh.close() 83 | 84 | 85 | if sys.version_info < (3, 7): 86 | raise ValueError('Versions of Python before 3.7 are not supported') 87 | 88 | if sys.platform == 'win32': # Windows 89 | macros = dict() 90 | libraries = ['ws2_32'] 91 | elif sys.platform.startswith('darwin'): # macOS 92 | macros = dict( 93 | HAVE_SEM_OPEN=1, 94 | HAVE_SEM_TIMEDWAIT=0, 95 | HAVE_FD_TRANSFER=1, 96 | HAVE_BROKEN_SEM_GETVALUE=1 97 | ) 98 | libraries = [] 99 | elif sys.platform.startswith('cygwin'): # Cygwin 100 | macros = dict( 101 | HAVE_SEM_OPEN=1, 102 | HAVE_SEM_TIMEDWAIT=1, 103 | HAVE_FD_TRANSFER=0, 104 | HAVE_BROKEN_SEM_UNLINK=1 105 | ) 106 | libraries = [] 107 | elif sys.platform in ('freebsd4', 'freebsd5', 'freebsd6'): 108 | # FreeBSD's P1003.1b semaphore support is very experimental 109 | # and has many known problems. 
(as of June 2008) 110 | macros = dict( # FreeBSD 4-6 111 | HAVE_SEM_OPEN=0, 112 | HAVE_SEM_TIMEDWAIT=0, 113 | HAVE_FD_TRANSFER=1, 114 | ) 115 | libraries = [] 116 | elif re.match('^(gnukfreebsd(8|9|10|11)|freebsd(7|8|9|0))', sys.platform): 117 | macros = dict( # FreeBSD 7+ and GNU/kFreeBSD 8+ 118 | HAVE_SEM_OPEN=bool( 119 | sysconfig.get_config_var('HAVE_SEM_OPEN') and not 120 | bool(sysconfig.get_config_var('POSIX_SEMAPHORES_NOT_ENABLED')) 121 | ), 122 | HAVE_SEM_TIMEDWAIT=1, 123 | HAVE_FD_TRANSFER=1, 124 | ) 125 | libraries = [] 126 | elif sys.platform.startswith('openbsd'): 127 | macros = dict( # OpenBSD 128 | HAVE_SEM_OPEN=0, # Not implemented 129 | HAVE_SEM_TIMEDWAIT=0, 130 | HAVE_FD_TRANSFER=1, 131 | ) 132 | libraries = [] 133 | else: # Linux and other unices 134 | macros = dict( 135 | HAVE_SEM_OPEN=1, 136 | HAVE_SEM_TIMEDWAIT=1, 137 | HAVE_FD_TRANSFER=1, 138 | ) 139 | libraries = ['rt'] 140 | 141 | if sys.platform == 'win32': 142 | multiprocessing_srcs = [ 143 | 'Modules/_billiard/multiprocessing.c', 144 | 'Modules/_billiard/semaphore.c', 145 | 'Modules/_billiard/win32_functions.c', 146 | ] 147 | else: 148 | multiprocessing_srcs = [ 149 | 'Modules/_billiard/multiprocessing.c', 150 | ] 151 | 152 | if macros.get('HAVE_SEM_OPEN', False): 153 | multiprocessing_srcs.append('Modules/_billiard/semaphore.c') 154 | 155 | long_description = open(os.path.join(HERE, 'README.rst')).read() 156 | 157 | # -*- Installation Requires -*- 158 | 159 | py_version = sys.version_info 160 | is_pypy = hasattr(sys, 'pypy_version_info') 161 | 162 | 163 | def _is_build_command(argv=sys.argv, cmds=('install', 'build', 'bdist')): 164 | for arg in argv: 165 | if arg.startswith(cmds): 166 | return arg 167 | 168 | 169 | def run_setup(with_extensions=True): 170 | extensions = [] 171 | if with_extensions: 172 | extensions = [ 173 | setuptools.Extension( 174 | '_billiard', 175 | sources=multiprocessing_srcs, 176 | define_macros=macros.items(), 177 | libraries=libraries, 178 | include_dirs=['Modules/_billiard'], 179 | depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'], 180 | ), 181 | ] 182 | if sys.platform == 'win32': 183 | extensions.append( 184 | setuptools.Extension( 185 | '_winapi', 186 | sources=multiprocessing_srcs, 187 | define_macros=macros.items(), 188 | libraries=libraries, 189 | include_dirs=['Modules/_billiard'], 190 | depends=glob.glob('Modules/_billiard/*.h') + ['setup.py'], 191 | ), 192 | ) 193 | packages = setuptools.find_packages(exclude=['ez_setup', 't', 't.*']) 194 | setuptools.setup( 195 | name='billiard', 196 | version=meta['VERSION'], 197 | description=meta['doc'], 198 | long_description=long_description, 199 | packages=packages, 200 | ext_modules=extensions, 201 | author=meta['author'], 202 | author_email=meta['author_email'], 203 | keywords='multiprocessing pool process', 204 | maintainer=meta['maintainer'], 205 | maintainer_email=meta['contact'], 206 | url=meta['homepage'], 207 | zip_safe=False, 208 | license='BSD', 209 | python_requires='>=3.7', 210 | classifiers=[ 211 | 'Development Status :: 5 - Production/Stable', 212 | 'Intended Audience :: Developers', 213 | 'Programming Language :: Python', 214 | 'Programming Language :: C', 215 | 'Programming Language :: Python :: 3', 216 | 'Programming Language :: Python :: 3.12', 217 | 'Programming Language :: Python :: 3.8', 218 | 'Programming Language :: Python :: 3.9', 219 | 'Programming Language :: Python :: 3.10', 220 | 'Programming Language :: Python :: 3.11', 221 | 'Programming Language :: Python :: 3.13', 222 | 'Programming Language :: 
Python :: Implementation :: CPython', 223 | 'Programming Language :: Python :: Implementation :: PyPy', 224 | 'Operating System :: Microsoft :: Windows', 225 | 'Operating System :: POSIX', 226 | 'License :: OSI Approved :: BSD License', 227 | 'Topic :: Software Development :: Libraries :: Python Modules', 228 | 'Topic :: System :: Distributed Computing', 229 | ], 230 | **extras 231 | ) 232 | 233 | try: 234 | run_setup(not (is_pypy or is_py3k)) 235 | except BaseException: 236 | if _is_build_command(sys.argv): 237 | import traceback 238 | print(BUILD_WARNING % '\n'.join(traceback.format_stack()), 239 | file=sys.stderr) 240 | run_setup(False) 241 | else: 242 | raise 243 | -------------------------------------------------------------------------------- /billiard/compat.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import numbers 3 | import os 4 | import subprocess 5 | import sys 6 | 7 | from itertools import zip_longest 8 | 9 | if sys.platform == 'win32': 10 | try: 11 | import _winapi # noqa 12 | except ImportError: # pragma: no cover 13 | from _multiprocessing import win32 as _winapi # noqa 14 | else: 15 | _winapi = None # noqa 16 | 17 | try: 18 | import resource 19 | except ImportError: # pragma: no cover 20 | resource = None 21 | 22 | from io import UnsupportedOperation 23 | FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation) 24 | 25 | 26 | if hasattr(os, 'write'): 27 | __write__ = os.write 28 | 29 | def send_offset(fd, buf, offset): 30 | return __write__(fd, buf[offset:]) 31 | 32 | else: # non-posix platform 33 | 34 | def send_offset(fd, buf, offset): # noqa 35 | raise NotImplementedError('send_offset') 36 | 37 | 38 | try: 39 | fsencode = os.fsencode 40 | fsdecode = os.fsdecode 41 | except AttributeError: 42 | def _fscodec(): 43 | encoding = sys.getfilesystemencoding() 44 | if encoding == 'mbcs': 45 | errors = 'strict' 46 | else: 47 | errors = 'surrogateescape' 48 | 49 | def fsencode(filename): 50 | """ 51 | Encode filename to the filesystem encoding with 'surrogateescape' 52 | error handler, return bytes unchanged. On Windows, use 'strict' 53 | error handler if the file system encoding is 'mbcs' (which is the 54 | default encoding). 55 | """ 56 | if isinstance(filename, bytes): 57 | return filename 58 | elif isinstance(filename, str): 59 | return filename.encode(encoding, errors) 60 | else: 61 | raise TypeError("expect bytes or str, not %s" 62 | % type(filename).__name__) 63 | 64 | def fsdecode(filename): 65 | """ 66 | Decode filename from the filesystem encoding with 'surrogateescape' 67 | error handler, return str unchanged. On Windows, use 'strict' error 68 | handler if the file system encoding is 'mbcs' (which is the default 69 | encoding). 70 | """ 71 | if isinstance(filename, str): 72 | return filename 73 | elif isinstance(filename, bytes): 74 | return filename.decode(encoding, errors) 75 | else: 76 | raise TypeError("expect bytes or str, not %s" 77 | % type(filename).__name__) 78 | 79 | return fsencode, fsdecode 80 | 81 | fsencode, fsdecode = _fscodec() 82 | del _fscodec 83 | 84 | 85 | def maybe_fileno(f): 86 | """Get object fileno, or :const:`None` if not defined.""" 87 | if isinstance(f, numbers.Integral): 88 | return f 89 | try: 90 | return f.fileno() 91 | except FILENO_ERRORS: 92 | pass 93 | 94 | 95 | def get_fdmax(default=None): 96 | """Return the maximum number of open file descriptors 97 | on this system. 98 | 99 | :keyword default: Value returned if there's no file 100 | descriptor limit. 
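
    Tries ``os.sysconf('SC_OPEN_MAX')`` first, then falls back to the
    hard limit from ``resource.getrlimit(RLIMIT_NOFILE)``; an unlimited
    (``RLIM_INFINITY``) hard limit also yields *default*.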
101 | 102 | """ 103 | try: 104 | return os.sysconf('SC_OPEN_MAX') 105 | except: 106 | pass 107 | if resource is None: # Windows 108 | return default 109 | fdmax = resource.getrlimit(resource.RLIMIT_NOFILE)[1] 110 | if fdmax == resource.RLIM_INFINITY: 111 | return default 112 | return fdmax 113 | 114 | 115 | def uniq(it): 116 | """Return all unique elements in ``it``, preserving order.""" 117 | seen = set() 118 | return (seen.add(obj) or obj for obj in it if obj not in seen) 119 | 120 | 121 | try: 122 | closerange = os.closerange 123 | except AttributeError: 124 | 125 | def closerange(fd_low, fd_high): # noqa 126 | for fd in reversed(range(fd_low, fd_high)): 127 | try: 128 | os.close(fd) 129 | except OSError as exc: 130 | if exc.errno != errno.EBADF: 131 | raise 132 | 133 | def close_open_fds(keep=None): 134 | # must make sure this is 0-inclusive (Issue #celery/1882) 135 | keep = list(uniq(sorted( 136 | f for f in map(maybe_fileno, keep or []) if f is not None 137 | ))) 138 | maxfd = get_fdmax(default=2048) 139 | kL, kH = iter([-1] + keep), iter(keep + [maxfd]) 140 | for low, high in zip_longest(kL, kH): 141 | if low + 1 != high: 142 | closerange(low + 1, high) 143 | else: 144 | def close_open_fds(keep=None): # noqa 145 | keep = [maybe_fileno(f) 146 | for f in (keep or []) if maybe_fileno(f) is not None] 147 | for fd in reversed(range(get_fdmax(default=2048))): 148 | if fd not in keep: 149 | try: 150 | os.close(fd) 151 | except OSError as exc: 152 | if exc.errno != errno.EBADF: 153 | raise 154 | 155 | 156 | def get_errno(exc): 157 | """:exc:`socket.error` and :exc:`IOError` first got 158 | the ``.errno`` attribute in Py2.7""" 159 | try: 160 | return exc.errno 161 | except AttributeError: 162 | return 0 163 | 164 | 165 | try: 166 | import _posixsubprocess 167 | except ImportError: 168 | def spawnv_passfds(path, args, passfds): 169 | if sys.platform != 'win32': 170 | # when not using _posixsubprocess (on earlier python) and not on 171 | # windows, we want to keep stdout/stderr open... 
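            # (the extra fds are added to the keep-set below, so the
            # forked child leaves them open when it closes every other
            # descriptor before exec'ing the new interpreter)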
172 | passfds = passfds + [ 173 | maybe_fileno(sys.stdout), 174 | maybe_fileno(sys.stderr), 175 | ] 176 | pid = os.fork() 177 | if not pid: 178 | close_open_fds(keep=sorted(f for f in passfds if f)) 179 | os.execv(fsencode(path), args) 180 | return pid 181 | else: 182 | def spawnv_passfds(path, args, passfds): 183 | passfds = sorted(passfds) 184 | errpipe_read, errpipe_write = os.pipe() 185 | try: 186 | args = [ 187 | args, [fsencode(path)], True, tuple(passfds), None, None, 188 | -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, 189 | False, False] 190 | if sys.version_info >= (3, 11): 191 | args.append(-1) # process_group 192 | if sys.version_info >= (3, 9): 193 | args.extend((None, None, None, -1)) # group, extra_groups, user, umask 194 | args.append(None) # preexec_fn 195 | if (3, 11) <= sys.version_info < (3, 14): 196 | args.append(subprocess._USE_VFORK) 197 | return _posixsubprocess.fork_exec(*args) 198 | finally: 199 | os.close(errpipe_read) 200 | os.close(errpipe_write) 201 | 202 | 203 | if sys.platform == 'win32': 204 | 205 | def setblocking(handle, blocking): 206 | raise NotImplementedError('setblocking not implemented on win32') 207 | 208 | def isblocking(handle): 209 | raise NotImplementedError('isblocking not implemented on win32') 210 | 211 | else: 212 | from os import O_NONBLOCK 213 | from fcntl import fcntl, F_GETFL, F_SETFL 214 | 215 | def isblocking(handle): # noqa 216 | return not (fcntl(handle, F_GETFL) & O_NONBLOCK) 217 | 218 | def setblocking(handle, blocking): # noqa 219 | flags = fcntl(handle, F_GETFL, 0) 220 | fcntl( 221 | handle, F_SETFL, 222 | flags & (~O_NONBLOCK) if blocking else flags | O_NONBLOCK, 223 | ) 224 | 225 | 226 | E_PSUTIL_MISSING = """ 227 | On Windows, the ability to inspect memory usage requires the psutil library. 228 | 229 | You can install it using pip: 230 | 231 | $ pip install psutil 232 | """ 233 | 234 | 235 | E_RESOURCE_MISSING = """ 236 | Your platform ({0}) does not seem to have the `resource.getrusage' function. 237 | 238 | Please open an issue so that we can add support for this platform. 239 | """ 240 | 241 | 242 | if sys.platform == 'win32': 243 | 244 | try: 245 | import psutil 246 | except ImportError: # pragma: no cover 247 | psutil = None # noqa 248 | 249 | def mem_rss(): 250 | # type () -> int 251 | if psutil is None: 252 | raise ImportError(E_PSUTIL_MISSING.strip()) 253 | return int(psutil.Process(os.getpid()).memory_info()[0] / 1024.0) 254 | 255 | else: 256 | try: 257 | from resource import getrusage, RUSAGE_SELF 258 | except ImportError: # pragma: no cover 259 | getrusage = RUSAGE_SELF = None # noqa 260 | 261 | if 'bsd' in sys.platform or sys.platform == 'darwin': 262 | # On BSD platforms :man:`getrusage(2)` ru_maxrss field is in bytes. 263 | 264 | def maxrss_to_kb(v): 265 | # type: (SupportsInt) -> int 266 | return int(v) / 1024.0 267 | 268 | else: 269 | # On Linux it's kilobytes. 270 | 271 | def maxrss_to_kb(v): 272 | # type: (SupportsInt) -> int 273 | return int(v) 274 | 275 | def mem_rss(): 276 | # type () -> int 277 | if resource is None: 278 | raise ImportError(E_RESOURCE_MISSING.strip().format(sys.platform)) 279 | return maxrss_to_kb(getrusage(RUSAGE_SELF).ru_maxrss) 280 | -------------------------------------------------------------------------------- /billiard/forkserver.py: -------------------------------------------------------------------------------- 1 | import errno 2 | import os 3 | import selectors 4 | import signal 5 | import socket 6 | import struct 7 | import sys 8 | import threading 9 | 10 | from . 
import connection
11 | from . import process
12 | from . import reduction
13 | from . import semaphore_tracker
14 | from . import spawn
15 | from . import util
16 | 
17 | from .compat import spawnv_passfds
18 | 
19 | __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
20 |            'set_forkserver_preload']
21 | 
22 | #
23 | #
24 | #
25 | 
26 | MAXFDS_TO_SEND = 256
27 | UNSIGNED_STRUCT = struct.Struct('Q')     # large enough for pid_t
28 | 
29 | #
30 | # Forkserver class
31 | #
32 | 
33 | 
34 | class ForkServer:
35 | 
36 |     def __init__(self):
37 |         self._forkserver_address = None
38 |         self._forkserver_alive_fd = None
39 |         self._inherited_fds = None
40 |         self._lock = threading.Lock()
41 |         self._preload_modules = ['__main__']
42 | 
43 |     def set_forkserver_preload(self, modules_names):
44 |         '''Set list of module names to try to load in forkserver process.'''
45 |         if not all(type(mod) is str for mod in modules_names):
46 |             raise TypeError('module_names must be a list of strings')
47 |         self._preload_modules = modules_names
48 | 
49 |     def get_inherited_fds(self):
50 |         '''Return list of fds inherited from parent process.
51 | 
52 |         This returns None if the current process was not started by fork
53 |         server.
54 |         '''
55 |         return self._inherited_fds
56 | 
57 |     def connect_to_new_process(self, fds):
58 |         '''Request forkserver to create a child process.
59 | 
60 |         Returns a pair of fds (status_r, data_w). The calling process can read
61 |         the child process's pid and (eventually) its returncode from status_r.
62 |         The calling process should write to data_w the pickled preparation and
63 |         process data.
64 |         '''
65 |         self.ensure_running()
66 |         if len(fds) + 4 >= MAXFDS_TO_SEND:
67 |             raise ValueError('too many fds')
68 |         with socket.socket(socket.AF_UNIX) as client:
69 |             client.connect(self._forkserver_address)
70 |             parent_r, child_w = os.pipe()
71 |             child_r, parent_w = os.pipe()
72 |             allfds = [child_r, child_w, self._forkserver_alive_fd,
73 |                       semaphore_tracker.getfd()]
74 |             allfds += fds
75 |             try:
76 |                 reduction.sendfds(client, allfds)
77 |                 return parent_r, parent_w
78 |             except:
79 |                 os.close(parent_r)
80 |                 os.close(parent_w)
81 |                 raise
82 |             finally:
83 |                 os.close(child_r)
84 |                 os.close(child_w)
85 | 
86 |     def ensure_running(self):
87 |         '''Make sure that a fork server is running.
88 | 
89 |         This can be called from any process. Note that usually a child
90 |         process will just reuse the forkserver started by its parent, so
91 |         ensure_running() will do nothing.
92 |         '''
93 |         with self._lock:
94 |             semaphore_tracker.ensure_running()
95 |             if self._forkserver_alive_fd is not None:
96 |                 return
97 | 
98 |             cmd = ('from billiard.forkserver import main; ' +
99 |                    'main(%d, %d, %r, **%r)')
100 | 
101 |             if self._preload_modules:
102 |                 desired_keys = {'main_path', 'sys_path'}
103 |                 data = spawn.get_preparation_data('ignore')
104 |                 data = {
105 |                     x: y for (x, y) in data.items() if x in desired_keys
106 |                 }
107 |             else:
108 |                 data = {}
109 | 
110 |             with socket.socket(socket.AF_UNIX) as listener:
111 |                 address = connection.arbitrary_address('AF_UNIX')
112 |                 listener.bind(address)
113 |                 os.chmod(address, 0o600)
114 |                 listener.listen()
115 | 
116 |                 # all client processes own the write end of the "alive" pipe;
117 |                 # when they all terminate the read end becomes ready.
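                # The read end is handed to the server process below; when
                # the last client exits, every copy of the write end has been
                # closed, select() wakes up on alive_r with EOF, and main()
                # shuts the server down via SystemExit.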
118 | alive_r, alive_w = os.pipe() 119 | try: 120 | fds_to_pass = [listener.fileno(), alive_r] 121 | cmd %= (listener.fileno(), alive_r, self._preload_modules, 122 | data) 123 | exe = spawn.get_executable() 124 | args = [exe] + util._args_from_interpreter_flags() 125 | args += ['-c', cmd] 126 | spawnv_passfds(exe, args, fds_to_pass) 127 | except: 128 | os.close(alive_w) 129 | raise 130 | finally: 131 | os.close(alive_r) 132 | self._forkserver_address = address 133 | self._forkserver_alive_fd = alive_w 134 | 135 | # 136 | # 137 | # 138 | 139 | 140 | def main(listener_fd, alive_r, preload, main_path=None, sys_path=None): 141 | '''Run forkserver.''' 142 | if preload: 143 | if '__main__' in preload and main_path is not None: 144 | process.current_process()._inheriting = True 145 | try: 146 | spawn.import_main_path(main_path) 147 | finally: 148 | del process.current_process()._inheriting 149 | for modname in preload: 150 | try: 151 | __import__(modname) 152 | except ImportError: 153 | pass 154 | 155 | # close sys.stdin 156 | if sys.stdin is not None: 157 | try: 158 | sys.stdin.close() 159 | sys.stdin = open(os.devnull) 160 | except (OSError, ValueError): 161 | pass 162 | 163 | # ignoring SIGCHLD means no need to reap zombie processes 164 | handler = signal.signal(signal.SIGCHLD, signal.SIG_IGN) 165 | with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \ 166 | selectors.DefaultSelector() as selector: 167 | _forkserver._forkserver_address = listener.getsockname() 168 | selector.register(listener, selectors.EVENT_READ) 169 | selector.register(alive_r, selectors.EVENT_READ) 170 | 171 | while True: 172 | try: 173 | while True: 174 | rfds = [key.fileobj for (key, events) in selector.select()] 175 | if rfds: 176 | break 177 | 178 | if alive_r in rfds: 179 | # EOF because no more client processes left 180 | assert os.read(alive_r, 1) == b'' 181 | raise SystemExit 182 | 183 | assert listener in rfds 184 | with listener.accept()[0] as s: 185 | code = 1 186 | if os.fork() == 0: 187 | try: 188 | _serve_one(s, listener, alive_r, handler) 189 | except Exception: 190 | sys.excepthook(*sys.exc_info()) 191 | sys.stderr.flush() 192 | finally: 193 | os._exit(code) 194 | except OSError as e: 195 | if e.errno != errno.ECONNABORTED: 196 | raise 197 | 198 | 199 | def __unpack_fds(child_r, child_w, alive, stfd, *inherited): 200 | return child_r, child_w, alive, stfd, inherited 201 | 202 | 203 | def _serve_one(s, listener, alive_r, handler): 204 | # close unnecessary stuff and reset SIGCHLD handler 205 | listener.close() 206 | os.close(alive_r) 207 | signal.signal(signal.SIGCHLD, handler) 208 | 209 | # receive fds from parent process 210 | fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1) 211 | s.close() 212 | assert len(fds) <= MAXFDS_TO_SEND 213 | 214 | (child_r, child_w, _forkserver._forkserver_alive_fd, 215 | stfd, _forkserver._inherited_fds) = __unpack_fds(*fds) 216 | semaphore_tracker._semaphore_tracker._fd = stfd 217 | 218 | # send pid to client processes 219 | write_unsigned(child_w, os.getpid()) 220 | 221 | # reseed random number generator 222 | if 'random' in sys.modules: 223 | import random 224 | random.seed() 225 | 226 | # run process object received over pipe 227 | code = spawn._main(child_r) 228 | 229 | # write the exit code to the pipe 230 | write_unsigned(child_w, code) 231 | 232 | # 233 | # Read and write unsigned numbers 234 | # 235 | 236 | 237 | def read_unsigned(fd): 238 | data = b'' 239 | length = UNSIGNED_STRUCT.size 240 | while len(data) < length: 241 | s = os.read(fd, length - 
len(data)) 242 | if not s: 243 | raise EOFError('unexpected EOF') 244 | data += s 245 | return UNSIGNED_STRUCT.unpack(data)[0] 246 | 247 | 248 | def write_unsigned(fd, n): 249 | msg = UNSIGNED_STRUCT.pack(n) 250 | while msg: 251 | nbytes = os.write(fd, msg) 252 | if nbytes == 0: 253 | raise RuntimeError('should not get here') 254 | msg = msg[nbytes:] 255 | 256 | # 257 | # 258 | # 259 | 260 | _forkserver = ForkServer() 261 | ensure_running = _forkserver.ensure_running 262 | get_inherited_fds = _forkserver.get_inherited_fds 263 | connect_to_new_process = _forkserver.connect_to_new_process 264 | set_forkserver_preload = _forkserver.set_forkserver_preload 265 | -------------------------------------------------------------------------------- /billiard/heap.py: -------------------------------------------------------------------------------- 1 | # 2 | # Module which supports allocation of memory from an mmap 3 | # 4 | # multiprocessing/heap.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk 7 | # Licensed to PSF under a Contributor Agreement. 8 | # 9 | 10 | import bisect 11 | import errno 12 | import io 13 | import mmap 14 | import os 15 | import sys 16 | import threading 17 | import tempfile 18 | 19 | from . import context 20 | from . import reduction 21 | from . import util 22 | 23 | from ._ext import _billiard, win32 24 | 25 | __all__ = ['BufferWrapper'] 26 | 27 | PY3 = sys.version_info[0] == 3 28 | 29 | # 30 | # Inheritable class which wraps an mmap, and from which blocks can be allocated 31 | # 32 | 33 | if sys.platform == 'win32': 34 | 35 | class Arena: 36 | 37 | _rand = tempfile._RandomNameSequence() 38 | 39 | def __init__(self, size): 40 | self.size = size 41 | for i in range(100): 42 | name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) 43 | buf = mmap.mmap(-1, size, tagname=name) 44 | if win32.GetLastError() == 0: 45 | break 46 | # we have reopened a preexisting map 47 | buf.close() 48 | else: 49 | exc = IOError('Cannot find name for new mmap') 50 | exc.errno = errno.EEXIST 51 | raise exc 52 | self.name = name 53 | self.buffer = buf 54 | self._state = (self.size, self.name) 55 | 56 | def __getstate__(self): 57 | context.assert_spawning(self) 58 | return self._state 59 | 60 | def __setstate__(self, state): 61 | self.size, self.name = self._state = state 62 | self.buffer = mmap.mmap(-1, self.size, tagname=self.name) 63 | # XXX Temporarily preventing buildbot failures while determining 64 | # XXX the correct long-term fix. 
See issue #23060 65 | # assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS 66 | 67 | else: 68 | 69 | class Arena: 70 | 71 | def __init__(self, size, fd=-1): 72 | self.size = size 73 | self.fd = fd 74 | if fd == -1: 75 | if PY3: 76 | self.fd, name = tempfile.mkstemp( 77 | prefix='pym-%d-' % (os.getpid(),), 78 | dir=util.get_temp_dir(), 79 | ) 80 | 81 | os.unlink(name) 82 | util.Finalize(self, os.close, (self.fd,)) 83 | with io.open(self.fd, 'wb', closefd=False) as f: 84 | bs = 1024 * 1024 85 | if size >= bs: 86 | zeros = b'\0' * bs 87 | for _ in range(size // bs): 88 | f.write(zeros) 89 | del zeros 90 | f.write(b'\0' * (size % bs)) 91 | assert f.tell() == size 92 | else: 93 | self.fd, name = tempfile.mkstemp( 94 | prefix='pym-%d-' % (os.getpid(),), 95 | dir=util.get_temp_dir(), 96 | ) 97 | os.unlink(name) 98 | util.Finalize(self, os.close, (self.fd,)) 99 | os.ftruncate(self.fd, size) 100 | self.buffer = mmap.mmap(self.fd, self.size) 101 | 102 | def reduce_arena(a): 103 | if a.fd == -1: 104 | raise ValueError('Arena is unpicklable because ' 105 | 'forking was enabled when it was created') 106 | return rebuild_arena, (a.size, reduction.DupFd(a.fd)) 107 | 108 | def rebuild_arena(size, dupfd): 109 | return Arena(size, dupfd.detach()) 110 | 111 | reduction.register(Arena, reduce_arena) 112 | 113 | # 114 | # Class allowing allocation of chunks of memory from arenas 115 | # 116 | 117 | 118 | class Heap: 119 | 120 | _alignment = 8 121 | 122 | def __init__(self, size=mmap.PAGESIZE): 123 | self._lastpid = os.getpid() 124 | self._lock = threading.Lock() 125 | self._size = size 126 | self._lengths = [] 127 | self._len_to_seq = {} 128 | self._start_to_block = {} 129 | self._stop_to_block = {} 130 | self._allocated_blocks = set() 131 | self._arenas = [] 132 | # list of pending blocks to free - see free() comment below 133 | self._pending_free_blocks = [] 134 | 135 | @staticmethod 136 | def _roundup(n, alignment): 137 | # alignment must be a power of 2 138 | mask = alignment - 1 139 | return (n + mask) & ~mask 140 | 141 | def _malloc(self, size): 142 | # returns a large enough block -- it might be much larger 143 | i = bisect.bisect_left(self._lengths, size) 144 | if i == len(self._lengths): 145 | length = self._roundup(max(self._size, size), mmap.PAGESIZE) 146 | self._size *= 2 147 | util.info('allocating a new mmap of length %d', length) 148 | arena = Arena(length) 149 | self._arenas.append(arena) 150 | return (arena, 0, length) 151 | else: 152 | length = self._lengths[i] 153 | seq = self._len_to_seq[length] 154 | block = seq.pop() 155 | if not seq: 156 | del self._len_to_seq[length], self._lengths[i] 157 | 158 | (arena, start, stop) = block 159 | del self._start_to_block[(arena, start)] 160 | del self._stop_to_block[(arena, stop)] 161 | return block 162 | 163 | def _free(self, block): 164 | # free location and try to merge with neighbours 165 | (arena, start, stop) = block 166 | 167 | try: 168 | prev_block = self._stop_to_block[(arena, start)] 169 | except KeyError: 170 | pass 171 | else: 172 | start, _ = self._absorb(prev_block) 173 | 174 | try: 175 | next_block = self._start_to_block[(arena, stop)] 176 | except KeyError: 177 | pass 178 | else: 179 | _, stop = self._absorb(next_block) 180 | 181 | block = (arena, start, stop) 182 | length = stop - start 183 | 184 | try: 185 | self._len_to_seq[length].append(block) 186 | except KeyError: 187 | self._len_to_seq[length] = [block] 188 | bisect.insort(self._lengths, length) 189 | 190 | self._start_to_block[(arena, start)] = block 191 | 
self._stop_to_block[(arena, stop)] = block 192 | 193 | def _absorb(self, block): 194 | # deregister this block so it can be merged with a neighbour 195 | (arena, start, stop) = block 196 | del self._start_to_block[(arena, start)] 197 | del self._stop_to_block[(arena, stop)] 198 | 199 | length = stop - start 200 | seq = self._len_to_seq[length] 201 | seq.remove(block) 202 | if not seq: 203 | del self._len_to_seq[length] 204 | self._lengths.remove(length) 205 | 206 | return start, stop 207 | 208 | def _free_pending_blocks(self): 209 | # Free all the blocks in the pending list - called with the lock held 210 | while 1: 211 | try: 212 | block = self._pending_free_blocks.pop() 213 | except IndexError: 214 | break 215 | self._allocated_blocks.remove(block) 216 | self._free(block) 217 | 218 | def free(self, block): 219 | # free a block returned by malloc() 220 | # Since free() can be called asynchronously by the GC, it could happen 221 | # that it's called while self._lock is held: in that case, 222 | # self._lock.acquire() would deadlock (issue #12352). To avoid that, a 223 | # trylock is used instead, and if the lock can't be acquired 224 | # immediately, the block is added to a list of blocks to be freed 225 | # synchronously sometime later from malloc() or free(), by calling 226 | # _free_pending_blocks() (appending and retrieving from a list is not 227 | # strictly thread-safe but under CPython it's atomic 228 | # thanks to the GIL). 229 | assert os.getpid() == self._lastpid 230 | if not self._lock.acquire(False): 231 | # can't acquire the lock right now, add the block to the list of 232 | # pending blocks to free 233 | self._pending_free_blocks.append(block) 234 | else: 235 | # we hold the lock 236 | try: 237 | self._free_pending_blocks() 238 | self._allocated_blocks.remove(block) 239 | self._free(block) 240 | finally: 241 | self._lock.release() 242 | 243 | def malloc(self, size): 244 | # return a block of right size (possibly rounded up) 245 | assert 0 <= size < sys.maxsize 246 | if os.getpid() != self._lastpid: 247 | self.__init__() # reinitialize after fork 248 | with self._lock: 249 | self._free_pending_blocks() 250 | size = self._roundup(max(size, 1), self._alignment) 251 | (arena, start, stop) = self._malloc(size) 252 | new_stop = start + size 253 | if new_stop < stop: 254 | self._free((arena, new_stop, stop)) 255 | block = (arena, start, new_stop) 256 | self._allocated_blocks.add(block) 257 | return block 258 | 259 | # 260 | # Class representing a chunk of an mmap -- can be inherited 261 | # 262 | 263 | 264 | class BufferWrapper: 265 | 266 | _heap = Heap() 267 | 268 | def __init__(self, size): 269 | assert 0 <= size < sys.maxsize 270 | block = BufferWrapper._heap.malloc(size) 271 | self._state = (block, size) 272 | util.Finalize(self, BufferWrapper._heap.free, args=(block,)) 273 | 274 | def get_address(self): 275 | (arena, start, stop), size = self._state 276 | address, length = _billiard.address_of_buffer(arena.buffer) 277 | assert size <= length 278 | return address + start 279 | 280 | def get_size(self): 281 | return self._state[1] 282 | 283 | def create_memoryview(self): 284 | (arena, start, stop), size = self._state 285 | return memoryview(arena.buffer)[start:start + size] 286 | -------------------------------------------------------------------------------- /billiard/reduction.py: -------------------------------------------------------------------------------- 1 | # 2 | # Module which deals with pickling of objects. 
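# An illustrative sketch, added to this listing and not part of the
# module: it shows the per-pickler reducer hook that ForkingPickler
# (defined below in this file) provides. ``Token`` and ``reduce_token``
# are hypothetical names used only for the example.
#
#     from billiard.reduction import ForkingPickler
#
#     class Token:
#         def __init__(self, value):
#             self.value = value
#
#     def reduce_token(t):
#         # standard __reduce__ contract: a (callable, args) pair
#         return Token, (t.value,)
#
#     # Reducers registered this way are consulted by ForkingPickler
#     # only; the global pickle/copyreg dispatch tables stay untouched.
#     ForkingPickler.register(Token, reduce_token)
#     payload = ForkingPickler.dumps(Token(42))
#     assert ForkingPickler.loads(payload).value == 42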
3 | # 4 | # multiprocessing/reduction.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk 7 | # Licensed to PSF under a Contributor Agreement. 8 | # 9 | 10 | import functools 11 | import io 12 | import os 13 | import pickle 14 | import socket 15 | import sys 16 | 17 | from . import context 18 | 19 | __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump'] 20 | 21 | PY3 = sys.version_info[0] == 3 22 | 23 | 24 | HAVE_SEND_HANDLE = (sys.platform == 'win32' or 25 | (hasattr(socket, 'CMSG_LEN') and 26 | hasattr(socket, 'SCM_RIGHTS') and 27 | hasattr(socket.socket, 'sendmsg'))) 28 | 29 | # 30 | # Pickler subclass 31 | # 32 | 33 | 34 | if PY3: 35 | import copyreg 36 | 37 | class ForkingPickler(pickle.Pickler): 38 | '''Pickler subclass used by multiprocessing.''' 39 | _extra_reducers = {} 40 | _copyreg_dispatch_table = copyreg.dispatch_table 41 | 42 | def __init__(self, *args): 43 | super(ForkingPickler, self).__init__(*args) 44 | self.dispatch_table = self._copyreg_dispatch_table.copy() 45 | self.dispatch_table.update(self._extra_reducers) 46 | 47 | @classmethod 48 | def register(cls, type, reduce): 49 | '''Register a reduce function for a type.''' 50 | cls._extra_reducers[type] = reduce 51 | 52 | @classmethod 53 | def dumps(cls, obj, protocol=None): 54 | buf = io.BytesIO() 55 | cls(buf, protocol).dump(obj) 56 | return buf.getbuffer() 57 | 58 | @classmethod 59 | def loadbuf(cls, buf, protocol=None): 60 | return cls.loads(buf.getbuffer()) 61 | 62 | loads = pickle.loads 63 | 64 | else: 65 | 66 | class ForkingPickler(pickle.Pickler): # noqa 67 | '''Pickler subclass used by multiprocessing.''' 68 | dispatch = pickle.Pickler.dispatch.copy() 69 | 70 | @classmethod 71 | def register(cls, type, reduce): 72 | '''Register a reduce function for a type.''' 73 | def dispatcher(self, obj): 74 | rv = reduce(obj) 75 | self.save_reduce(obj=obj, *rv) 76 | cls.dispatch[type] = dispatcher 77 | 78 | @classmethod 79 | def dumps(cls, obj, protocol=None): 80 | buf = io.BytesIO() 81 | cls(buf, protocol).dump(obj) 82 | return buf.getvalue() 83 | 84 | @classmethod 85 | def loadbuf(cls, buf, protocol=None): 86 | return cls.loads(buf.getvalue()) 87 | 88 | @classmethod 89 | def loads(cls, buf, loads=pickle.loads): 90 | if isinstance(buf, io.BytesIO): 91 | buf = buf.getvalue() 92 | return loads(buf) 93 | register = ForkingPickler.register 94 | 95 | 96 | def dump(obj, file, protocol=None): 97 | '''Replacement for pickle.dump() using ForkingPickler.''' 98 | ForkingPickler(file, protocol).dump(obj) 99 | 100 | # 101 | # Platform specific definitions 102 | # 103 | 104 | if sys.platform == 'win32': 105 | # Windows 106 | __all__ += ['DupHandle', 'duplicate', 'steal_handle'] 107 | from .compat import _winapi 108 | 109 | def duplicate(handle, target_process=None, inheritable=False): 110 | '''Duplicate a handle. 
(target_process is a handle not a pid!)''' 111 | if target_process is None: 112 | target_process = _winapi.GetCurrentProcess() 113 | return _winapi.DuplicateHandle( 114 | _winapi.GetCurrentProcess(), handle, target_process, 115 | 0, inheritable, _winapi.DUPLICATE_SAME_ACCESS) 116 | 117 | def steal_handle(source_pid, handle): 118 | '''Steal a handle from process identified by source_pid.''' 119 | source_process_handle = _winapi.OpenProcess( 120 | _winapi.PROCESS_DUP_HANDLE, False, source_pid) 121 | try: 122 | return _winapi.DuplicateHandle( 123 | source_process_handle, handle, 124 | _winapi.GetCurrentProcess(), 0, False, 125 | _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE) 126 | finally: 127 | _winapi.CloseHandle(source_process_handle) 128 | 129 | def send_handle(conn, handle, destination_pid): 130 | '''Send a handle over a local connection.''' 131 | dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid) 132 | conn.send(dh) 133 | 134 | def recv_handle(conn): 135 | '''Receive a handle over a local connection.''' 136 | return conn.recv().detach() 137 | 138 | class DupHandle: 139 | '''Picklable wrapper for a handle.''' 140 | def __init__(self, handle, access, pid=None): 141 | if pid is None: 142 | # We just duplicate the handle in the current process and 143 | # let the receiving process steal the handle. 144 | pid = os.getpid() 145 | proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid) 146 | try: 147 | self._handle = _winapi.DuplicateHandle( 148 | _winapi.GetCurrentProcess(), 149 | handle, proc, access, False, 0) 150 | finally: 151 | _winapi.CloseHandle(proc) 152 | self._access = access 153 | self._pid = pid 154 | 155 | def detach(self): 156 | '''Get the handle. This should only be called once.''' 157 | # retrieve handle from process which currently owns it 158 | if self._pid == os.getpid(): 159 | # The handle has already been duplicated for this process. 160 | return self._handle 161 | # We must steal the handle from the process whose pid is self._pid. 
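# (Added note: DUPLICATE_CLOSE_SOURCE below copies the handle into this
# process *and* closes the copy held by the source process in one step,
# which is why detach() may only be called once -- after it runs, no
# process other than the caller still holds the handle.)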
162 | proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, 163 | self._pid) 164 | try: 165 | return _winapi.DuplicateHandle( 166 | proc, self._handle, _winapi.GetCurrentProcess(), 167 | self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE) 168 | finally: 169 | _winapi.CloseHandle(proc) 170 | 171 | else: 172 | # Unix 173 | __all__ += ['DupFd', 'sendfds', 'recvfds'] 174 | import array 175 | 176 | # On macOS we should acknowledge receipt of fds -- see Issue14669 177 | ACKNOWLEDGE = sys.platform == 'darwin' 178 | 179 | def sendfds(sock, fds): 180 | '''Send an array of fds over an AF_UNIX socket.''' 181 | fds = array.array('i', fds) 182 | msg = bytes([len(fds) % 256]) 183 | sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)]) 184 | if ACKNOWLEDGE and sock.recv(1) != b'A': 185 | raise RuntimeError('did not receive acknowledgement of fd') 186 | 187 | def recvfds(sock, size): 188 | '''Receive an array of fds over an AF_UNIX socket.''' 189 | a = array.array('i') 190 | bytes_size = a.itemsize * size 191 | msg, ancdata, flags, addr = sock.recvmsg( 192 | 1, socket.CMSG_LEN(bytes_size), 193 | ) 194 | if not msg and not ancdata: 195 | raise EOFError 196 | try: 197 | if ACKNOWLEDGE: 198 | sock.send(b'A') 199 | if len(ancdata) != 1: 200 | raise RuntimeError( 201 | 'received %d items of ancdata' % len(ancdata), 202 | ) 203 | cmsg_level, cmsg_type, cmsg_data = ancdata[0] 204 | if (cmsg_level == socket.SOL_SOCKET and 205 | cmsg_type == socket.SCM_RIGHTS): 206 | if len(cmsg_data) % a.itemsize != 0: 207 | raise ValueError 208 | a.frombytes(cmsg_data) 209 | assert len(a) % 256 == msg[0] 210 | return list(a) 211 | except (ValueError, IndexError): 212 | pass 213 | raise RuntimeError('Invalid data received') 214 | 215 | def send_handle(conn, handle, destination_pid): # noqa 216 | '''Send a handle over a local connection.''' 217 | fd = conn.fileno() 218 | with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: 219 | sendfds(s, [handle]) 220 | 221 | def recv_handle(conn): # noqa 222 | '''Receive a handle over a local connection.''' 223 | fd = conn.fileno() 224 | with socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM) as s: 225 | return recvfds(s, 1)[0] 226 | 227 | def DupFd(fd): 228 | '''Return a wrapper for an fd.''' 229 | popen_obj = context.get_spawning_popen() 230 | if popen_obj is not None: 231 | return popen_obj.DupFd(popen_obj.duplicate_for_child(fd)) 232 | elif HAVE_SEND_HANDLE: 233 | from . 
import resource_sharer 234 | return resource_sharer.DupFd(fd) 235 | else: 236 | raise ValueError('SCM_RIGHTS appears not to be available') 237 | 238 | # 239 | # Try making some callable types picklable 240 | # 241 | 242 | 243 | def _reduce_method(m): 244 | if m.__self__ is None: 245 | return getattr, (m.__class__, m.__func__.__name__) 246 | else: 247 | return getattr, (m.__self__, m.__func__.__name__) 248 | 249 | 250 | class _C: 251 | def f(self): 252 | pass 253 | register(type(_C().f), _reduce_method) 254 | 255 | 256 | def _reduce_method_descriptor(m): 257 | return getattr, (m.__objclass__, m.__name__) 258 | register(type(list.append), _reduce_method_descriptor) 259 | register(type(int.__add__), _reduce_method_descriptor) 260 | 261 | 262 | def _reduce_partial(p): 263 | return _rebuild_partial, (p.func, p.args, p.keywords or {}) 264 | 265 | 266 | def _rebuild_partial(func, args, keywords): 267 | return functools.partial(func, *args, **keywords) 268 | register(functools.partial, _reduce_partial) 269 | 270 | # 271 | # Make sockets picklable 272 | # 273 | 274 | if sys.platform == 'win32': 275 | 276 | def _reduce_socket(s): 277 | from .resource_sharer import DupSocket 278 | return _rebuild_socket, (DupSocket(s),) 279 | 280 | def _rebuild_socket(ds): 281 | return ds.detach() 282 | register(socket.socket, _reduce_socket) 283 | 284 | else: 285 | 286 | def _reduce_socket(s): # noqa 287 | df = DupFd(s.fileno()) 288 | return _rebuild_socket, (df, s.family, s.type, s.proto) 289 | 290 | def _rebuild_socket(df, family, type, proto): # noqa 291 | fd = df.detach() 292 | return socket.socket(family, type, proto, fileno=fd) 293 | register(socket.socket, _reduce_socket) 294 | -------------------------------------------------------------------------------- /Modules/_billiard/multiprocessing.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Extension module used by multiprocessing package 3 | * 4 | * multiprocessing.c 5 | * 6 | * Copyright (c) 2006-2008, R Oudkerk 7 | * Licensed to PSF under a Contributor Agreement. 
8 | */ 9 | 10 | #include "multiprocessing.h" 11 | 12 | #ifdef SCM_RIGHTS 13 | #define HAVE_FD_TRANSFER 1 14 | #else 15 | #define HAVE_FD_TRANSFER 0 16 | #endif 17 | 18 | /* 19 | * Function which raises exceptions based on error codes 20 | */ 21 | 22 | PyObject * 23 | Billiard_SetError(PyObject *Type, int num) 24 | { 25 | switch (num) { 26 | case MP_SUCCESS: 27 | break; 28 | #ifdef MS_WINDOWS 29 | case MP_STANDARD_ERROR: 30 | if (Type == NULL) 31 | Type = PyExc_OSError; 32 | PyErr_SetExcFromWindowsErr(Type, 0); 33 | break; 34 | case MP_SOCKET_ERROR: 35 | if (Type == NULL) 36 | Type = PyExc_OSError; 37 | PyErr_SetExcFromWindowsErr(Type, WSAGetLastError()); 38 | break; 39 | #else /* !MS_WINDOWS */ 40 | case MP_STANDARD_ERROR: 41 | case MP_SOCKET_ERROR: 42 | if (Type == NULL) 43 | Type = PyExc_OSError; 44 | PyErr_SetFromErrno(Type); 45 | break; 46 | #endif /* !MS_WINDOWS */ 47 | case MP_MEMORY_ERROR: 48 | PyErr_NoMemory(); 49 | break; 50 | case MP_END_OF_FILE: 51 | PyErr_SetNone(PyExc_EOFError); 52 | break; 53 | case MP_EARLY_END_OF_FILE: 54 | PyErr_SetString(PyExc_IOError, 55 | "got end of file during message"); 56 | break; 57 | case MP_BAD_MESSAGE_LENGTH: 58 | PyErr_SetString(PyExc_IOError, "bad message length"); 59 | break; 60 | case MP_EXCEPTION_HAS_BEEN_SET: 61 | break; 62 | default: 63 | PyErr_Format(PyExc_RuntimeError, 64 | "unknown error number %d", num); 65 | } 66 | return NULL; 67 | } 68 | 69 | 70 | /* 71 | * Windows only 72 | */ 73 | 74 | #ifdef MS_WINDOWS 75 | 76 | /* On Windows we set an event to signal Ctrl-C; compare with timemodule.c */ 77 | 78 | HANDLE sigint_event = NULL; 79 | 80 | static BOOL WINAPI 81 | ProcessingCtrlHandler(DWORD dwCtrlType) 82 | { 83 | SetEvent(sigint_event); 84 | return FALSE; 85 | } 86 | 87 | 88 | static PyObject * 89 | Billiard_closesocket(PyObject *self, PyObject *args) 90 | { 91 | HANDLE handle; 92 | int ret; 93 | 94 | if (!PyArg_ParseTuple(args, F_HANDLE ":closesocket" , &handle)) 95 | return NULL; 96 | 97 | Py_BEGIN_ALLOW_THREADS 98 | ret = closesocket((SOCKET) handle); 99 | Py_END_ALLOW_THREADS 100 | 101 | if (ret) 102 | return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); 103 | Py_RETURN_NONE; 104 | } 105 | 106 | static PyObject * 107 | Billiard_recv(PyObject *self, PyObject *args) 108 | { 109 | HANDLE handle; 110 | int size, nread; 111 | PyObject *buf; 112 | 113 | if (!PyArg_ParseTuple(args, F_HANDLE "i:recv" , &handle, &size)) 114 | return NULL; 115 | 116 | buf = PyBytes_FromStringAndSize(NULL, size); 117 | if (!buf) 118 | return NULL; 119 | 120 | Py_BEGIN_ALLOW_THREADS 121 | nread = recv((SOCKET) handle, PyBytes_AS_STRING(buf), size, 0); 122 | Py_END_ALLOW_THREADS 123 | 124 | if (nread < 0) { 125 | Py_DECREF(buf); 126 | return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); 127 | } 128 | _PyBytes_Resize(&buf, nread); 129 | return buf; 130 | } 131 | 132 | static PyObject * 133 | Billiard_send(PyObject *self, PyObject *args) 134 | { 135 | HANDLE handle; 136 | Py_buffer buf; 137 | int ret, length; 138 | 139 | if (!PyArg_ParseTuple(args, F_HANDLE "y*:send" , &handle, &buf)) 140 | return NULL; 141 | 142 | length = (int)Py_MIN(buf.len, INT_MAX); 143 | 144 | Py_BEGIN_ALLOW_THREADS 145 | ret = send((SOCKET) handle, buf.buf, length, 0); 146 | Py_END_ALLOW_THREADS 147 | 148 | PyBuffer_Release(&buf); 149 | if (ret < 0) 150 | return PyErr_SetExcFromWindowsErr(PyExc_IOError, WSAGetLastError()); 151 | return PyLong_FromLong(ret); 152 | } 153 | 154 | 155 | /* 156 | * Unix only 157 | */ 158 | 159 | #else /* !MS_WINDOWS */ 160 | 
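/*
 * The HAVE_FD_TRANSFER block below is the C analogue of the Python
 * sendfds()/recvfds() helpers in billiard/reduction.py: one open file
 * descriptor is passed over an AF_UNIX socket in an SCM_RIGHTS control
 * message, and the kernel installs a duplicate in the receiver, so
 * recvfd() returns a new descriptor number, not the sender's. A rough
 * usage sketch from Python, assuming the compiled extension module is
 * importable as _billiard and sock is a connected AF_UNIX socket:
 *
 *     import _billiard
 *     _billiard.sendfd(sock.fileno(), fd)        # sending side
 *     newfd = _billiard.recvfd(sock.fileno())    # receiving side
 */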
161 | #if HAVE_FD_TRANSFER 162 | 163 | /* Functions for transferring file descriptors between processes. 164 | Reimplements some of the functionality of the fdcred 165 | module at http://www.mca-ltd.com/resources/fdcred_1.tgz. */ 166 | 167 | static PyObject * 168 | Billiard_multiprocessing_sendfd(PyObject *self, PyObject *args) 169 | { 170 | int conn, fd, res; 171 | char dummy_char; 172 | char buf[CMSG_SPACE(sizeof(int))]; 173 | struct msghdr msg = {0}; 174 | struct iovec dummy_iov; 175 | struct cmsghdr *cmsg; 176 | 177 | if (!PyArg_ParseTuple(args, "ii", &conn, &fd)) 178 | return NULL; 179 | 180 | dummy_iov.iov_base = &dummy_char; 181 | dummy_iov.iov_len = 1; 182 | msg.msg_control = buf; 183 | msg.msg_controllen = sizeof(buf); 184 | msg.msg_iov = &dummy_iov; 185 | msg.msg_iovlen = 1; 186 | cmsg = CMSG_FIRSTHDR(&msg); 187 | cmsg->cmsg_level = SOL_SOCKET; 188 | cmsg->cmsg_type = SCM_RIGHTS; 189 | cmsg->cmsg_len = CMSG_LEN(sizeof(int)); 190 | msg.msg_controllen = cmsg->cmsg_len; 191 | *(int*)CMSG_DATA(cmsg) = fd; 192 | 193 | Py_BEGIN_ALLOW_THREADS 194 | res = sendmsg(conn, &msg, 0); 195 | Py_END_ALLOW_THREADS 196 | 197 | if (res < 0) 198 | return PyErr_SetFromErrno(PyExc_OSError); 199 | Py_RETURN_NONE; 200 | } 201 | 202 | static PyObject * 203 | Billiard_multiprocessing_recvfd(PyObject *self, PyObject *args) 204 | { 205 | int conn, fd, res; 206 | char dummy_char; 207 | char buf[CMSG_SPACE(sizeof(int))]; 208 | struct msghdr msg = {0}; 209 | struct iovec dummy_iov; 210 | struct cmsghdr *cmsg; 211 | 212 | if (!PyArg_ParseTuple(args, "i", &conn)) 213 | return NULL; 214 | 215 | dummy_iov.iov_base = &dummy_char; 216 | dummy_iov.iov_len = 1; 217 | msg.msg_control = buf; 218 | msg.msg_controllen = sizeof(buf); 219 | msg.msg_iov = &dummy_iov; 220 | msg.msg_iovlen = 1; 221 | cmsg = CMSG_FIRSTHDR(&msg); 222 | cmsg->cmsg_level = SOL_SOCKET; 223 | cmsg->cmsg_type = SCM_RIGHTS; 224 | cmsg->cmsg_len = CMSG_LEN(sizeof(int)); 225 | msg.msg_controllen = cmsg->cmsg_len; 226 | 227 | Py_BEGIN_ALLOW_THREADS 228 | res = recvmsg(conn, &msg, 0); 229 | Py_END_ALLOW_THREADS 230 | 231 | if (res < 0) 232 | return PyErr_SetFromErrno(PyExc_OSError); 233 | 234 | fd = *(int*)CMSG_DATA(cmsg); 235 | return Py_BuildValue("i", fd); 236 | } 237 | 238 | #endif /* HAVE_FD_TRANSFER */ 239 | 240 | #endif /* !MS_WINDOWS */ 241 | 242 | 243 | /* 244 | * All platforms 245 | */ 246 | 247 | static PyObject* 248 | Billiard_multiprocessing_address_of_buffer(PyObject *self, PyObject *obj) 249 | { 250 | void *buffer; 251 | Py_ssize_t buffer_len; 252 | 253 | if (PyObject_AsWriteBuffer(obj, &buffer, &buffer_len) < 0) 254 | return NULL; 255 | 256 | return Py_BuildValue("N" F_PY_SSIZE_T, 257 | PyLong_FromVoidPtr(buffer), buffer_len); 258 | } 259 | 260 | #if !defined(MS_WINDOWS) 261 | 262 | static PyObject * 263 | Billiard_read(PyObject *self, PyObject *args) 264 | { 265 | int fd; 266 | Py_buffer view; 267 | Py_ssize_t buflen, recvlen = 0; 268 | 269 | char *buf = NULL; 270 | 271 | Py_ssize_t n = 0; 272 | 273 | if (!PyArg_ParseTuple(args, "iw*|n", &fd, &view, &recvlen)) 274 | return NULL; 275 | buflen = view.len; 276 | buf = view.buf; 277 | 278 | if (recvlen < 0) { 279 | PyBuffer_Release(&view); 280 | PyErr_SetString(PyExc_ValueError, "negative len for read"); 281 | return NULL; 282 | } 283 | 284 | if (recvlen == 0) { 285 | recvlen = buflen; 286 | } 287 | 288 | if (buflen < recvlen) { 289 | PyBuffer_Release(&view); 290 | PyErr_SetString(PyExc_ValueError, 291 | "Buffer too small for requested bytes"); 292 | return NULL; 293 | 294 | } 295 | 296 | if 
(buflen < 0 || buflen == 0) { 297 | errno = EINVAL; 298 | goto bail; 299 | } 300 | // Requires Python 2.7 301 | //if (!_PyVerify_fd(fd)) goto bail; 302 | 303 | Py_BEGIN_ALLOW_THREADS 304 | n = read(fd, buf, recvlen); 305 | Py_END_ALLOW_THREADS 306 | if (n < 0) goto bail; 307 | PyBuffer_Release(&view); 308 | return PyInt_FromSsize_t(n); 309 | 310 | bail: 311 | PyBuffer_Release(&view); 312 | return PyErr_SetFromErrno(PyExc_OSError); 313 | } 314 | 315 | # endif /* !MS_WINDOWS */ 316 | 317 | 318 | 319 | /* 320 | * Function table 321 | */ 322 | 323 | static PyMethodDef Billiard_module_methods[] = { 324 | {"address_of_buffer", Billiard_multiprocessing_address_of_buffer, METH_O, 325 | "address_of_buffer(obj) -> int\n\n" 326 | "Return address of obj assuming obj supports buffer interface"}, 327 | #if HAVE_FD_TRANSFER 328 | {"sendfd", Billiard_multiprocessing_sendfd, METH_VARARGS, 329 | "sendfd(sockfd, fd) -> None\n\n" 330 | "Send file descriptor given by fd over the unix domain socket\n" 331 | "whose file descriptor is sockfd"}, 332 | {"recvfd", Billiard_multiprocessing_recvfd, METH_VARARGS, 333 | "recvfd(sockfd) -> fd\n\n" 334 | "Receive a file descriptor over a unix domain socket\n" 335 | "whose file descriptor is sockfd"}, 336 | #endif 337 | #if !defined(MS_WINDOWS) 338 | {"read", Billiard_read, METH_VARARGS, 339 | "read(fd, buffer) -> bytes\n\n" 340 | "Read from file descriptor into buffer."}, 341 | #endif 342 | #ifdef MS_WINDOWS 343 | {"closesocket", Billiard_closesocket, METH_VARARGS, ""}, 344 | {"recv", Billiard_recv, METH_VARARGS, ""}, 345 | {"send", Billiard_send, METH_VARARGS, ""}, 346 | #endif 347 | #ifndef POSIX_SEMAPHORES_NOT_ENABLED 348 | {"sem_unlink", Billiard_semlock_unlink, METH_VARARGS, ""}, 349 | #endif 350 | {NULL} 351 | }; 352 | 353 | 354 | /* 355 | * Initialize 356 | */ 357 | 358 | PyMODINIT_FUNC 359 | init_billiard(void) 360 | { 361 | PyObject *module, *temp, *value; 362 | 363 | /* Initialize module */ 364 | module = Py_InitModule("_billiard", Billiard_module_methods); 365 | if (!module) 366 | return; 367 | 368 | #if defined(MS_WINDOWS) || \ 369 | (defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED)) 370 | /* Add SemLock type to module */ 371 | if (PyType_Ready(&BilliardSemLockType) < 0) 372 | return; 373 | Py_INCREF(&BilliardSemLockType); 374 | PyDict_SetItemString(BilliardSemLockType.tp_dict, "SEM_VALUE_MAX", 375 | Py_BuildValue("i", SEM_VALUE_MAX)); 376 | PyModule_AddObject(module, "SemLock", (PyObject*)&BilliardSemLockType); 377 | #endif 378 | 379 | #ifdef MS_WINDOWS 380 | /* Initialize the event handle used to signal Ctrl-C */ 381 | sigint_event = CreateEvent(NULL, TRUE, FALSE, NULL); 382 | if (!sigint_event) { 383 | PyErr_SetFromWindowsErr(0); 384 | return; 385 | } 386 | if (!SetConsoleCtrlHandler(ProcessingCtrlHandler, TRUE)) { 387 | PyErr_SetFromWindowsErr(0); 388 | return; 389 | } 390 | #endif 391 | 392 | /* Add configuration macros */ 393 | temp = PyDict_New(); 394 | if (!temp) 395 | return; 396 | #define ADD_FLAG(name) \ 397 | value = Py_BuildValue("i", name); \ 398 | if (value == NULL) { Py_DECREF(temp); return; } \ 399 | if (PyDict_SetItemString(temp, #name, value) < 0) { \ 400 | Py_DECREF(temp); Py_DECREF(value); return; } \ 401 | Py_DECREF(value) 402 | 403 | #if defined(HAVE_SEM_OPEN) && !defined(POSIX_SEMAPHORES_NOT_ENABLED) 404 | ADD_FLAG(HAVE_SEM_OPEN); 405 | #endif 406 | #ifdef HAVE_SEM_TIMEDWAIT 407 | ADD_FLAG(HAVE_SEM_TIMEDWAIT); 408 | #endif 409 | #ifdef HAVE_FD_TRANSFER 410 | ADD_FLAG(HAVE_FD_TRANSFER); 411 | #endif 412 | #ifdef 
HAVE_BROKEN_SEM_GETVALUE 413 | ADD_FLAG(HAVE_BROKEN_SEM_GETVALUE); 414 | #endif 415 | #ifdef HAVE_BROKEN_SEM_UNLINK 416 | ADD_FLAG(HAVE_BROKEN_SEM_UNLINK); 417 | #endif 418 | if (PyModule_AddObject(module, "flags", temp) < 0) 419 | return; 420 | } 421 | -------------------------------------------------------------------------------- /billiard/process.py: -------------------------------------------------------------------------------- 1 | # 2 | # Module providing the `Process` class which emulates `threading.Thread` 3 | # 4 | # multiprocessing/process.py 5 | # 6 | # Copyright (c) 2006-2008, R Oudkerk 7 | # Licensed to PSF under a Contributor Agreement. 8 | # 9 | # 10 | # Imports 11 | # 12 | 13 | import os 14 | import sys 15 | import signal 16 | import itertools 17 | import logging 18 | import threading 19 | from _weakrefset import WeakSet 20 | 21 | from multiprocessing import process as _mproc 22 | 23 | try: 24 | ORIGINAL_DIR = os.path.abspath(os.getcwd()) 25 | except OSError: 26 | ORIGINAL_DIR = None 27 | 28 | __all__ = ['BaseProcess', 'Process', 'current_process', 'active_children'] 29 | 30 | # 31 | # Public functions 32 | # 33 | 34 | 35 | def current_process(): 36 | ''' 37 | Return process object representing the current process 38 | ''' 39 | return _current_process 40 | 41 | 42 | def _set_current_process(process): 43 | global _current_process 44 | _current_process = _mproc._current_process = process 45 | 46 | 47 | def _cleanup(): 48 | # check for processes which have finished 49 | for p in list(_children): 50 | if p._popen.poll() is not None: 51 | _children.discard(p) 52 | 53 | 54 | def _maybe_flush(f): 55 | try: 56 | f.flush() 57 | except (AttributeError, EnvironmentError, NotImplementedError): 58 | pass 59 | 60 | 61 | def active_children(_cleanup=_cleanup): 62 | ''' 63 | Return list of process objects corresponding to live child processes 64 | ''' 65 | try: 66 | _cleanup() 67 | except TypeError: 68 | # called after gc collect so _cleanup does not exist anymore 69 | return [] 70 | return list(_children) 71 | 72 | 73 | class BaseProcess: 74 | ''' 75 | Process objects represent activity that is run in a separate process 76 | 77 | The class is analogous to `threading.Thread` 78 | ''' 79 | 80 | def _Popen(self): 81 | raise NotImplementedError() 82 | 83 | def __init__(self, group=None, target=None, name=None, 84 | args=(), kwargs={}, daemon=None, **_kw): 85 | assert group is None, 'group argument must be None for now' 86 | count = next(_process_counter) 87 | self._identity = _current_process._identity + (count, ) 88 | self._config = _current_process._config.copy() 89 | self._parent_pid = os.getpid() 90 | self._popen = None 91 | self._target = target 92 | self._args = tuple(args) 93 | self._kwargs = dict(kwargs) 94 | self._name = ( 95 | name or type(self).__name__ + '-' + 96 | ':'.join(str(i) for i in self._identity) 97 | ) 98 | if daemon is not None: 99 | self.daemon = daemon 100 | if _dangling is not None: 101 | _dangling.add(self) 102 | 103 | self._controlled_termination = False 104 | 105 | def run(self): 106 | ''' 107 | Method to be run in sub-process; can be overridden in sub-class 108 | ''' 109 | if self._target: 110 | self._target(*self._args, **self._kwargs) 111 | 112 | def start(self): 113 | ''' 114 | Start child process 115 | ''' 116 | assert self._popen is None, 'cannot start a process twice' 117 | assert self._parent_pid == os.getpid(), \ 118 | 'can only start a process object created by current process' 119 | _cleanup() 120 | self._popen = self._Popen(self) 121 | self._sentinel 
= self._popen.sentinel 122 | _children.add(self) 123 | 124 | def close(self): 125 | if self._popen is not None: 126 | self._popen.close() 127 | 128 | def terminate(self): 129 | ''' 130 | Terminate process; sends SIGTERM signal or uses TerminateProcess() 131 | ''' 132 | self._popen.terminate() 133 | 134 | def terminate_controlled(self): 135 | self._controlled_termination = True 136 | self.terminate() 137 | 138 | def join(self, timeout=None): 139 | ''' 140 | Wait until child process terminates 141 | ''' 142 | assert self._parent_pid == os.getpid(), 'can only join a child process' 143 | assert self._popen is not None, 'can only join a started process' 144 | res = self._popen.wait(timeout) 145 | if res is not None: 146 | _children.discard(self) 147 | self.close() 148 | 149 | def is_alive(self): 150 | ''' 151 | Return whether process is alive 152 | ''' 153 | if self is _current_process: 154 | return True 155 | assert self._parent_pid == os.getpid(), 'can only test a child process' 156 | if self._popen is None: 157 | return False 158 | self._popen.poll() 159 | return self._popen.returncode is None 160 | 161 | def _is_alive(self): 162 | if self._popen is None: 163 | return False 164 | return self._popen.poll() is None 165 | 166 | @property 167 | def name(self): 168 | return self._name 169 | 170 | @name.setter 171 | def name(self, name): # noqa 172 | assert isinstance(name, str), 'name must be a string' 173 | self._name = name 174 | 175 | @property 176 | def daemon(self): 177 | ''' 178 | Return whether process is a daemon 179 | ''' 180 | return self._config.get('daemon', False) 181 | 182 | @daemon.setter # noqa 183 | def daemon(self, daemonic): 184 | ''' 185 | Set whether process is a daemon 186 | ''' 187 | assert self._popen is None, 'process has already started' 188 | self._config['daemon'] = daemonic 189 | 190 | @property 191 | def authkey(self): 192 | return self._config['authkey'] 193 | 194 | @authkey.setter # noqa 195 | def authkey(self, authkey): 196 | ''' 197 | Set authorization key of process 198 | ''' 199 | self._config['authkey'] = AuthenticationString(authkey) 200 | 201 | @property 202 | def exitcode(self): 203 | ''' 204 | Return exit code of process or `None` if it has yet to stop 205 | ''' 206 | if self._popen is None: 207 | return self._popen 208 | return self._popen.poll() 209 | 210 | @property 211 | def ident(self): 212 | ''' 213 | Return identifier (PID) of process or `None` if it has yet to start 214 | ''' 215 | if self is _current_process: 216 | return os.getpid() 217 | else: 218 | return self._popen and self._popen.pid 219 | 220 | pid = ident 221 | 222 | @property 223 | def sentinel(self): 224 | ''' 225 | Return a file descriptor (Unix) or handle (Windows) suitable for 226 | waiting for process termination. 
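(Added note, not part of the original docstring: on Unix the sentinel
is typically the read end of a pipe that becomes readable when the
child exits, so several sentinels can be multiplexed with select() or
the selectors module.)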
227 | ''' 228 | try: 229 | return self._sentinel 230 | except AttributeError: 231 | raise ValueError("process not started") 232 | 233 | @property 234 | def _counter(self): 235 | # compat for 2.7 236 | return _process_counter 237 | 238 | @property 239 | def _children(self): 240 | # compat for 2.7 241 | return _children 242 | 243 | @property 244 | def _authkey(self): 245 | # compat for 2.7 246 | return self.authkey 247 | 248 | @property 249 | def _daemonic(self): 250 | # compat for 2.7 251 | return self.daemon 252 | 253 | @property 254 | def _tempdir(self): 255 | # compat for 2.7 256 | return self._config.get('tempdir') 257 | 258 | def __repr__(self): 259 | if self is _current_process: 260 | status = 'started' 261 | elif self._parent_pid != os.getpid(): 262 | status = 'unknown' 263 | elif self._popen is None: 264 | status = 'initial' 265 | else: 266 | if self._popen.poll() is not None: 267 | status = self.exitcode 268 | else: 269 | status = 'started' 270 | 271 | if type(status) is int: 272 | if status == 0: 273 | status = 'stopped' 274 | else: 275 | status = 'stopped[%s]' % _exitcode_to_name.get(status, status) 276 | 277 | return '<%s(%s, %s%s)>' % (type(self).__name__, self._name, 278 | status, self.daemon and ' daemon' or '') 279 | 280 | ## 281 | 282 | def _bootstrap(self): 283 | from . import util, context 284 | global _current_process, _process_counter, _children 285 | 286 | try: 287 | if self._start_method is not None: 288 | context._force_start_method(self._start_method) 289 | _process_counter = itertools.count(1) 290 | _children = set() 291 | if sys.stdin is not None: 292 | try: 293 | sys.stdin.close() 294 | sys.stdin = open(os.devnull) 295 | except (EnvironmentError, OSError, ValueError): 296 | pass 297 | old_process = _current_process 298 | _set_current_process(self) 299 | 300 | # Re-init logging system. 301 | # Workaround for https://bugs.python.org/issue6721/#msg140215 302 | # Python logging module uses RLock() objects which are broken 303 | # after fork. This can result in a deadlock (Celery Issue #496). 
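# (Added note: the loop below visits the root logger via the ``None``
# entry, skips PlaceHolder nodes, and calls createLock() on every
# handler so that no handler keeps a lock owned by a thread that did
# not survive the fork; logging's module-level _lock is then replaced
# with a fresh RLock for the same reason.)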
304 | loggerDict = logging.Logger.manager.loggerDict 305 | logger_names = list(loggerDict.keys()) 306 | logger_names.append(None) # for root logger 307 | for name in logger_names: 308 | if not name or not isinstance(loggerDict[name], 309 | logging.PlaceHolder): 310 | for handler in logging.getLogger(name).handlers: 311 | handler.createLock() 312 | logging._lock = threading.RLock() 313 | 314 | try: 315 | util._finalizer_registry.clear() 316 | util._run_after_forkers() 317 | finally: 318 | # delay finalization of the old process object until after 319 | # _run_after_forkers() is executed 320 | del old_process 321 | util.info('child process %s calling self.run()', self.pid) 322 | try: 323 | self.run() 324 | exitcode = 0 325 | finally: 326 | util._exit_function() 327 | except SystemExit as exc: 328 | if not exc.args: 329 | exitcode = 1 330 | elif isinstance(exc.args[0], int): 331 | exitcode = exc.args[0] 332 | else: 333 | sys.stderr.write(str(exc.args[0]) + '\n') 334 | _maybe_flush(sys.stderr) 335 | exitcode = 0 if isinstance(exc.args[0], str) else 1 336 | except: 337 | exitcode = 1 338 | if not util.error('Process %s', self.name, exc_info=True): 339 | import traceback 340 | sys.stderr.write('Process %s:\n' % self.name) 341 | traceback.print_exc() 342 | finally: 343 | util.info('process %s exiting with exitcode %d', 344 | self.pid, exitcode) 345 | _maybe_flush(sys.stdout) 346 | _maybe_flush(sys.stderr) 347 | 348 | return exitcode 349 | 350 | # 351 | # We subclass bytes to avoid accidental transmission of auth keys over network 352 | # 353 | 354 | 355 | class AuthenticationString(bytes): 356 | 357 | def __reduce__(self): 358 | from .context import get_spawning_popen 359 | 360 | if get_spawning_popen() is None: 361 | raise TypeError( 362 | 'Pickling an AuthenticationString object is ' 363 | 'disallowed for security reasons') 364 | return AuthenticationString, (bytes(self),) 365 | 366 | # 367 | # Create object representing the main process 368 | # 369 | 370 | 371 | class _MainProcess(BaseProcess): 372 | 373 | def __init__(self): 374 | self._identity = () 375 | self._name = 'MainProcess' 376 | self._parent_pid = None 377 | self._popen = None 378 | self._config = {'authkey': AuthenticationString(os.urandom(32)), 379 | 'semprefix': '/mp'} 380 | 381 | _current_process = _MainProcess() 382 | _process_counter = itertools.count(1) 383 | _children = set() 384 | del _MainProcess 385 | 386 | 387 | Process = BaseProcess 388 | 389 | # 390 | # Give names to some return codes 391 | # 392 | 393 | _exitcode_to_name = {} 394 | 395 | for name, signum in signal.__dict__.items(): 396 | if name[:3] == 'SIG' and '_' not in name: 397 | _exitcode_to_name[-signum] = name 398 | 399 | # For debug and leak testing 400 | _dangling = WeakSet() 401 | --------------------------------------------------------------------------------
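A minimal usage sketch of the Process class defined above (added for
illustration, not a file from the repository; ``greet`` and its
argument are invented names, and the sketch assumes the package's usual
top-level re-export so that ``from billiard import Process`` works):

    from billiard import Process

    def greet(who):
        print('hello,', who)

    if __name__ == '__main__':
        p = Process(target=greet, args=('world',))
        p.start()               # spawn the child; also sets p.sentinel
        p.join()                # wait for the child to terminate
        assert p.exitcode == 0  # 0 on clean exit, -signum if killed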