├── .gitattributes ├── .github ├── chronographer.yml ├── dependabot.yaml ├── patchback.yml └── workflows │ ├── deploy.yml │ └── test.yml ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── CHANGELOG.rst ├── LICENSE ├── README.rst ├── RELEASING.rst ├── Vagrantfile ├── doc ├── Makefile ├── __init__.py ├── _static │ ├── basic1.png │ ├── codespeak.png │ ├── execnet.png │ └── pythonring.png ├── _templates │ ├── indexsidebar.html │ └── layout.html ├── basics.rst ├── changelog.rst ├── conf.py ├── example │ ├── conftest.py │ ├── funcmultiplier.py │ ├── hybridpython.rst │ ├── popen_read_multiple.py │ ├── py3topy2.py │ ├── redirect_remote_output.py │ ├── remote1.py │ ├── remotecmd.py │ ├── servefiles.py │ ├── svn-sync-repo.py │ ├── sysinfo.py │ ├── taskserver.py │ ├── test_debug.rst │ ├── test_funcmultiplier.py │ ├── test_group.rst │ ├── test_info.rst │ ├── test_multi.rst │ ├── test_proxy.rst │ └── test_ssh_fileserver.rst ├── examples.rst ├── implnotes.rst ├── index.rst ├── install.rst └── support.rst ├── pyproject.toml ├── src └── execnet │ ├── __init__.py │ ├── gateway.py │ ├── gateway_base.py │ ├── gateway_bootstrap.py │ ├── gateway_io.py │ ├── gateway_socket.py │ ├── multi.py │ ├── py.typed │ ├── rsync.py │ ├── rsync_remote.py │ ├── script │ ├── __init__.py │ ├── loop_socketserver.py │ ├── quitserver.py │ ├── shell.py │ ├── socketserver.py │ └── socketserverservice.py │ └── xspec.py ├── testing ├── conftest.py ├── test_basics.py ├── test_channel.py ├── test_compatibility_regressions.py ├── test_gateway.py ├── test_multi.py ├── test_rsync.py ├── test_serializer.py ├── test_termination.py ├── test_threadpool.py └── test_xspec.py └── tox.ini /.gitattributes: -------------------------------------------------------------------------------- 1 | # restructured text files forced to LF because of doc8 pre-commit hook. 2 | *.rst eol=lf 3 | -------------------------------------------------------------------------------- /.github/chronographer.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | branch-protection-check-name: Changelog entry 4 | action-hints: 5 | check-title-prefix: "Chronographer: " 6 | external-docs-url: >- 7 | https://docs.pytest.org/en/latest/contributing.html#preparing-pull-requests 8 | inline-markdown: >- 9 | See 10 | https://docs.pytest.org/en/latest/contributing.html#preparing-pull-requests 11 | for details. 12 | enforce-name: 13 | suffix: .rst 14 | exclude: 15 | humans: 16 | - pyup-bot 17 | labels: 18 | skip-changelog: skip news 19 | 20 | ... 21 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /.github/patchback.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | backport_branch_prefix: patchback/backports/ 4 | backport_label_prefix: 'backport ' # IMPORTANT: the labels are space-delimited 5 | # target_branch_prefix: '' # The project's backport branches are non-prefixed 6 | 7 | ... 
8 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version: 7 | description: 'Release version' 8 | required: true 9 | default: '1.2.3' 10 | 11 | jobs: 12 | 13 | package: 14 | runs-on: ubuntu-latest 15 | env: 16 | SETUPTOOLS_SCM_PRETEND_VERSION: ${{ github.event.inputs.version }} 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | 21 | - name: Build and Check Package 22 | uses: hynek/build-and-inspect-python-package@v2.12 23 | 24 | deploy: 25 | needs: package 26 | runs-on: ubuntu-latest 27 | environment: deploy 28 | permissions: 29 | id-token: write # For PyPI trusted publishers. 30 | contents: write # For tag. 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | 35 | - name: Download Package 36 | uses: actions/download-artifact@v4 37 | with: 38 | name: Packages 39 | path: dist 40 | 41 | - name: Publish package to PyPI 42 | uses: pypa/gh-action-pypi-publish@v1.12.4 43 | 44 | - name: Push tag 45 | run: | 46 | git config user.name "pytest bot" 47 | git config user.email "pytestbot@gmail.com" 48 | git tag --annotate --message=v${{ github.event.inputs.version }} v${{ github.event.inputs.version }} ${{ github.sha }} 49 | git push origin v${{ github.event.inputs.version }} 50 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | 3 | on: 4 | push: 5 | branches: 6 | - "master" 7 | - "test-me-*" 8 | 9 | pull_request: 10 | branches: 11 | - "master" 12 | 13 | # Cancel running jobs for the same workflow and branch. 
14 | concurrency: 15 | group: ${{ github.workflow }}-${{ github.ref }} 16 | cancel-in-progress: true 17 | 18 | jobs: 19 | 20 | package: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v4 24 | - name: Build and Check Package 25 | uses: hynek/build-and-inspect-python-package@v2.12 26 | 27 | test: 28 | 29 | needs: [package] 30 | 31 | runs-on: ${{ matrix.os }} 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | os: [ windows-latest, ubuntu-latest ] 37 | python: [ "3.8","3.10","3.11","3.12", "pypy-3.8" ] 38 | 39 | steps: 40 | - uses: actions/checkout@v4 41 | with: 42 | fetch-depth: 0 43 | 44 | - name: Download Package 45 | uses: actions/download-artifact@v4 46 | with: 47 | name: Packages 48 | path: dist 49 | 50 | - name: Set up Python 51 | uses: actions/setup-python@v5 52 | with: 53 | python-version: ${{ matrix.python }} 54 | 55 | - name: Install tox 56 | run: pip install tox 57 | 58 | - name: Test 59 | shell: bash 60 | run: | 61 | tox run -e py --installpkg `find dist/*.tar.gz` 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | doc/_build 2 | build/ 3 | src/execnet/_version.py 4 | dist/ 5 | .pytest_cache/ 6 | .eggs/ 7 | *.pyc 8 | *$py.class 9 | *.orig 10 | *~ 11 | *.swp 12 | .tox 13 | lib/ 14 | bin/ 15 | include/ 16 | .Python 17 | .env/ 18 | .cache/ 19 | .vagrant/ 20 | .vagrant.d/ 21 | .config/ 22 | .local/ 23 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/codespell-project/codespell 3 | rev: v2.4.1 4 | hooks: 5 | - id: codespell 6 | - repo: https://github.com/asottile/blacken-docs 7 | rev: 1.19.1 8 | hooks: 9 | - id: blacken-docs 10 | additional_dependencies: [black==22.12.0] 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v5.0.0 13 | hooks: 14 | - id: check-yaml 15 | - repo: https://github.com/astral-sh/ruff-pre-commit 16 | rev: v0.11.12 17 | hooks: 18 | - id: ruff 19 | args: [ --fix ] 20 | exclude: "^doc/" 21 | - id: ruff-format 22 | - repo: https://github.com/PyCQA/doc8 23 | rev: 'v1.1.2' 24 | hooks: 25 | - id: doc8 26 | args: ["--ignore", "D001"] 27 | 28 | - repo: https://github.com/pre-commit/mirrors-mypy 29 | rev: 'v1.16.0' 30 | hooks: 31 | - id: mypy 32 | additional_dependencies: 33 | - pytest 34 | - types-pywin32 35 | - types-gevent 36 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3" 7 | 8 | python: 9 | install: 10 | - method: pip 11 | path: . 
12 | 13 | sphinx: 14 | configuration: doc/conf.py 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Permission is hereby granted, free of charge, to any person obtaining a copy 3 | of this software and associated documentation files (the "Software"), to deal 4 | in the Software without restriction, including without limitation the rights 5 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 6 | copies of the Software, and to permit persons to whom the Software is 7 | furnished to do so, subject to the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be included in all 10 | copies or substantial portions of the Software. 11 | 12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 13 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 14 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 15 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 16 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 17 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 18 | SOFTWARE. 19 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | execnet: distributed Python deployment and communication 2 | ======================================================== 3 | 4 | .. image:: https://img.shields.io/pypi/v/execnet.svg 5 | :target: https://pypi.org/project/execnet/ 6 | 7 | .. image:: https://anaconda.org/conda-forge/execnet/badges/version.svg 8 | :target: https://anaconda.org/conda-forge/execnet 9 | 10 | .. image:: https://img.shields.io/pypi/pyversions/execnet.svg 11 | :target: https://pypi.org/project/execnet/ 12 | 13 | .. image:: https://github.com/pytest-dev/execnet/workflows/test/badge.svg 14 | :target: https://github.com/pytest-dev/execnet/actions?query=workflow%3Atest 15 | 16 | .. image:: https://img.shields.io/badge/code%20style-black-000000.svg 17 | :target: https://github.com/python/black 18 | 19 | .. _execnet: https://execnet.readthedocs.io 20 | 21 | execnet_ provides carefully tested means to ad-hoc interact with Python 22 | interpreters across version, platform and network barriers. It provides 23 | a minimal and fast API targeting the following uses: 24 | 25 | * distribute tasks to local or remote processes 26 | * write and deploy hybrid multi-process applications 27 | * write scripts to administer multiple hosts 28 | 29 | Features 30 | -------- 31 | 32 | * zero-install bootstrapping: no remote installation required! 33 | 34 | * flexible communication: send/receive as well as 35 | callback/queue mechanisms supported 36 | 37 | * simple serialization of python builtin types (no pickling) 38 | 39 | * grouped creation and robust termination of processes 40 | 41 | * interoperable between Windows and Unix-ish systems. 42 | 43 | * integrates with different threading models, including standard 44 | os threads, eventlet and gevent based systems. 
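
Quick example
-------------

A minimal sketch of the API, adapted from the examples under ``doc/``: it
bootstraps a local Python subprocess, executes code in it and exchanges
data over a channel::

    import execnet

    # "popen" is the default spec: a local Python subprocess
    gw = execnet.makegateway()
    # the executed source sees a predefined "channel" object
    channel = gw.remote_exec("channel.send(channel.receive() + 1)")
    channel.send(41)
    assert channel.receive() == 42
    # terminate the subprocess
    gw.exit()

See the documentation at https://execnet.readthedocs.io for gateway
specifications (ssh, socket, vagrant_ssh, ...) and the full channel API.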
45 | -------------------------------------------------------------------------------- /RELEASING.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Releasing execnet 3 | ================= 4 | 5 | This document describes the steps to make a new ``execnet`` release. 6 | 7 | Version 8 | ------- 9 | 10 | ``master`` should always be green and a potential release candidate. ``execnet`` follows 11 | semantic versioning, so given that the current version is ``X.Y.Z``, to find the next version number 12 | one needs to look at the ``CHANGELOG.rst`` file: 13 | 14 | - If there any new feature, then we must make a new **minor** release: next 15 | release will be ``X.Y+1.0``. 16 | 17 | - Otherwise it is just a **bug fix** release: ``X.Y.Z+1``. 18 | 19 | 20 | Steps 21 | ----- 22 | 23 | To publish a new release ``X.Y.Z``, the steps are as follows: 24 | 25 | #. Create a new branch named ``release-X.Y.Z`` from the latest ``master``. 26 | 27 | #. Update the ``CHANGELOG.rst`` file with the new release information. 28 | 29 | #. Commit and push the branch to ``upstream`` and open a PR. 30 | 31 | #. Once the PR is **green** and **approved**, start the ``deploy`` workflow manually from the branch ``release-VERSION``, passing ``VERSION`` as parameter. 32 | 33 | #. Merge the release PR to ``master``. 34 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://vagrantcloud.com/search. 15 | config.vm.box = "generic/debian11" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine. 25 | # NOTE: This will enable public access to the opened port 26 | # config.vm.network "forwarded_port", guest: 80, host: 8080 27 | 28 | # Create a forwarded port mapping which allows access to a specific port 29 | # within the machine from a port on the host machine and only allow access 30 | # via 127.0.0.1 to disable public access 31 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 32 | 33 | # Create a private network, which allows host-only access to the machine 34 | # using a specific IP. 35 | # config.vm.network "private_network", ip: "192.168.33.10" 36 | 37 | # Create a public network, which generally matched to bridged network. 38 | # Bridged networks make the machine appear as another physical device on 39 | # your network. 
40 | # config.vm.network "public_network" 41 | 42 | # Share an additional folder to the guest VM. The first argument is 43 | # the path on the host to the actual folder. The second argument is 44 | # the path on the guest to mount the folder. And the optional third 45 | # argument is a set of non-required options. 46 | # config.vm.synced_folder "../data", "/vagrant_data" 47 | 48 | # Provider-specific configuration so you can fine-tune various 49 | # backing providers for Vagrant. These expose provider-specific options. 50 | # Example for VirtualBox: 51 | # 52 | # config.vm.provider "virtualbox" do |vb| 53 | # # Display the VirtualBox GUI when booting the machine 54 | # vb.gui = true 55 | # 56 | # # Customize the amount of memory on the VM: 57 | # vb.memory = "1024" 58 | # end 59 | # 60 | # View the documentation for the provider you are using for more 61 | # information on available options. 62 | 63 | # Enable provisioning with a shell script. Additional provisioners such as 64 | # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the 65 | # documentation for more information about their specific syntax and use. 66 | # config.vm.provision "shell", inline: <<-SHELL 67 | # apt-get update 68 | # apt-get install -y apache2 69 | # SHELL 70 | end 71 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " pickle to make pickle files" 22 | @echo " json to make JSON files" 23 | @echo " htmlhelp to make HTML files and a HTML help project" 24 | @echo " qthelp to make HTML files and a qthelp project" 25 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 26 | @echo " changes to make an overview of all changed/added/deprecated items" 27 | @echo " linkcheck to check all external links for integrity" 28 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 29 | 30 | clean: 31 | -rm -rf $(BUILDDIR)/* 32 | 33 | install: clean html 34 | rsync -avz $(BUILDDIR)/html/ code:www-execnet/ 35 | 36 | html: 37 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 38 | @echo 39 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 40 | 41 | dirhtml: 42 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 43 | @echo 44 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 45 | 46 | pickle: 47 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 48 | @echo 49 | @echo "Build finished; now you can process the pickle files." 50 | 51 | json: 52 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 53 | @echo 54 | @echo "Build finished; now you can process the JSON files." 
55 | 56 | htmlhelp: 57 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 58 | @echo 59 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 60 | ".hhp project file in $(BUILDDIR)/htmlhelp." 61 | 62 | qthelp: 63 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 64 | @echo 65 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 66 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 67 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/execnet.qhcp" 68 | @echo "To view the help file:" 69 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/execnet.qhc" 70 | 71 | latex: 72 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 73 | @echo 74 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 75 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 76 | "run these through (pdf)latex." 77 | 78 | changes: 79 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 80 | @echo 81 | @echo "The overview file is in $(BUILDDIR)/changes." 82 | 83 | linkcheck: 84 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 85 | @echo 86 | @echo "Link check complete; look for any errors in the above output " \ 87 | "or in $(BUILDDIR)/linkcheck/output.txt." 88 | 89 | doctest: 90 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 91 | @echo "Testing of doctests in the sources finished, look at the " \ 92 | "results in $(BUILDDIR)/doctest/output.txt." 93 | -------------------------------------------------------------------------------- /doc/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | -------------------------------------------------------------------------------- /doc/_static/basic1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pytest-dev/execnet/e0d703541453d4d7a623e749184b73440f63d525/doc/_static/basic1.png -------------------------------------------------------------------------------- /doc/_static/codespeak.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pytest-dev/execnet/e0d703541453d4d7a623e749184b73440f63d525/doc/_static/codespeak.png -------------------------------------------------------------------------------- /doc/_static/execnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pytest-dev/execnet/e0d703541453d4d7a623e749184b73440f63d525/doc/_static/execnet.png -------------------------------------------------------------------------------- /doc/_static/pythonring.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pytest-dev/execnet/e0d703541453d4d7a623e749184b73440f63d525/doc/_static/pythonring.png -------------------------------------------------------------------------------- /doc/_templates/indexsidebar.html: -------------------------------------------------------------------------------- 1 |

Download
2 | {% if version.endswith('(hg)') %}
3 | This documentation is for version {{ version }}, which is
4 | not released yet.
5 | You can use it from the
6 | Git repo or look for
7 | released versions in the Python
8 | Package Index.
9 | {% else %}
10 | Current: {{ version }}
11 | [Changes]
12 | Get execnet from the Python Package
13 | Index, or install it with:
14 | pip install -U execnet
15 | {% endif %}
16 |
17 | Questions? Suggestions?
18 |
19 | Join
20 | execnet-dev mailing list
21 | come to #pytest on Libera Chat
22 | -------------------------------------------------------------------------------- /doc/_templates/layout.html: --------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {% block rootrellink %}
4 |
5 | {% endblock %}
6 |
7 | {% block header %}
8 |
9 |
10 |
11 | execnet: Distributed Python deployment and communication
12 |
13 | home |
14 | install |
15 | examples |
16 | basic API |
17 | support
18 |
19 |
20 | {% endblock %} 21 | -------------------------------------------------------------------------------- /doc/basics.rst: -------------------------------------------------------------------------------- 1 | ============================================================================== 2 | API in a nutshell 3 | ============================================================================== 4 | 5 | execnet ad-hoc instantiates local and remote Python interpreters. 6 | Each interpreter is accessible through a **Gateway** which manages 7 | code and data communication. **Channels** allow to exchange 8 | data between the local and the remote end. **Groups** 9 | help to manage creation and termination of sub-interpreters. 10 | 11 | .. image:: _static/basic1.png 12 | 13 | .. currentmodule:: execnet 14 | 15 | Gateways: bootstrapping Python interpreters 16 | =================================================== 17 | 18 | All Gateways are instantiated via a call to ``makegateway()`` 19 | passing it a gateway specification or URL. 20 | 21 | .. _xspec: 22 | 23 | .. autofunction:: execnet.makegateway(spec) 24 | 25 | Here is an example which instantiates a simple Python subprocess:: 26 | 27 | >>> gateway = execnet.makegateway() 28 | 29 | Gateways allow to `remote execute code`_ and 30 | `exchange data`_ bidirectionally. 31 | 32 | Examples for valid gateway specifications 33 | ------------------------------------------- 34 | 35 | * ``ssh=wyvern//python=python3.3//chdir=mycache`` specifies a Python3.3 36 | interpreter on the host ``wyvern``. The remote process will have 37 | ``mycache`` as its current working directory. 38 | 39 | * ``ssh=-p 5000 myhost`` makes execnet pass "-p 5000 myhost" arguments 40 | to the underlying ssh client binary, effectively specifying a custom port. 41 | 42 | * ``vagrant_ssh=default`` makes execnet connect to a Vagrant VM named 43 | ``default`` via SSH through Vagrant's ``vagrant ssh`` command. It supports 44 | the same additional parameters as regular SSH connections. 45 | 46 | * ``popen//python=python2.7//nice=20`` specification of 47 | a python subprocess using the ``python2.7`` executable which must be 48 | discoverable through the system ``PATH``; running with the lowest 49 | CPU priority ("nice" level). By default current dir will be the 50 | current dir of the instantiator. 51 | 52 | * ``popen//dont_write_bytecode`` uses the same executable as the current 53 | Python, and also passes the ``-B`` flag on startup, which tells Python not 54 | write ``.pyc`` or ``.pyo`` files. 55 | 56 | * ``popen//env:NAME=value`` specifies a subprocess that uses the 57 | same interpreter as the one it is initiated from and additionally 58 | remotely sets an environment variable ``NAME`` to ``value``. 59 | 60 | * ``popen//execmodel=eventlet`` specifies a subprocess that uses the 61 | same interpreter as the one it is initiated from but will run the 62 | other side using eventlet for handling IO and dispatching threads. 63 | 64 | * ``socket=192.168.1.4:8888`` specifies a Python Socket server 65 | process that listens on ``192.168.1.4:8888`` 66 | 67 | .. versionadded:: 1.5 68 | 69 | * ``vagarant_ssh`` opens a python interpreter via the vagarant ssh command 70 | 71 | 72 | .. _`remote execute code`: 73 | 74 | remote_exec: execute source code remotely 75 | =================================================== 76 | 77 | .. currentmodule:: execnet.gateway 78 | 79 | All gateways offer a simple method to execute source code 80 | in the instantiated subprocess-interpreter: 81 | 82 | .. 
automethod:: Gateway.remote_exec(source) 83 | 84 | It is allowed to pass a module object as source code 85 | in which case its source code will be obtained and 86 | get sent for remote execution. ``remote_exec`` returns 87 | a channel object whose symmetric counterpart channel 88 | is available to the remotely executing source. 89 | 90 | 91 | .. method:: Gateway.reconfigure([py2str_as_py3str=True, py3str_as_py2str=False]) 92 | 93 | Reconfigures the string-coercion behaviour of the gateway 94 | 95 | .. _`Channel`: 96 | .. _`channel-api`: 97 | 98 | .. _`exchange data`: 99 | 100 | Channels: exchanging data with remote code 101 | ======================================================= 102 | 103 | .. currentmodule:: execnet.gateway_base 104 | 105 | A channel object allows to send and receive data between 106 | two asynchronously running programs. 107 | 108 | .. automethod:: Channel.send(item) 109 | .. automethod:: Channel.receive(timeout) 110 | .. automethod:: Channel.setcallback(callback, endmarker=_NOENDMARKER) 111 | .. automethod:: Channel.makefile(mode, proxyclose=False) 112 | .. automethod:: Channel.close(error) 113 | .. automethod:: Channel.waitclose(timeout) 114 | .. autoattribute:: Channel.RemoteError 115 | .. autoattribute:: Channel.TimeoutError 116 | 117 | 118 | .. _Group: 119 | 120 | Grouped Gateways and robust termination 121 | =============================================== 122 | 123 | .. currentmodule:: execnet.multi 124 | 125 | All created gateway instances are part of a group. If you 126 | call ``execnet.makegateway`` it actually is forwarded to 127 | the ``execnet.default_group``. Group objects are container 128 | objects (see :doc:`group examples `) 129 | and manage the final termination procedure: 130 | 131 | .. automethod:: Group.terminate(timeout=None) 132 | 133 | This method is implicitly called for each gateway group at 134 | process-exit, using a small timeout. This is fine 135 | for interactive sessions or random scripts which 136 | you rather like to error out than hang. If you start many 137 | processes then you often want to call ``group.terminate()`` 138 | yourself and specify a larger or not timeout. 139 | 140 | 141 | threading models: gevent, eventlet, thread, main_thread_only 142 | ==================================================================== 143 | 144 | .. versionadded:: 1.2 (status: experimental!) 145 | 146 | execnet supports "main_thread_only", "thread", "eventlet" and "gevent" 147 | as thread models on each of the two sides. You need to decide which 148 | model to use before you create any gateways:: 149 | 150 | # content of threadmodel.py 151 | import execnet 152 | # locally use "eventlet", remotely use "thread" model 153 | execnet.set_execmodel("eventlet", "thread") 154 | gw = execnet.makegateway() 155 | print (gw) 156 | print (gw.remote_status()) 157 | print (gw.remote_exec("channel.send(1)").receive()) 158 | 159 | You need to have eventlet installed in your environment and then 160 | you can execute this little test file:: 161 | 162 | $ python threadmodel.py 163 | 164 | 165 | 1 166 | 167 | How to execute in the main thread 168 | ------------------------------------------------ 169 | 170 | When the remote side of a gateway uses the "thread" model, execution 171 | will preferably run in the main thread. This allows GUI loops 172 | or other code to behave correctly. If you, however, start multiple 173 | executions concurrently, they will run in non-main threads. 
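
Below is a small illustrative sketch (not part of the test suite) that uses
the ``set_execmodel()`` call shown above and asks the remote side which
thread runs the executed source; per the preceding paragraph, a single
execution under the "thread" model is expected to happen in the remote
main thread::

    # content of mainthread.py -- illustrative sketch
    import execnet

    # "thread" on both sides is the default model; shown here explicitly
    execnet.set_execmodel("thread", "thread")
    gw = execnet.makegateway()
    channel = gw.remote_exec("""
        import threading
        channel.send(threading.current_thread() is threading.main_thread())
    """)
    print(channel.receive())  # expected: True while no other executions run
    gw.exit()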
174 | 175 | 176 | remote_status: get low-level execution info 177 | =================================================== 178 | 179 | .. currentmodule:: execnet.gateway 180 | 181 | All gateways offer a simple method to obtain some status 182 | information from the remote side. 183 | 184 | .. automethod:: Gateway.remote_status(source) 185 | 186 | Calling this method tells you e.g. how many execution 187 | tasks are queued, how many are executing and how many 188 | channels are active. 189 | 190 | rsync: synchronise filesystem with remote 191 | =============================================================== 192 | 193 | .. currentmodule:: execnet 194 | 195 | 196 | ``execnet`` implements a simple efficient rsyncing protocol. 197 | Here is a basic example for using RSync:: 198 | 199 | rsync = execnet.RSync('/tmp/source') 200 | gw = execnet.makegateway() 201 | rsync.add_target(gw, '/tmp/dest') 202 | rsync.send() 203 | 204 | 205 | And here is API info about the RSync class. 206 | 207 | .. autoclass:: RSync 208 | :members: add_target,send 209 | 210 | Debugging execnet 211 | =============================================================== 212 | 213 | By setting the environment variable ``EXECNET_DEBUG`` you can 214 | configure a tracing mechanism: 215 | 216 | :EXECNET_DEBUG=1: write per-process trace-files to ``execnet-debug-PID`` 217 | :EXECNET_DEBUG=2: perform tracing to stderr (popen-gateway workers will send this to their instantiator) 218 | 219 | 220 | .. _`dumps/loads`: 221 | .. _`dumps/loads API`: 222 | 223 | Cross-interpreter serialization of Python objects 224 | ======================================================= 225 | 226 | .. versionadded:: 1.1 227 | 228 | Execnet exposes a function pair which you can safely use to 229 | store and load values from different Python interpreters 230 | (e.g. Python2 and Python3, PyPy and Jython). Here is 231 | a basic example:: 232 | 233 | >>> import execnet 234 | >>> dump = execnet.dumps([1,2,3]) 235 | >>> execnet.loads(dump) 236 | [1,2,3] 237 | 238 | For more examples see :ref:`dumps/loads examples`. 239 | 240 | .. autofunction:: execnet.dumps(spec) 241 | .. autofunction:: execnet.loads(spec) 242 | -------------------------------------------------------------------------------- /doc/changelog.rst: -------------------------------------------------------------------------------- 1 | :tocdepth: 2 2 | 3 | .. _changes: 4 | 5 | execnet CHANGELOG 6 | ******************** 7 | 8 | .. include:: ../CHANGELOG.rst 9 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # 2 | # execnet documentation build configuration file, created by 3 | # sphinx-quickstart on Wed Sep 30 21:16:59 2009. 4 | # 5 | # This file is execfile()d with the wd set to its containing dir. 6 | # 7 | # Note that not all possible configuration values are present in this 8 | # autogenerated file. 9 | # 10 | # All configuration values have a default; values that are commented out 11 | # serve to show the default. 12 | import os 13 | import sys 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
18 | sys.path.append(os.path.dirname(os.path.dirname(__file__))) 19 | 20 | from execnet._version import version 21 | 22 | release = ".".join(version.split(".")[:2]) 23 | 24 | # -- General configuration ---------------------------------------------------- 25 | 26 | # Add any Sphinx extension module names here, as strings. 27 | # They can be extensions 28 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 29 | extensions = [ 30 | "sphinx.ext.autodoc", 31 | "sphinx.ext.doctest", 32 | "sphinx.ext.intersphinx", 33 | ] 34 | 35 | # Add any paths that contain templates here, relative to this directory. 36 | templates_path = ["_templates"] 37 | 38 | # The suffix of source filenames. 39 | source_suffix = ".rst" 40 | 41 | # The encoding of source files. 42 | # source_encoding = 'utf-8' 43 | 44 | # The master toctree document. 45 | master_doc = "index" 46 | 47 | # General information about the project. 48 | project = "execnet" 49 | copyright = "2012, holger krekel and others" 50 | 51 | # The language for content autogenerated by Sphinx. Refer to documentation 52 | # for a list of supported languages. 53 | # language = None 54 | 55 | # There are two options for replacing |today|: either, you set today to some 56 | # non-false value, then it is used: 57 | # today = '' 58 | # Else, today_fmt is used as the format for a strftime call. 59 | # today_fmt = '%B %d, %Y' 60 | 61 | # List of documents that shouldn't be included in the build. 62 | # unused_docs = [] 63 | 64 | # List of directories, relative to source directory, that shouldn't be searched 65 | # for source files. 66 | exclude_trees = ["_build"] 67 | 68 | # The reST default role (used for this markup: `text`) to use for all documents 69 | # dfault_role = None 70 | 71 | # If true, '()' will be appended to :func: etc. cross-reference text. 72 | # add_function_parentheses = True 73 | 74 | # If true, the current module name will be prepended to all description 75 | # unit titles (such as .. function::). 76 | add_module_names = True 77 | 78 | linkcheck_timeout = 20 79 | 80 | # If true, sectionauthor and moduleauthor directives will be shown in the 81 | # output. They are ignored by default. 82 | # show_authors = False 83 | 84 | # The name of the Pygments (syntax highlighting) style to use. 85 | pygments_style = "sphinx" 86 | 87 | # A list of ignored prefixes for module index sorting. 88 | # modindex_common_prefix = [] 89 | 90 | intersphinx_mapping = { 91 | "python": ("https://docs.python.org/3", None), 92 | } 93 | 94 | nitpicky = True 95 | nitpick_ignore = [ 96 | ("py:class", "execnet.gateway_base.ChannelFileRead"), 97 | ("py:class", "execnet.gateway_base.ChannelFileWrite"), 98 | ("py:class", "execnet.gateway.Gateway"), 99 | ] 100 | 101 | # -- Options for HTML output -------------------------------------------------- 102 | 103 | # The theme to use for HTML and HTML Help pages. Major themes that come with 104 | # Sphinx are currently 'default' and 'sphinxdoc'. 105 | html_theme = "sphinxdoc" 106 | 107 | # html_index = 'index.html' 108 | html_sidebars = {"index": ["indexsidebar.html"]} 109 | # html_additional_pages = {'index': 'index.html'} 110 | 111 | # Theme options are theme-specific and customize the look and feel of a theme 112 | # further. For a list of options available for each theme, see the 113 | # documentation. 114 | # html_theme_options = {} 115 | 116 | # Add any paths that contain custom themes here, relative to this directory. 117 | # html_theme_path = [] 118 | 119 | # The name for this set of Sphinx documents. 
If None, it defaults to 120 | # " v documentation". 121 | # html_title = None 122 | 123 | # A shorter title for the navigation bar. Default is the same as html_title. 124 | # html_short_title = None 125 | 126 | # The name of an image file (relative to this directory) to place at the top 127 | # of the sidebar. 128 | # html_logo = "codespeak.png" 129 | 130 | # The name of an image file (within the static path) to use as favicon of the 131 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 132 | # pixels large. 133 | # html_favicon = None 134 | 135 | # Add any paths that contain custom static files (such as style sheets) here, 136 | # relative to this directory. They are copied after the builtin static files, 137 | # so a file named "default.css" will overwrite the builtin "default.css". 138 | html_static_path = ["_static"] 139 | 140 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 141 | # using the given strftime format. 142 | # html_last_updated_fmt = '%b %d, %Y' 143 | 144 | # If true, SmartyPants will be used to convert quotes and dashes to 145 | # typographically correct entities. 146 | # html_use_smartypants = True 147 | 148 | # If false, no module index is generated. 149 | html_use_modindex = False 150 | 151 | # If false, no index is generated. 152 | # html_use_index = True 153 | 154 | # If true, the index is split into individual pages for each letter. 155 | # html_split_index = False 156 | 157 | # If true, links to the reST sources are added to the pages. 158 | html_show_sourcelink = False 159 | 160 | # If true, an OpenSearch description file will be output, and all pages will 161 | # contain a tag referring to it. The value of this option must be the 162 | # base URL from which the finished HTML is served. 163 | # html_use_opensearch = '' 164 | 165 | # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). 166 | # html_file_suffix = '' 167 | 168 | # Output file base name for HTML help builder. 169 | htmlhelp_basename = "execnetdoc" 170 | 171 | 172 | # -- Options for LaTeX output ------------------------------------------------- 173 | 174 | # The paper size ('letter' or 'a4'). 175 | # latex_paper_size = 'letter' 176 | 177 | # The font size ('10pt', '11pt' or '12pt'). 178 | # latex_font_size = '10pt' 179 | 180 | # Grouping the document tree into LaTeX files. List of tuples 181 | # (source start file, target name, title, author, documentclass [howto/manual]) 182 | latex_documents = [ 183 | ( 184 | "index", 185 | "execnet.tex", 186 | "execnet Documentation", 187 | "holger krekel and others", 188 | "manual", 189 | ) 190 | ] 191 | 192 | # The name of an image file (relative to this directory) to place at the top of 193 | # the title page. 194 | # latex_logo = None 195 | 196 | # For "manual" documents, if this is true, then toplevel headings are parts, 197 | # not chapters. 198 | # latex_use_parts = False 199 | 200 | # Additional stuff for the LaTeX preamble. 201 | # latex_preamble = '' 202 | 203 | # Documents to append as an appendix to all manuals. 204 | # latex_appendices = [] 205 | 206 | # If false, no module index is generated. 207 | # latex_use_modindex = True 208 | -------------------------------------------------------------------------------- /doc/example/conftest.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | 5 | # Make execnet and example code importable. 
6 | cand = pathlib.Path(__file__).parent.parent.parent 7 | if cand.joinpath("execnet", "__init__.py").exists(): 8 | if str(cand) not in sys.path: 9 | sys.path.insert(0, str(cand)) 10 | cand = pathlib.Path(__file__).parent 11 | if str(cand) not in sys.path: 12 | sys.path.insert(0, str(cand)) 13 | 14 | pytest_plugins = ["doctest"] 15 | -------------------------------------------------------------------------------- /doc/example/funcmultiplier.py: -------------------------------------------------------------------------------- 1 | import execnet 2 | 3 | 4 | def multiplier(channel, factor): 5 | while not channel.isclosed(): 6 | param = channel.receive() 7 | channel.send(param * factor) 8 | 9 | 10 | gw = execnet.makegateway() 11 | channel = gw.remote_exec(multiplier, factor=10) 12 | 13 | for i in range(5): 14 | channel.send(i) 15 | result = channel.receive() 16 | assert result == i * 10 17 | 18 | gw.exit() 19 | -------------------------------------------------------------------------------- /doc/example/hybridpython.rst: -------------------------------------------------------------------------------- 1 | Connecting different Python interpreters 2 | ========================================== 3 | 4 | .. _`dumps/loads examples`: 5 | 6 | Dumping and loading values across interpreter versions 7 | ---------------------------------------------------------- 8 | 9 | .. versionadded:: 1.1 10 | 11 | Execnet offers a new safe and fast :ref:`dumps/loads API` which you 12 | can use to dump builtin python data structures and load them 13 | later with the same or a different python interpreter (including 14 | between Python2 and Python3). The standard library offers 15 | the pickle and marshal modules but they do not work safely 16 | between different interpreter versions. Using xml/json 17 | requires a mapping of Python objects and is not easy to 18 | get right. Moreover, execnet allows to control handling 19 | of bytecode/strings/unicode types. Here is an example:: 20 | 21 | # using python2 22 | import execnet 23 | with open("data.py23", "wb") as f: 24 | f.write(execnet.dumps(["hello", "world"])) 25 | 26 | # using Python3 27 | import execnet 28 | with open("data.py23", "rb") as f: 29 | val = execnet.loads(f.read(), py2str_as_py3str=True) 30 | assert val == ["hello", "world"] 31 | 32 | See the :ref:`dumps/loads API` for more details on string 33 | conversion options. Please note, that you can not dump 34 | user-level instances, only builtin python types. 35 | 36 | Connect to Python2/Numpy from Python3 37 | ---------------------------------------- 38 | 39 | Here we run a Python3 interpreter to connect to a Python2.7 interpreter 40 | that has numpy installed. We send items to be added to an array and 41 | receive back the remote "repr" of the array:: 42 | 43 | import execnet 44 | gw = execnet.makegateway("popen//python=python2.7") 45 | channel = gw.remote_exec(""" 46 | import numpy 47 | array = numpy.array([1,2,3]) 48 | while 1: 49 | x = channel.receive() 50 | if x is None: 51 | break 52 | array = numpy.append(array, x) 53 | channel.send(repr(array)) 54 | """) 55 | for x in range(10): 56 | channel.send(x) 57 | channel.send(None) 58 | print (channel.receive()) 59 | 60 | will print on the CPython3.1 side:: 61 | 62 | array([1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) 63 | 64 | A more refined real-life example of python3/python2 interaction 65 | is the anyvc_ project which uses version-control bindings in 66 | a Python2 subprocess in order to offer Python3-based library 67 | functionality. 68 | 69 | .. 
_anyvc: http://bitbucket.org/RonnyPfannschmidt/anyvc/overview/ 70 | 71 | 72 | Reconfiguring the string coercion between python2 and python3 73 | ------------------------------------------------------------- 74 | 75 | Sometimes the default configuration of string coercion (2str to 3str, 3str to 2unicode) 76 | is inconvient, thus it can be reconfigured via `gw.reconfigure` and `channel.reconfigure`. Here is an example session on a Python2 interpreter:: 77 | 78 | 79 | >>> import execnet 80 | >>> execnet.makegateway("popen//python=python3.2") 81 | 82 | >>> gw=execnet.makegateway("popen//python=python3.2") 83 | >>> gw.remote_exec("channel.send('hello')").receive() 84 | u'hello' 85 | >>> gw.reconfigure(py3str_as_py2str=True) 86 | >>> gw.remote_exec("channel.send('hello')").receive() 87 | 'hello' 88 | >>> ch = gw.remote_exec('channel.send(type(channel.receive()).__name__)') 89 | >>> ch.send('a') 90 | >>> ch.receive() 91 | 'str' 92 | >>> ch = gw.remote_exec('channel.send(type(channel.receive()).__name__)') 93 | >>> ch.reconfigure(py2str_as_py3str=False) 94 | >>> ch.send('a') 95 | >>> ch.receive() 96 | u'bytes' 97 | 98 | 99 | Work with Java objects from CPython 100 | ---------------------------------------- 101 | 102 | Use your CPython interpreter to connect to a `Jython 2.5.1`_ interpreter 103 | and work with Java types:: 104 | 105 | import execnet 106 | gw = execnet.makegateway("popen//python=jython") 107 | channel = gw.remote_exec(""" 108 | from java.util import Vector 109 | v = Vector() 110 | v.add('aaa') 111 | v.add('bbb') 112 | for val in v: 113 | channel.send(val) 114 | """) 115 | 116 | for item in channel: 117 | print (item) 118 | 119 | will print on the CPython side:: 120 | 121 | aaa 122 | bbb 123 | 124 | .. _`Jython 2.5.1`: http://www.jython.org 125 | 126 | Work with C# objects from CPython 127 | ---------------------------------------- 128 | 129 | (Experimental) use your CPython interpreter to connect to a IronPython_ interpreter 130 | which can work with C# classes. Here is an example for instantiating 131 | a CLR Array instance and sending back its representation:: 132 | 133 | import execnet 134 | gw = execnet.makegateway("popen//python=ipy") 135 | 136 | channel = gw.remote_exec(""" 137 | import clr 138 | clr.AddReference("System") 139 | from System import Array 140 | array = Array[float]([1,2]) 141 | channel.send(str(array)) 142 | """) 143 | print (channel.receive()) 144 | 145 | using Mono 2.0 and IronPython-1.1 this will print on the CPython side:: 146 | 147 | System.Double[](1.0, 2.0) 148 | 149 | .. note:: 150 | Using IronPython needs more testing, likely newer versions 151 | will work better. please feedback if you have information. 152 | 153 | .. _IronPython: http://ironpython.net 154 | -------------------------------------------------------------------------------- /doc/example/popen_read_multiple.py: -------------------------------------------------------------------------------- 1 | """ 2 | example 3 | 4 | reading results from possibly blocking code running in sub processes. 
5 | """ 6 | 7 | import execnet 8 | 9 | NUM_PROCESSES = 5 10 | 11 | channels = [] 12 | for i in range(NUM_PROCESSES): 13 | gw = execnet.makegateway() # or use SSH or socket gateways 14 | channel = gw.remote_exec( 15 | """ 16 | import time 17 | secs = channel.receive() 18 | time.sleep(secs) 19 | channel.send("waited %d secs" % secs) 20 | """ 21 | ) 22 | channels.append(channel) 23 | print("*** instantiated subprocess", gw) 24 | 25 | mc = execnet.MultiChannel(channels) 26 | queue = mc.make_receive_queue() 27 | 28 | print("*** verifying that timeout on receiving results from blocked subprocesses works") 29 | try: 30 | queue.get(timeout=1.0) 31 | except Exception: 32 | pass 33 | 34 | print("*** sending subprocesses some data to have them unblock") 35 | mc.send_each(1) 36 | 37 | print("*** receiving results asynchronously") 38 | for i in range(NUM_PROCESSES): 39 | channel, result = queue.get(timeout=2.0) 40 | print("result", channel.gateway, result) 41 | -------------------------------------------------------------------------------- /doc/example/py3topy2.py: -------------------------------------------------------------------------------- 1 | import execnet 2 | 3 | gw = execnet.makegateway("popen//python=python2") 4 | channel = gw.remote_exec( 5 | """ 6 | import numpy 7 | array = numpy.array([1,2,3]) 8 | while 1: 9 | x = channel.receive() 10 | if x is None: 11 | break 12 | array = numpy.append(array, x) 13 | channel.send(repr(array)) 14 | """ 15 | ) 16 | for x in range(10): 17 | channel.send(x) 18 | channel.send(None) 19 | print(channel.receive()) 20 | -------------------------------------------------------------------------------- /doc/example/redirect_remote_output.py: -------------------------------------------------------------------------------- 1 | """ 2 | redirect output from remote to a local function 3 | showcasing features of the channel object: 4 | 5 | - sending a channel over a channel 6 | - adapting a channel to a file object 7 | - setting a callback for receiving channel data 8 | 9 | """ 10 | 11 | import execnet 12 | 13 | gw = execnet.makegateway() 14 | 15 | outchan = gw.remote_exec( 16 | """ 17 | import sys 18 | outchan = channel.gateway.newchannel() 19 | sys.stdout = outchan.makefile("w") 20 | channel.send(outchan) 21 | """ 22 | ).receive() 23 | 24 | 25 | # note: callbacks execute in receiver thread! 
26 | def write(data): 27 | print("received:", repr(data)) 28 | 29 | 30 | outchan.setcallback(write) # type: ignore[attr-defined] 31 | 32 | gw.remote_exec( 33 | """ 34 | print('hello world') 35 | print('remote execution ends') 36 | """ 37 | ).waitclose() 38 | -------------------------------------------------------------------------------- /doc/example/remote1.py: -------------------------------------------------------------------------------- 1 | # content of a module remote1.py 2 | 3 | if __name__ == "__channelexec__": 4 | channel.send("initialization complete") # type: ignore[name-defined] 5 | -------------------------------------------------------------------------------- /doc/example/remotecmd.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | # contents of: remotecmd.py 5 | def simple(arg): 6 | return arg + 1 7 | 8 | 9 | def listdir(path): 10 | return os.listdir(path) 11 | 12 | 13 | if __name__ == "__channelexec__": 14 | for item in channel: # type: ignore[name-defined] 15 | channel.send(eval(item)) # type: ignore[name-defined] 16 | -------------------------------------------------------------------------------- /doc/example/servefiles.py: -------------------------------------------------------------------------------- 1 | # content of servefiles.py 2 | 3 | 4 | def servefiles(channel): 5 | for fn in channel: 6 | f = open(fn, "rb") 7 | channel.send(f.read()) 8 | f.close() 9 | 10 | 11 | if __name__ == "__channelexec__": 12 | servefiles(channel) # type: ignore[name-defined] 13 | -------------------------------------------------------------------------------- /doc/example/svn-sync-repo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | 4 | small utility for hot-syncing a svn repository through ssh. 5 | uses execnet. 6 | 7 | """ 8 | 9 | import os 10 | import pathlib 11 | import subprocess 12 | import sys 13 | 14 | import execnet 15 | 16 | 17 | def usage(): 18 | arg0 = sys.argv[0] 19 | print(arg0, "[user@]remote-host:/repo/location localrepo [ssh-config-file]") 20 | 21 | 22 | def main(args): 23 | remote = args[0] 24 | localrepo = pathlib.Path(args[1]) 25 | if not localrepo.is_dir(): 26 | raise SystemExit(f"localrepo {localrepo} does not exist") 27 | if len(args) == 3: 28 | configfile = args[2] 29 | else: 30 | configfile = None 31 | remote_host, path = remote.split(":", 1) 32 | print("ssh-connecting to", remote_host) 33 | gw = getgateway(remote_host, configfile) 34 | 35 | local_rev = get_svn_youngest(localrepo) 36 | 37 | # local protocol 38 | # 1. client sends rev/repo -> server 39 | # 2. server checks for newer revisions and sends dumps 40 | # 3. client receives dumps, updates local repo 41 | # 4. 
client goes back to step 1 42 | c = gw.remote_exec( 43 | """ 44 | import os 45 | import subprocess 46 | import time 47 | 48 | remote_rev, repopath = channel.receive() 49 | while True: 50 | rev = subprocess.run( 51 | ["svnlook", "youngest", repopath], 52 | check=True, 53 | capture_output=True, 54 | text=True, 55 | ).stdout 56 | rev = int(rev) 57 | if rev > remote_rev: 58 | revrange = (remote_rev+1, rev) 59 | dumpchannel = channel.gateway.newchannel() 60 | channel.send(revrange) 61 | channel.send(dumpchannel) 62 | 63 | f = os.popen( 64 | "svnadmin dump -q --incremental -r %s:%s %s" 65 | % (revrange[0], revrange[1], repopath), 'r') 66 | try: 67 | maxcount = dumpchannel.receive() 68 | count = maxcount 69 | while 1: 70 | s = f.read(8192) 71 | if not s: 72 | raise EOFError 73 | dumpchannel.send(s) 74 | count = count - 1 75 | if count <= 0: 76 | ack = dumpchannel.receive() 77 | count = maxcount 78 | 79 | except EOFError: 80 | dumpchannel.close() 81 | remote_rev = rev 82 | else: 83 | # using svn-hook instead would be nice here 84 | time.sleep(30) 85 | """ 86 | ) 87 | 88 | c.send((local_rev, path)) 89 | print("checking revisions from %d in %s" % (local_rev, remote)) 90 | while 1: 91 | revstart, revend = c.receive() 92 | dumpchannel = c.receive() 93 | print("receiving revisions", revstart, "-", revend, "replaying...") 94 | svn_load(localrepo, dumpchannel) 95 | print("current revision", revend) 96 | 97 | 98 | def svn_load(repo, dumpchannel, maxcount=100): 99 | # every maxcount we will send an ACK to the other 100 | # side in order to synchronise and avoid our side 101 | # growing buffers (execnet does not control 102 | # RAM usage or receive queue sizes) 103 | dumpchannel.send(maxcount) 104 | f = os.popen(f"svnadmin load -q {repo}", "w") 105 | count = maxcount 106 | for x in dumpchannel: 107 | sys.stdout.write(".") 108 | sys.stdout.flush() 109 | f.write(x) 110 | count = count - 1 111 | if count <= 0: 112 | dumpchannel.send(maxcount) 113 | count = maxcount 114 | print() 115 | f.close() 116 | 117 | 118 | def get_svn_youngest(repo): 119 | rev = subprocess.run( 120 | ["svnlook", "youngest", repo], 121 | check=True, 122 | capture_output=True, 123 | text=True, 124 | ).stdout 125 | return int(rev) 126 | 127 | 128 | def getgateway(host, configfile=None): 129 | xspec = "ssh=%s" % host 130 | if configfile is not None: 131 | xspec += "//ssh_config=%s" % configfile 132 | return execnet.makegateway(xspec) 133 | 134 | 135 | if __name__ == "__main__": 136 | if len(sys.argv) < 3: 137 | usage() 138 | raise SystemExit(1) 139 | 140 | main(sys.argv[1:]) 141 | -------------------------------------------------------------------------------- /doc/example/sysinfo.py: -------------------------------------------------------------------------------- 1 | """ 2 | sysinfo.py [host1] [host2] [options] 3 | 4 | obtain system info from remote machine. 
5 | 6 | (c) Holger Krekel, MIT license 7 | """ 8 | 9 | import optparse 10 | import re 11 | import sys 12 | 13 | import execnet 14 | 15 | parser = optparse.OptionParser(usage=__doc__) 16 | parser.add_option( 17 | "-f", 18 | "--sshconfig", 19 | action="store", 20 | dest="ssh_config", 21 | default=None, 22 | help="use given ssh config file, and add info all contained hosts for getting info", 23 | ) 24 | parser.add_option( 25 | "-i", 26 | "--ignore", 27 | action="store", 28 | dest="ignores", 29 | default=None, 30 | help="ignore hosts (useful if the list of hostnames come from a file list)", 31 | ) 32 | 33 | 34 | def parsehosts(path): 35 | host_regex = re.compile(r"Host\s*(\S+)") 36 | l = [] 37 | 38 | with open(path) as fp: 39 | for line in fp: 40 | m = host_regex.match(line) 41 | if m is not None: 42 | (sshname,) = m.groups() 43 | l.append(sshname) 44 | return l 45 | 46 | 47 | class RemoteInfo: 48 | def __init__(self, gateway): 49 | self.gw = gateway 50 | self._cache = {} 51 | 52 | def exreceive(self, execstring): 53 | if execstring not in self._cache: 54 | channel = self.gw.remote_exec(execstring) 55 | self._cache[execstring] = channel.receive() 56 | return self._cache[execstring] 57 | 58 | def getmodattr(self, modpath): 59 | module = modpath.split(".")[0] 60 | return self.exreceive( 61 | """ 62 | import %s 63 | channel.send(%s) 64 | """ 65 | % (module, modpath) 66 | ) 67 | 68 | def islinux(self): 69 | return self.getmodattr("sys.platform").find("linux") != -1 70 | 71 | def getfqdn(self): 72 | return self.exreceive( 73 | """ 74 | import socket 75 | channel.send(socket.getfqdn()) 76 | """ 77 | ) 78 | 79 | def getmemswap(self): 80 | if self.islinux(): 81 | return self.exreceive( 82 | r""" 83 | import commands, re 84 | out = commands.getoutput("free") 85 | mem = re.search(r"Mem:\s+(\S*)", out).group(1) 86 | swap = re.search(r"Swap:\s+(\S*)", out).group(1) 87 | channel.send((mem, swap)) 88 | """ 89 | ) 90 | 91 | def getcpuinfo(self): 92 | if self.islinux(): 93 | return self.exreceive( 94 | """ 95 | # a hyperthreaded cpu core only counts as 1, although it 96 | # is present as 2 in /proc/cpuinfo. Counting it as 2 is 97 | # misleading because it is *by far* not as efficient as 98 | # two independent cores. 
99 | cpus = {} 100 | cpuinfo = {} 101 | f = open("/proc/cpuinfo") 102 | lines = f.readlines() 103 | f.close() 104 | for line in lines + ['']: 105 | if line.strip(): 106 | key, value = line.split(":", 1) 107 | cpuinfo[key.strip()] = value.strip() 108 | else: 109 | corekey = (cpuinfo.get("physical id"), 110 | cpuinfo.get("core id")) 111 | cpus[corekey] = 1 112 | numcpus = len(cpus) 113 | model = cpuinfo.get("model name") 114 | channel.send((numcpus, model)) 115 | """ 116 | ) 117 | 118 | 119 | def debug(*args): 120 | print(" ".join(map(str, args)), file=sys.stderr) 121 | 122 | 123 | def error(*args): 124 | debug("ERROR", args[0] + ":", *args[1:]) 125 | 126 | 127 | def getinfo(sshname, ssh_config=None, loginfo=sys.stdout): 128 | if ssh_config: 129 | spec = f"ssh=-F {ssh_config} {sshname}" 130 | else: 131 | spec = "ssh=%s" % sshname 132 | debug("connecting to", repr(spec)) 133 | try: 134 | gw = execnet.makegateway(spec) 135 | except OSError: 136 | error("could not get sshgatway", sshname) 137 | else: 138 | ri = RemoteInfo(gw) 139 | # print "%s info:" % sshname 140 | prefix = sshname.upper() + " " 141 | print(prefix, "fqdn:", ri.getfqdn(), file=loginfo) 142 | for attr in ("sys.platform", "sys.version_info"): 143 | loginfo.write(f"{prefix} {attr}: ") 144 | loginfo.flush() 145 | value = ri.getmodattr(attr) 146 | loginfo.write(str(value)) 147 | loginfo.write("\n") 148 | loginfo.flush() 149 | memswap = ri.getmemswap() 150 | if memswap: 151 | mem, swap = memswap 152 | print(prefix, "Memory:", mem, "Swap:", swap, file=loginfo) 153 | cpuinfo = ri.getcpuinfo() 154 | if cpuinfo: 155 | numcpu, model = cpuinfo 156 | print(prefix, "number of cpus:", numcpu, file=loginfo) 157 | print(prefix, "cpu model", model, file=loginfo) 158 | return ri 159 | 160 | 161 | if __name__ == "__main__": 162 | options, args = parser.parse_args() 163 | hosts = list(args) 164 | ssh_config = options.ssh_config 165 | if ssh_config: 166 | hosts.extend(parsehosts(ssh_config)) 167 | ignores = options.ignores or () 168 | if ignores: 169 | ignores = ignores.split(",") 170 | for host in hosts: 171 | if host not in ignores: 172 | getinfo(host, ssh_config=ssh_config) 173 | -------------------------------------------------------------------------------- /doc/example/taskserver.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import execnet 4 | 5 | group = execnet.Group() 6 | for i in range(4): # 4 CPUs 7 | group.makegateway() 8 | 9 | 10 | def process_item(channel): 11 | # task processor, sits on each CPU 12 | import time 13 | import random 14 | 15 | channel.send("ready") 16 | for x in channel: 17 | if x is None: # we can shutdown 18 | break 19 | # sleep random time, send result 20 | time.sleep(random.randrange(3)) 21 | channel.send(x * 10) 22 | 23 | 24 | # execute taskprocessor everywhere 25 | mch = group.remote_exec(process_item) 26 | 27 | # get a queue that gives us results 28 | q = mch.make_receive_queue(endmarker=-1) 29 | tasks: list[int] | None = list(range(10)) # a list of tasks, here just integers 30 | terminated = 0 31 | while 1: 32 | channel, item = q.get() 33 | if item == -1: 34 | terminated += 1 35 | print("terminated %s" % channel.gateway.id) 36 | if terminated == len(mch): 37 | print("got all results, terminating") 38 | break 39 | continue 40 | if item != "ready": 41 | print(f"other side {channel.gateway.id} returned {item!r}") 42 | if not tasks and tasks is not None: 43 | print("no tasks remain, sending termination request to all") 44 | 
mch.send_each(None) 45 | tasks = None 46 | if tasks: 47 | task = tasks.pop() 48 | channel.send(task) 49 | print(f"sent task {task!r} to {channel.gateway.id}") 50 | 51 | group.terminate() 52 | -------------------------------------------------------------------------------- /doc/example/test_debug.rst: -------------------------------------------------------------------------------- 1 | 2 | Debugging execnet / wire messages 3 | =============================================================== 4 | 5 | By setting the environment variable ``EXECNET_DEBUG`` you can 6 | configure the execnet tracing mechanism: 7 | 8 | :EXECNET_DEBUG=1: write per-process trace-files to ``${TEMPROOT}/execnet-debug-PID`` 9 | :EXECNET_DEBUG=2: perform tracing to stderr (popen-gateway workers will send this to their instantiator) 10 | 11 | Here is a simple example to see what goes on with a simple execution:: 12 | 13 | EXECNET_DEBUG=2 # or "set EXECNET_DEBUG=2" on windows 14 | 15 | python -c 'import execnet ; execnet.makegateway().remote_exec("42")' 16 | 17 | which will show PID-prefixed trace entries:: 18 | 19 | [2326] gw0 starting to receive 20 | [2326] gw0 sent 21 | [2327] creating workergateway on 22 | [2327] gw0-worker starting to receive 23 | [2327] gw0-worker received 24 | [2327] gw0-worker execution starts[1]: '42' 25 | [2327] gw0-worker execution finished 26 | [2327] gw0-worker sent 27 | [2327] gw0-worker 1 sent channel close message 28 | [2326] gw0 received 29 | [2326] gw0 1 channel.__del__ 30 | [2326] === atexit cleanup === 31 | [2326] gw0 gateway.exit() called 32 | [2326] gw0 --> sending GATEWAY_TERMINATE 33 | [2326] gw0 sent 34 | [2326] gw0 joining receiver thread 35 | [2327] gw0-worker received 36 | [2327] gw0-worker putting None to execqueue 37 | [2327] gw0-worker io.close_read() 38 | [2327] gw0-worker leaving 39 | [2327] gw0-worker 1 channel.__del__ 40 | [2327] gw0-worker io.close_write() 41 | [2327] gw0-worker workergateway.serve finished 42 | [2327] gw0-worker gateway.join() called while receiverthread already finished 43 | [2326] gw0 leaving 44 | -------------------------------------------------------------------------------- /doc/example/test_funcmultiplier.py: -------------------------------------------------------------------------------- 1 | def test_function(): 2 | import funcmultiplier 3 | -------------------------------------------------------------------------------- /doc/example/test_group.rst: -------------------------------------------------------------------------------- 1 | Managing multiple gateways and clusters 2 | ================================================== 3 | 4 | Usings Groups for managing multiple gateways 5 | ------------------------------------------------------ 6 | 7 | Use ``execnet.Group`` to manage membership and lifetime of 8 | multiple gateways:: 9 | 10 | >>> import execnet 11 | >>> group = execnet.Group(['popen'] * 2) 12 | >>> len(group) 13 | 2 14 | >>> group 15 | 16 | >>> list(group) 17 | [, ] 18 | >>> 'gw0' in group and 'gw1' in group 19 | True 20 | >>> group['gw0'] == group[0] 21 | True 22 | >>> group['gw1'] == group[1] 23 | True 24 | >>> group.terminate() # exit all member gateways 25 | >>> group 26 | 27 | 28 | Assigning gateway IDs 29 | ------------------------------------------------------ 30 | 31 | All gateways are created as part of a group and receive 32 | a per-group unique ``id`` after successful initialization. 33 | Pass an ``id=MYNAME`` part to ``group.makegateway``. 
Example:: 34 | 35 | >>> import execnet 36 | >>> group = execnet.Group() 37 | >>> gw = group.makegateway("popen//id=sub1") 38 | >>> assert gw.id == "sub1" 39 | >>> group['sub1'] 40 | 41 | 42 | Getting (auto) IDs before instantiation 43 | ------------------------------------------------------ 44 | 45 | Sometimes it's useful to know the gateway ID ahead 46 | of instantiating it:: 47 | 48 | >>> import execnet 49 | >>> group = execnet.Group() 50 | >>> spec = execnet.XSpec("popen") 51 | >>> group.allocate_id(spec) 52 | >>> allocated_id = spec.id 53 | >>> gw = group.makegateway(spec) 54 | >>> assert gw.id == allocated_id 55 | 56 | execnet.makegateway uses execnet.default_group 57 | ------------------------------------------------------ 58 | 59 | Each time you create a gateway with ``execnet.makegateway()`` 60 | you actually use the ``execnet.default_group``:: 61 | 62 | >>> import execnet 63 | >>> gw = execnet.makegateway() 64 | >>> gw in execnet.default_group 65 | True 66 | >>> execnet.default_group.defaultspec # used for empty makegateway() calls 67 | 'popen' 68 | 69 | Robust termination of SSH/popen processes 70 | ----------------------------------------------- 71 | 72 | Use ``group.terminate(timeout)`` if you want to terminate 73 | member gateways and ensure that no local subprocesses remain. 74 | You can specify a ``timeout`` after which an attempt at killing 75 | the related process is made:: 76 | 77 | >>> import execnet 78 | >>> group = execnet.Group() 79 | >>> gw = group.makegateway("popen//id=sleeper") 80 | >>> ch = gw.remote_exec("import time ; time.sleep(2.0)") 81 | >>> group 82 | 83 | >>> group.terminate(timeout=1.0) 84 | >>> group 85 | 86 | 87 | execnet aims to provide totally robust termination so if 88 | you have left-over processes or other termination issues 89 | please :doc:`report them <../support>`. Thanks! 90 | 91 | 92 | Using Groups to manage a certain type of gateway 93 | ------------------------------------------------------ 94 | 95 | Set ``group.defaultspec`` to determine the default gateway 96 | specification used by ``group.makegateway()``: 97 | 98 | >>> import execnet 99 | >>> group = execnet.Group() 100 | >>> group.defaultspec = "ssh=localhost//chdir=mytmp//nice=20" 101 | >>> gw = group.makegateway() 102 | >>> ch = gw.remote_exec(""" 103 | ... import os.path 104 | ... basename = os.path.basename(os.getcwd()) 105 | ... channel.send(basename) 106 | ... """) 107 | >>> ch.receive() 108 | 'mytmp' 109 | 110 | This way a Group object becomes kind of a Gateway factory where 111 | the factory-caller does not need to know the setup. 112 | -------------------------------------------------------------------------------- /doc/example/test_info.rst: -------------------------------------------------------------------------------- 1 | Basic local and remote communication 2 | ==================================== 3 | 4 | Execute source code in subprocess, communicate through a channel 5 | ------------------------------------------------------------------- 6 | 7 | You can instantiate a subprocess gateway, execute code 8 | in it and bidirectionally send messages:: 9 | 10 | >>> import execnet 11 | >>> gw = execnet.makegateway() 12 | >>> channel = gw.remote_exec("channel.send(channel.receive()+1)") 13 | >>> channel.send(1) 14 | >>> channel.receive() 15 | 2 16 | 17 | The initiating and the remote execution happen concurrently. 18 | ``channel.receive()`` operations return when input is available. 
19 | ``channel.send(data)`` operations return when the message could 20 | be delivered to the IO system. 21 | 22 | The initiating and the "other" process use a `share-nothing 23 | model`_ and ``channel.send|receive`` are means to pass basic data 24 | messages between two processes. 25 | 26 | .. _`share-nothing model`: http://en.wikipedia.org/wiki/Shared_nothing_architecture 27 | 28 | Remote-exec a function (avoiding inlined source part I) 29 | ------------------------------------------------------- 30 | 31 | You can send and remote execute parametrized pure functions like this: 32 | 33 | .. include:: funcmultiplier.py 34 | :literal: 35 | 36 | The ``multiplier`` function executes remotely and establishes 37 | a loop multiplying incoming data with a constant factor passed 38 | in via keyword arguments to ``remote_exec``. 39 | 40 | Notes: 41 | 42 | * unfortunately, you cannot type this example interactively because 43 | ``inspect.getsource(func)`` fails for interactively defined 44 | functions. 45 | 46 | * You will get an explicit error if you try to execute non-pure 47 | functions, i.e. functions that access any global state (which 48 | will not be available remotely as we have a share-nothing model 49 | between the nodes). 50 | 51 | 52 | Remote-exec a module (avoiding inlined source part II) 53 | ------------------------------------------------------ 54 | 55 | You can pass a module object to ``remote_exec`` in which case 56 | its source code will be sent. No dependencies will be transferred 57 | so the module must be self-contained or only use modules that are 58 | installed on the "other" side. Module code can detect if it is 59 | running in a remote_exec situation by checking for the special 60 | ``__name__`` attribute. 61 | 62 | .. include:: remote1.py 63 | :literal: 64 | 65 | You can now remote-execute the module like this:: 66 | 67 | >>> import execnet, remote1 68 | >>> gw = execnet.makegateway() 69 | >>> ch = gw.remote_exec(remote1) 70 | >>> print (ch.receive()) 71 | initialization complete 72 | 73 | which will print the 'initialization complete' string. 74 | 75 | 76 | Compare current working directories 77 | ---------------------------------------- 78 | 79 | A local subprocess gateway has the same working directory as the instantiator:: 80 | 81 | >>> import execnet, os 82 | >>> gw = execnet.makegateway() 83 | >>> ch = gw.remote_exec("import os; channel.send(os.getcwd())") 84 | >>> res = ch.receive() 85 | >>> assert res == os.getcwd() 86 | 87 | "ssh" gateways default to the login home directory. 88 | 89 | Get information from remote SSH account 90 | --------------------------------------- 91 | 92 | Use simple execution to obtain information from remote environments:: 93 | 94 | >>> import execnet, os 95 | >>> gw = execnet.makegateway("ssh=codespeak.net") 96 | >>> channel = gw.remote_exec(""" 97 | ... import sys, os 98 | ... channel.send((sys.platform, tuple(sys.version_info), os.getpid())) 99 | ... 
""") 100 | >>> platform, version_info, remote_pid = channel.receive() 101 | >>> platform 102 | 'linux2' 103 | >>> version_info 104 | (2, 6, 6, 'final', 0) 105 | 106 | Use a callback instead of receive() and wait for completion 107 | ------------------------------------------------------------- 108 | 109 | Set a channel callback to immediately react on incoming data:: 110 | 111 | >>> import execnet 112 | >>> gw = execnet.makegateway() 113 | >>> channel = gw.remote_exec("for i in range(10): channel.send(i)") 114 | >>> l = [] 115 | >>> channel.setcallback(l.append, endmarker=None) 116 | >>> channel.waitclose() # waits for closing, i.e. remote exec finish 117 | >>> l 118 | [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, None] 119 | 120 | Note that the callback function will execute in the receiver thread 121 | so it should not block on IO or long to execute. 122 | 123 | Sending channels over channels 124 | ------------------------------------------------------ 125 | 126 | You can create and transfer a channel over an existing channel 127 | and use it to transfer information:: 128 | 129 | >>> import execnet 130 | >>> gw = execnet.makegateway() 131 | >>> channel = gw.remote_exec(""" 132 | ... ch1, ch2 = channel.receive() 133 | ... ch2.send("world") 134 | ... ch1.send("hello") 135 | ... """) 136 | >>> c1 = gw.newchannel() # create new channel 137 | >>> c2 = gw.newchannel() # create another channel 138 | >>> channel.send((c1, c2)) # send them over 139 | >>> c1.receive() 140 | 'hello' 141 | >>> c2.receive() 142 | 'world' 143 | 144 | 145 | 146 | A simple command loop pattern 147 | -------------------------------------------------------------- 148 | 149 | If you want the remote side to serve a number 150 | of synchronous function calls into your module 151 | you can setup a serving loop and write a local protocol. 152 | 153 | .. include:: remotecmd.py 154 | :literal: 155 | 156 | Then on the local side you can do:: 157 | 158 | >>> import execnet, remotecmd 159 | >>> gw = execnet.makegateway() 160 | >>> ch = gw.remote_exec(remotecmd) 161 | >>> ch.send('simple(10)') # execute func-call remotely 162 | >>> ch.receive() 163 | 11 164 | 165 | Our remotecmd module starts up remote serving 166 | through the ``for item in channel`` loop which 167 | will terminate when the channel closes. It evaluates 168 | all incoming requests in the global name space and 169 | sends back the results. 170 | 171 | 172 | Instantiate gateways through sockets 173 | ----------------------------------------------------- 174 | 175 | .. _`socketserver.py`: https://raw.githubusercontent.com/pytest-dev/execnet/master/execnet/script/socketserver.py 176 | 177 | In cases where you do not have SSH-access to a machine 178 | you need to download a small version-independent standalone 179 | `socketserver.py`_ script to provide a remote bootstrapping-point. 180 | You do not need to install the execnet package remotely. 181 | Simply run the script like this:: 182 | 183 | python socketserver.py :8888 # bind to all IPs, port 8888 184 | 185 | You can then instruct execnet on your local machine to bootstrap 186 | itself into the remote socket endpoint:: 187 | 188 | import execnet 189 | gw = execnet.makegateway("socket=TARGET-IP:8888") 190 | 191 | That's it, you can now use the gateway object just like 192 | a popen- or SSH-based one. 193 | 194 | .. 
include:: test_ssh_fileserver.rst 195 | -------------------------------------------------------------------------------- /doc/example/test_multi.rst: -------------------------------------------------------------------------------- 1 | Advanced (multi) channel communication 2 | ===================================================== 3 | 4 | MultiChannel: container for multiple channels 5 | ------------------------------------------------------ 6 | 7 | Use ``execnet.MultiChannel`` to work with multiple channels:: 8 | 9 | >>> import execnet 10 | >>> ch1 = execnet.makegateway().remote_exec("channel.send(1)") 11 | >>> ch2 = execnet.makegateway().remote_exec("channel.send(2)") 12 | >>> mch = execnet.MultiChannel([ch1, ch2]) 13 | >>> len(mch) 14 | 2 15 | >>> mch[0] is ch1 and mch[1] is ch2 16 | True 17 | >>> ch1 in mch and ch2 in mch 18 | True 19 | >>> sum(mch.receive_each()) 20 | 3 21 | 22 | Receive results from sub processes with a Queue 23 | ----------------------------------------------------- 24 | 25 | Use ``MultiChannel.make_receive_queue()`` to get a queue 26 | from which to obtain results:: 27 | 28 | >>> ch1 = execnet.makegateway().remote_exec("channel.send(1)") 29 | >>> ch2 = execnet.makegateway().remote_exec("channel.send(2)") 30 | >>> mch = execnet.MultiChannel([ch1, ch2]) 31 | >>> queue = mch.make_receive_queue() 32 | >>> chan1, res1 = queue.get() 33 | >>> chan2, res2 = queue.get(timeout=3) 34 | >>> res1 + res2 35 | 3 36 | 37 | Working asynchronously/event-based with channels 38 | --------------------------------------------------- 39 | 40 | Use channel callbacks if you want to process incoming 41 | data immediately and without blocking execution:: 42 | 43 | >>> import execnet 44 | >>> gw = execnet.makegateway() 45 | >>> ch = gw.remote_exec("channel.receive() ; channel.send(42)") 46 | >>> l = [] 47 | >>> ch.setcallback(l.append) 48 | >>> ch.send(1) 49 | >>> ch.waitclose() 50 | >>> assert l == [42] 51 | 52 | Note that the callback function will be executed in the 53 | receiver thread and should not block or run for too long. 54 | 55 | Robustly receive results and termination notification 56 | ----------------------------------------------------- 57 | 58 | Use ``MultiChannel.make_receive_queue(endmarker)`` to specify 59 | an object to be put to the queue when the remote side of a channel 60 | is closed. The endmarker will also be put to the Queue if the gateway 61 | is blocked in execution and is terminated/killed:: 62 | 63 | >>> group = execnet.Group(['popen'] * 3) # create three gateways 64 | >>> mch = group.remote_exec("channel.send(channel.receive()+1)") 65 | >>> queue = mch.make_receive_queue(endmarker=42) 66 | >>> mch[0].send(1) 67 | >>> chan1, res1 = queue.get() 68 | >>> res1 69 | 2 70 | >>> group.terminate(timeout=1) # kill processes waiting on receive 71 | >>> for i in range(3): 72 | ... chan1, res1 = queue.get() 73 | ... assert res1 == 42 74 | >>> group 75 | 76 | 77 | 78 | 79 | Saturate multiple Hosts and CPUs with tasks to process 80 | -------------------------------------------------------- 81 | 82 | If you have multiple CPUs or hosts you can create as many 83 | gateways and then have a process sit on each CPU and wait 84 | for a task to proceed. One complication is that we 85 | want to ensure clean termination of all processes 86 | and loose no result. Here is an example that just uses 87 | local subprocesses and does the task: 88 | 89 | .. 
include:: taskserver.py 90 | :literal: 91 | -------------------------------------------------------------------------------- /doc/example/test_proxy.rst: -------------------------------------------------------------------------------- 1 | Managing proxied gateways 2 | ========================== 3 | 4 | Simple proxying 5 | ---------------- 6 | 7 | Using the ``via`` arg of specs we can create a gateway 8 | whose io is created on a remote gateway and proxied to the master. 9 | 10 | The simplest use case, is where one creates one master process 11 | and uses it to control new workers and their environment 12 | 13 | :: 14 | 15 | >>> import execnet 16 | >>> group = execnet.Group() 17 | >>> group.defaultspec = 'popen//via=master' 18 | >>> master = group.makegateway('popen//id=master') 19 | >>> master 20 | 21 | >>> worker = group.makegateway() 22 | >>> worker 23 | 24 | >>> group 25 | 26 | -------------------------------------------------------------------------------- /doc/example/test_ssh_fileserver.rst: -------------------------------------------------------------------------------- 1 | Receive file contents from remote SSH account 2 | ----------------------------------------------------- 3 | 4 | Here is some small server code that you can use to retrieve 5 | contents of remote files: 6 | 7 | .. include:: servefiles.py 8 | :literal: 9 | 10 | And here is some code to use it to retrieve remote contents:: 11 | 12 | import execnet 13 | import servefiles 14 | gw = execnet.makegateway("ssh=codespeak.net") 15 | channel = gw.remote_exec(servefiles) 16 | 17 | for fn in ('/etc/passwd', '/etc/group'): 18 | channel.send(fn) 19 | content = channel.receive() 20 | print(fn) 21 | print(content) 22 | -------------------------------------------------------------------------------- /doc/examples.rst: -------------------------------------------------------------------------------- 1 | ============================================================================== 2 | examples 3 | ============================================================================== 4 | 5 | .. _`execnet-dev`: http://mail.python.org/mailman/listinfo/execnet-dev 6 | .. _`execnet-commit`: http://mail.python.org/mailman/listinfo/execnet-commit 7 | 8 | Note: all examples with `>>>` prompts are automatically tested. 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | 13 | example/test_info 14 | example/test_group 15 | example/test_proxy 16 | example/test_multi 17 | example/hybridpython 18 | example/test_debug 19 | 20 | .. toctree:: 21 | :hidden: 22 | 23 | example/test_ssh_fileserver 24 | -------------------------------------------------------------------------------- /doc/implnotes.rst: -------------------------------------------------------------------------------- 1 | gateway_base.py 2 | ---------------------- 3 | 4 | The code of this module is sent to the "other side" 5 | as a means of bootstrapping a Gateway object 6 | capable of receiving and executing code, 7 | and routing data through channels. 8 | 9 | Gateways operate on InputOutput objects offering 10 | a write and a read(n) method. 11 | 12 | Once bootstrapped a higher level protocol 13 | based on Messages is used. Messages are serialized 14 | to and from InputOutput objects. The details of this protocol 15 | are locally defined in this module. There is no need 16 | for standardizing or versioning the protocol. 17 | 18 | After bootstrapping the BaseGateway opens a receiver thread which 19 | accepts encoded messages and triggers actions to interpret them. 
20 | Sending of channel data items happens directly through 21 | write operations to InputOutput objects so there is no 22 | separate thread. 23 | 24 | Code execution messages are put into an execqueue from 25 | which they will be taken for execution. gateway.serve() 26 | will take and execute such items, one by one. This means 27 | that, by default, incoming execution is single-threaded. 28 | 29 | The receiver thread terminates if the remote side sends 30 | a gateway termination message or if the IO-connection drops. 31 | It puts an end symbol into the execqueue so 32 | that serve() can cleanly finish as well. 33 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. image:: _static/pythonring.png 2 | :align: right 3 | 4 | 5 | .. warning:: 6 | 7 | execnet currently is in maintenance-only mode, mostly because it is still the backend 8 | of the pytest-xdist plugin. Do not use in new projects. 9 | 10 | Python_ is a mature dynamic language whose interpreters can interact with 11 | all major computing platforms today. 12 | 13 | **execnet** provides a `share-nothing model`_ with `channel-send/receive`_ 14 | communication for distributing execution across many Python interpreters 15 | across version, platform and network barriers. It has 16 | a minimal and fast API targeting the following uses: 17 | 18 | * Distribute tasks to (many) local or remote CPUs 19 | * Write and deploy hybrid multi-process applications 20 | * Write scripts to administer multiple environments 21 | 22 | .. _`channel-send/receive`: http://en.wikipedia.org/wiki/Channel_(programming) 23 | .. _`share-nothing model`: http://en.wikipedia.org/wiki/Shared_nothing_architecture 24 | 25 | 26 | .. _Python: http://www.python.org 27 | 28 | Features 29 | ------------------ 30 | 31 | * Automatic bootstrapping: no manual remote installation. 32 | 33 | * Safe and simple serialization of Python builtin 34 | types for sending/receiving structured data messages. 35 | (New in 1.1) execnet offers a new :ref:`dumps/loads <dumps/loads>` 36 | API which allows cross-interpreter compatible serialization 37 | of Python builtin types. 38 | 39 | * Flexible communication: synchronous send/receive as well as 40 | callback/queue mechanisms supported 41 | 42 | * Easy creation, handling and termination of multiple processes 43 | 44 | * Well tested interactions between CPython 2.5-2.7, CPython-3.3, Jython 2.5.1 45 | and PyPy interpreters. 46 | 47 | * Fully interoperable between Windows and Unix-ish systems. 48 | 49 | * Many tested :doc:`examples` 50 | 51 | Known uses 52 | ------------------- 53 | 54 | * `pytest`_ uses it for its `distributed testing`_ mechanism. 55 | 56 | * `quora`_ uses it for `connecting CPython and PyPy`_. 57 | 58 | * Jacob Perkins uses it for his `Distributed NTLK with execnet`_ 59 | project to launch computation processes through ssh. He also 60 | compares `disco and execnet`_ in a subsequent post. 61 | 62 | * Ronny Pfannschmidt uses it for his `anyvc`_ VCS-abstraction project 63 | to bridge the Python2/Python3 version gap. 64 | 65 | * Sysadmins and developers are using it for ad-hoc custom scripting 66 | 67 | .. _`quora`: http://quora.com 68 | .. _`connecting CPython and PyPy`: http://www.quora.com/Quora-Infrastructure/Did-Quoras-switch-to-PyPy-result-in-increased-memory-consumption 69 | 70 | .. _`pytest`: https://docs.pytest.org 71 | .. _`distributed testing`: https://pypi.python.org/pypi/pytest-xdist 72 | .. 
_`Distributed NTLK with execnet`: http://streamhacker.com/2009/11/29/distributed-nltk-execnet/ 73 | .. _`disco and execnet`: http://streamhacker.com/2009/12/14/execnet-disco-distributed-nltk/ 74 | .. _`anyvc`: http://bitbucket.org/RonnyPfannschmidt/anyvc/ 75 | 76 | Project status 77 | -------------------------- 78 | 79 | The project is currently in **maintenance-only mode**, with PRs fixing bugs being gracefully accepted. 80 | 81 | Currently there are no plans to improve the project further, being maintained mostly because it is 82 | used as backend of the popular `pytest-xdist `__ plugin. 83 | 84 | ``execnet`` was conceived originally by `Holger Krekel`_ and is licensed under the MIT license 85 | since version 1.2. 86 | 87 | .. _`basic API`: basics.html 88 | .. _`Holger Krekel`: http://twitter.com/hpk42 89 | 90 | .. toctree:: 91 | :hidden: 92 | 93 | support 94 | implnotes 95 | install 96 | -------------------------------------------------------------------------------- /doc/install.rst: -------------------------------------------------------------------------------- 1 | Info in a nutshell 2 | ==================== 3 | 4 | **Pythons**: 3.8+, PyPy 3 5 | 6 | **Operating systems**: Linux, Windows, OSX, Unix 7 | 8 | **Distribution names**: 9 | 10 | * PyPI name: ``execnet`` 11 | * Redhat Fedora: ``python-execnet`` 12 | * Debian: ``python-execnet`` 13 | * Gentoo: ``dev-python/execnet`` 14 | 15 | **git repository**: https://github.com/pytest-dev/execnet 16 | 17 | Installation 18 | ==================== 19 | 20 | Install via pip_:: 21 | 22 | pip install execnet 23 | 24 | Next checkout the basic api and examples: 25 | 26 | .. toctree:: 27 | :maxdepth: 1 28 | 29 | examples 30 | basics 31 | changelog 32 | 33 | .. _pip: http://pypi.python.org/pypi/pip 34 | .. _`github repository`: https://github.com/pytest-dev/execnet 35 | .. _`execnet git repository`: https://github.com/pytest-dev/execnet 36 | .. _`pypi release`: http://pypi.python.org/pypi/execnet 37 | .. _distribute: http://pypi.python.org/pypi/distribute 38 | -------------------------------------------------------------------------------- /doc/support.rst: -------------------------------------------------------------------------------- 1 | Contact and Support channels 2 | ------------------------------ 3 | 4 | If you have interest, questions, issues or suggestions you 5 | are welcome to: 6 | 7 | * Join `execnet-dev`_ for general discussions 8 | * Join `execnet-commit`_ to be notified of changes 9 | * Clone the `github repository`_ and submit patches 10 | * Hang out on the #pytest channel on `irc.libera.chat `_ 11 | (using an IRC client, via `webchat `_, 12 | or `via Matrix `_). 13 | 14 | .. _`execnet-dev`: http://mail.python.org/mailman/listinfo/execnet-dev 15 | .. _`execnet-commit`: http://mail.python.org/mailman/listinfo/execnet-commit 16 | .. 
_`github repository`: https://github.com/pytest-dev/execnet 17 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "hatchling", 4 | "hatch-vcs", 5 | ] 6 | build-backend = "hatchling.build" 7 | 8 | [project] 9 | name = "execnet" 10 | dynamic = ["version"] 11 | description = "execnet: rapid multi-Python deployment" 12 | readme = {"file" = "README.rst", "content-type" = "text/x-rst"} 13 | license = "MIT" 14 | requires-python = ">=3.8" 15 | authors = [ 16 | { name = "holger krekel and others" }, 17 | ] 18 | classifiers = [ 19 | "Development Status :: 5 - Production/Stable", 20 | "Intended Audience :: Developers", 21 | "License :: OSI Approved :: MIT License", 22 | "Operating System :: MacOS :: MacOS X", 23 | "Operating System :: Microsoft :: Windows", 24 | "Operating System :: POSIX", 25 | "Programming Language :: Python :: 3.8", 26 | "Programming Language :: Python :: 3.9", 27 | "Programming Language :: Python :: 3.10", 28 | "Programming Language :: Python :: 3.11", 29 | "Programming Language :: Python :: 3.12", 30 | "Programming Language :: Python :: Implementation :: CPython", 31 | "Programming Language :: Python :: Implementation :: PyPy", 32 | "Topic :: Software Development :: Libraries", 33 | "Topic :: System :: Distributed Computing", 34 | "Topic :: System :: Networking", 35 | ] 36 | 37 | [project.optional-dependencies] 38 | testing = [ 39 | "pre-commit", 40 | "pytest", 41 | "tox", 42 | "hatch", 43 | ] 44 | 45 | [project.urls] 46 | Homepage = "https://execnet.readthedocs.io/en/latest/" 47 | 48 | [tool.ruff.lint] 49 | extend-select = [ 50 | "B", # bugbear 51 | "E", # pycodestyle 52 | "F", # pyflakes 53 | "I", # isort 54 | "PYI", # flake8-pyi 55 | "UP", # pyupgrade 56 | "RUF", # ruff 57 | "W", # pycodestyle 58 | "PIE", # flake8-pie 59 | "PGH", # pygrep-hooks 60 | "PLE", # pylint error 61 | "PLW", # pylint warning 62 | ] 63 | ignore = [ 64 | # bugbear ignore 65 | "B007", # Loop control variable `i` not used within loop body 66 | "B011", # Do not `assert False` (`python -O` removes these calls) 67 | # pycodestyle ignore 68 | "E501", # Line too long 69 | "E741", # Ambiguous variable name 70 | # ruff ignore 71 | "RUF012", # Mutable class attributes should be annotated with `typing.ClassVar` 72 | # pylint ignore 73 | "PLW0603", # Using the global statement 74 | "PLW0120", # remove the else and dedent its contents 75 | "PLW2901", # for loop variable overwritten by assignment target 76 | "PLR5501", # Use `elif` instead of `else` then `if` 77 | "UP031", # Use format specifiers instead of percent format 78 | ] 79 | 80 | [tool.ruff.lint.isort] 81 | force-single-line = true 82 | known-third-party = ["src"] 83 | 84 | [tool.hatch.version] 85 | source = "vcs" 86 | 87 | [tool.hatch.build.hooks.vcs] 88 | version-file = "src/execnet/_version.py" 89 | 90 | [tool.hatch.build.targets.sdist] 91 | include = [ 92 | "/doc", 93 | "/src", 94 | "/testing", 95 | "tox.ini", 96 | ] 97 | 98 | [tool.mypy] 99 | python_version = "3.8" 100 | mypy_path = ["src"] 101 | files = ["src", "testing"] 102 | strict = true 103 | warn_unreachable = true 104 | warn_unused_ignores = false 105 | disallow_untyped_calls = false 106 | disallow_untyped_defs = false 107 | disallow_incomplete_defs = false 108 | 109 | [[tool.mypy.overrides]] 110 | module = [ 111 | "eventlet.*", 112 | "gevent.thread.*", 113 | ] 114 | ignore_missing_imports = true 115 | 
-------------------------------------------------------------------------------- /src/execnet/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | execnet 3 | ------- 4 | 5 | pure python lib for connecting to local and remote Python Interpreters. 6 | 7 | (c) 2012, Holger Krekel and others 8 | """ 9 | 10 | from ._version import version as __version__ 11 | from .gateway import Gateway 12 | from .gateway_base import Channel 13 | from .gateway_base import DataFormatError 14 | from .gateway_base import DumpError 15 | from .gateway_base import LoadError 16 | from .gateway_base import RemoteError 17 | from .gateway_base import TimeoutError 18 | from .gateway_base import dump 19 | from .gateway_base import dumps 20 | from .gateway_base import load 21 | from .gateway_base import loads 22 | from .gateway_bootstrap import HostNotFound 23 | from .multi import Group 24 | from .multi import MultiChannel 25 | from .multi import default_group 26 | from .multi import makegateway 27 | from .multi import set_execmodel 28 | from .rsync import RSync 29 | from .xspec import XSpec 30 | 31 | __all__ = [ 32 | "Channel", 33 | "DataFormatError", 34 | "DumpError", 35 | "Gateway", 36 | "Group", 37 | "HostNotFound", 38 | "LoadError", 39 | "MultiChannel", 40 | "RSync", 41 | "RemoteError", 42 | "TimeoutError", 43 | "XSpec", 44 | "__version__", 45 | "default_group", 46 | "dump", 47 | "dumps", 48 | "load", 49 | "loads", 50 | "makegateway", 51 | "set_execmodel", 52 | ] 53 | -------------------------------------------------------------------------------- /src/execnet/gateway.py: -------------------------------------------------------------------------------- 1 | """Gateway code for initiating popen, socket and ssh connections. 2 | 3 | (c) 2004-2013, Holger Krekel and others 4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import inspect 9 | import linecache 10 | import textwrap 11 | import types 12 | from typing import TYPE_CHECKING 13 | from typing import Any 14 | from typing import Callable 15 | 16 | from . import gateway_base 17 | from .gateway_base import IO 18 | from .gateway_base import Channel 19 | from .gateway_base import Message 20 | from .multi import Group 21 | from .xspec import XSpec 22 | 23 | 24 | class Gateway(gateway_base.BaseGateway): 25 | """Gateway to a local or remote Python Interpreter.""" 26 | 27 | _group: Group 28 | 29 | def __init__(self, io: IO, spec: XSpec) -> None: 30 | """:private:""" 31 | super().__init__(io=io, id=spec.id, _startcount=1) 32 | self.spec = spec 33 | self._initreceive() 34 | 35 | @property 36 | def remoteaddress(self) -> str: 37 | # Only defined for remote IO types. 38 | return self._io.remoteaddress # type: ignore[attr-defined,no-any-return] 39 | 40 | def __repr__(self) -> str: 41 | """A string representing gateway type and status.""" 42 | try: 43 | r: str = (self.hasreceiver() and "receive-live") or "not-receiving" 44 | i = str(len(self._channelfactory.channels())) 45 | except AttributeError: 46 | r = "uninitialized" 47 | i = "no" 48 | return f"<{self.__class__.__name__} id={self.id!r} {r}, {self.execmodel.backend} model, {i} active channels>" 49 | 50 | def exit(self) -> None: 51 | """Trigger gateway exit. 52 | 53 | Defer waiting for finishing of receiver-thread and subprocess activity 54 | to when group.terminate() is called. 
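        A minimal sketch (assuming a gateway created via ``execnet.makegateway()``,
        which registers it with ``execnet.default_group``)::

            gw = execnet.makegateway()
            gw.exit()  # request termination; does not wait
            execnet.default_group.terminate()  # wait for / kill the subprocess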
55 | """ 56 | self._trace("gateway.exit() called") 57 | if self not in self._group: 58 | self._trace("gateway already unregistered with group") 59 | return 60 | self._group._unregister(self) 61 | try: 62 | self._trace("--> sending GATEWAY_TERMINATE") 63 | self._send(Message.GATEWAY_TERMINATE) 64 | self._trace("--> io.close_write") 65 | self._io.close_write() 66 | except (ValueError, EOFError, OSError) as exc: 67 | self._trace("io-error: could not send termination sequence") 68 | self._trace(" exception: %r" % exc) 69 | 70 | def reconfigure( 71 | self, py2str_as_py3str: bool = True, py3str_as_py2str: bool = False 72 | ) -> None: 73 | """Set the string coercion for this gateway. 74 | 75 | The default is to try to convert py2 str as py3 str, but not to try and 76 | convert py3 str to py2 str. 77 | """ 78 | self._strconfig = (py2str_as_py3str, py3str_as_py2str) 79 | data = gateway_base.dumps_internal(self._strconfig) 80 | self._send(Message.RECONFIGURE, data=data) 81 | 82 | def _rinfo(self, update: bool = False) -> RInfo: 83 | """Return some sys/env information from remote.""" 84 | if update or not hasattr(self, "_cache_rinfo"): 85 | ch = self.remote_exec(rinfo_source) 86 | try: 87 | self._cache_rinfo = RInfo(ch.receive()) 88 | finally: 89 | ch.waitclose() 90 | return self._cache_rinfo 91 | 92 | def hasreceiver(self) -> bool: 93 | """Whether gateway is able to receive data.""" 94 | return self._receivepool.active_count() > 0 95 | 96 | def remote_status(self) -> RemoteStatus: 97 | """Obtain information about the remote execution status.""" 98 | channel = self.newchannel() 99 | self._send(Message.STATUS, channel.id) 100 | statusdict = channel.receive() 101 | # the other side didn't actually instantiate a channel 102 | # so we just delete the internal id/channel mapping 103 | self._channelfactory._local_close(channel.id) 104 | return RemoteStatus(statusdict) 105 | 106 | def remote_exec( 107 | self, 108 | source: str | types.FunctionType | Callable[..., object] | types.ModuleType, 109 | **kwargs: object, 110 | ) -> Channel: 111 | """Return channel object and connect it to a remote 112 | execution thread where the given ``source`` executes. 113 | 114 | * ``source`` is a string: execute source string remotely 115 | with a ``channel`` put into the global namespace. 116 | * ``source`` is a pure function: serialize source and 117 | call function with ``**kwargs``, adding a 118 | ``channel`` object to the keyword arguments. 119 | * ``source`` is a pure module: execute source of module 120 | with a ``channel`` in its global namespace. 121 | 122 | In all cases the binding ``__name__='__channelexec__'`` 123 | will be available in the global namespace of the remotely 124 | executing code. 
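        A minimal usage sketch (assuming a local ``popen`` gateway)::

            import execnet
            gw = execnet.makegateway()
            ch = gw.remote_exec("channel.send(channel.receive() + 1)")
            ch.send(41)
            assert ch.receive() == 42
            gw.exit()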
125 | """ 126 | call_name = None 127 | file_name = None 128 | if isinstance(source, types.ModuleType): 129 | file_name = inspect.getsourcefile(source) 130 | linecache.updatecache(file_name) # type: ignore[arg-type] 131 | source = inspect.getsource(source) 132 | elif isinstance(source, types.FunctionType): 133 | call_name = source.__name__ 134 | file_name = inspect.getsourcefile(source) 135 | source = _source_of_function(source) 136 | else: 137 | source = textwrap.dedent(str(source)) 138 | 139 | if not call_name and kwargs: 140 | raise TypeError("can't pass kwargs to non-function remote_exec") 141 | 142 | channel = self.newchannel() 143 | self._send( 144 | Message.CHANNEL_EXEC, 145 | channel.id, 146 | gateway_base.dumps_internal((source, file_name, call_name, kwargs)), 147 | ) 148 | return channel 149 | 150 | def remote_init_threads(self, num: int | None = None) -> None: 151 | """DEPRECATED. Is currently a NO-OPERATION already.""" 152 | print("WARNING: remote_init_threads() is a no-operation in execnet-1.2") 153 | 154 | 155 | class RInfo: 156 | def __init__(self, kwargs) -> None: 157 | self.__dict__.update(kwargs) 158 | 159 | def __repr__(self) -> str: 160 | info = ", ".join(f"{k}={v}" for k, v in sorted(self.__dict__.items())) 161 | return "" % info 162 | 163 | if TYPE_CHECKING: 164 | 165 | def __getattr__(self, name: str) -> Any: ... 166 | 167 | 168 | RemoteStatus = RInfo 169 | 170 | 171 | def rinfo_source(channel) -> None: 172 | import os 173 | import sys 174 | 175 | channel.send( 176 | dict( 177 | executable=sys.executable, 178 | version_info=sys.version_info[:5], 179 | platform=sys.platform, 180 | cwd=os.getcwd(), 181 | pid=os.getpid(), 182 | ) 183 | ) 184 | 185 | 186 | def _find_non_builtin_globals(source: str, codeobj: types.CodeType) -> list[str]: 187 | import ast 188 | import builtins 189 | 190 | vars = dict.fromkeys(codeobj.co_varnames) 191 | return [ 192 | node.id 193 | for node in ast.walk(ast.parse(source)) 194 | if isinstance(node, ast.Name) 195 | and node.id not in vars 196 | and node.id not in builtins.__dict__ 197 | ] 198 | 199 | 200 | def _source_of_function(function: types.FunctionType | Callable[..., object]) -> str: 201 | if function.__name__ == "": 202 | raise ValueError("can't evaluate lambda functions'") 203 | # XXX: we dont check before remote instantiation 204 | # if arguments are used properly 205 | try: 206 | sig = inspect.getfullargspec(function) 207 | except AttributeError: 208 | args = inspect.getargspec(function)[0] 209 | else: 210 | args = sig.args 211 | if not args or args[0] != "channel": 212 | raise ValueError("expected first function argument to be `channel`") 213 | 214 | closure = function.__closure__ 215 | codeobj = function.__code__ 216 | 217 | if closure is not None: 218 | raise ValueError("functions with closures can't be passed") 219 | 220 | try: 221 | source = inspect.getsource(function) 222 | except OSError as e: 223 | raise ValueError("can't find source file for %s" % function) from e 224 | 225 | source = textwrap.dedent(source) # just for inner functions 226 | 227 | used_globals = _find_non_builtin_globals(source, codeobj) 228 | if used_globals: 229 | raise ValueError("the use of non-builtin globals isn't supported", used_globals) 230 | 231 | leading_ws = "\n" * (codeobj.co_firstlineno - 1) 232 | return leading_ws + source 233 | -------------------------------------------------------------------------------- /src/execnet/gateway_bootstrap.py: -------------------------------------------------------------------------------- 1 | """Code to 
initialize the remote side of a gateway once the IO is created.""" 2 | 3 | from __future__ import annotations 4 | 5 | import inspect 6 | import os 7 | 8 | import execnet 9 | 10 | from . import gateway_base 11 | from .gateway_base import IO 12 | from .xspec import XSpec 13 | 14 | importdir = os.path.dirname(os.path.dirname(execnet.__file__)) 15 | 16 | 17 | class HostNotFound(Exception): 18 | pass 19 | 20 | 21 | def bootstrap_import(io: IO, spec: XSpec) -> None: 22 | # Only insert the importdir into the path if we must. This prevents 23 | # bugs where backports expect to be shadowed by the standard library on 24 | # newer versions of python but would instead shadow the standard library. 25 | sendexec( 26 | io, 27 | "import sys", 28 | "if %r not in sys.path:" % importdir, 29 | " sys.path.insert(0, %r)" % importdir, 30 | "from execnet.gateway_base import serve, init_popen_io, get_execmodel", 31 | "sys.stdout.write('1')", 32 | "sys.stdout.flush()", 33 | "execmodel = get_execmodel(%r)" % spec.execmodel, 34 | "serve(init_popen_io(execmodel), id='%s-worker')" % spec.id, 35 | ) 36 | s = io.read(1) 37 | assert s == b"1", repr(s) 38 | 39 | 40 | def bootstrap_exec(io: IO, spec: XSpec) -> None: 41 | try: 42 | sendexec( 43 | io, 44 | inspect.getsource(gateway_base), 45 | "execmodel = get_execmodel(%r)" % spec.execmodel, 46 | "io = init_popen_io(execmodel)", 47 | "io.write('1'.encode('ascii'))", 48 | "serve(io, id='%s-worker')" % spec.id, 49 | ) 50 | s = io.read(1) 51 | assert s == b"1" 52 | except EOFError: 53 | ret = io.wait() 54 | if ret == 255 and hasattr(io, "remoteaddress"): 55 | raise HostNotFound(io.remoteaddress) from None 56 | 57 | 58 | def bootstrap_socket(io: IO, id) -> None: 59 | # XXX: switch to spec 60 | from execnet.gateway_socket import SocketIO 61 | 62 | sendexec( 63 | io, 64 | inspect.getsource(gateway_base), 65 | "import socket", 66 | inspect.getsource(SocketIO), 67 | "try: execmodel", 68 | "except NameError:", 69 | " execmodel = get_execmodel('thread')", 70 | "io = SocketIO(clientsock, execmodel)", 71 | "io.write('1'.encode('ascii'))", 72 | "serve(io, id='%s-worker')" % id, 73 | ) 74 | s = io.read(1) 75 | assert s == b"1" 76 | 77 | 78 | def sendexec(io: IO, *sources: str) -> None: 79 | source = "\n".join(sources) 80 | io.write((repr(source) + "\n").encode("utf-8")) 81 | 82 | 83 | def bootstrap(io: IO, spec: XSpec) -> execnet.Gateway: 84 | if spec.popen: 85 | if spec.via or spec.python: 86 | bootstrap_exec(io, spec) 87 | else: 88 | bootstrap_import(io, spec) 89 | elif spec.ssh or spec.vagrant_ssh: 90 | bootstrap_exec(io, spec) 91 | elif spec.socket: 92 | bootstrap_socket(io, spec) 93 | else: 94 | raise ValueError("unknown gateway type, can't bootstrap") 95 | gw = execnet.Gateway(io, spec) 96 | return gw 97 | -------------------------------------------------------------------------------- /src/execnet/gateway_io.py: -------------------------------------------------------------------------------- 1 | """execnet IO initialization code. 2 | 3 | Creates IO instances used for gateway IO. 
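
This module is also remote-executed on an intermediate "via" gateway
(see ``serve_proxy_io`` at the bottom of this file and ``Group.makegateway``
in ``multi.py``) so that a sub gateway's IO can be proxied back to the
initiating side.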
4 | """ 5 | 6 | from __future__ import annotations 7 | 8 | import shlex 9 | import sys 10 | from typing import TYPE_CHECKING 11 | from typing import cast 12 | 13 | if TYPE_CHECKING: 14 | from execnet.gateway_base import Channel 15 | from execnet.gateway_base import ExecModel 16 | from execnet.xspec import XSpec 17 | 18 | try: 19 | from execnet.gateway_base import Message 20 | from execnet.gateway_base import Popen2IO 21 | except ImportError: 22 | from __main__ import Message # type: ignore[no-redef] 23 | from __main__ import Popen2IO # type: ignore[no-redef] 24 | 25 | from functools import partial 26 | 27 | 28 | class Popen2IOMaster(Popen2IO): 29 | # Set externally, for some specs only. 30 | remoteaddress: str 31 | 32 | def __init__(self, args, execmodel: ExecModel) -> None: 33 | PIPE = execmodel.subprocess.PIPE 34 | self.popen = p = execmodel.subprocess.Popen(args, stdout=PIPE, stdin=PIPE) 35 | super().__init__(p.stdin, p.stdout, execmodel=execmodel) 36 | 37 | def wait(self) -> int | None: 38 | try: 39 | return self.popen.wait() # type: ignore[no-any-return] 40 | except OSError: 41 | return None 42 | 43 | def kill(self) -> None: 44 | try: 45 | self.popen.kill() 46 | except OSError as e: 47 | sys.stderr.write("ERROR killing: %s\n" % e) 48 | sys.stderr.flush() 49 | 50 | 51 | popen_bootstrapline = "import sys;exec(eval(sys.stdin.readline()))" 52 | 53 | 54 | def shell_split_path(path: str) -> list[str]: 55 | """ 56 | Use shell lexer to split the given path into a list of components, 57 | taking care to handle Windows' '\' correctly. 58 | """ 59 | if sys.platform.startswith("win"): 60 | # replace \\ by / otherwise shlex will strip them out 61 | path = path.replace("\\", "/") 62 | return shlex.split(path) 63 | 64 | 65 | def popen_args(spec: XSpec) -> list[str]: 66 | args = shell_split_path(spec.python) if spec.python else [sys.executable] 67 | args.append("-u") 68 | if spec.dont_write_bytecode: 69 | args.append("-B") 70 | args.extend(["-c", popen_bootstrapline]) 71 | return args 72 | 73 | 74 | def ssh_args(spec: XSpec) -> list[str]: 75 | # NOTE: If changing this, you need to sync those changes to vagrant_args 76 | # as well, or, take some time to further refactor the commonalities of 77 | # ssh_args and vagrant_args. 78 | remotepython = spec.python or "python" 79 | args = ["ssh", "-C"] 80 | if spec.ssh_config is not None: 81 | args.extend(["-F", str(spec.ssh_config)]) 82 | 83 | assert spec.ssh is not None 84 | args.extend(spec.ssh.split()) 85 | remotecmd = f'{remotepython} -c "{popen_bootstrapline}"' 86 | args.append(remotecmd) 87 | return args 88 | 89 | 90 | def vagrant_ssh_args(spec: XSpec) -> list[str]: 91 | # This is the vagrant-wrapped version of SSH. Unfortunately the 92 | # command lines are incompatible to just channel through ssh_args 93 | # due to ordering/templating issues. 94 | # NOTE: This should be kept in sync with the ssh_args behaviour. 95 | # spec.vagrant is identical to spec.ssh in that they both carry 96 | # the remote host "address". 
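    # Illustrative shape of the resulting argv, assuming a vagrant machine
    # named "default", the default remote python and no custom ssh_config:
    #   ['vagrant', 'ssh', 'default', '--', '-C',
    #    'python -c "import sys;exec(eval(sys.stdin.readline()))"']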
97 | assert spec.vagrant_ssh is not None 98 | remotepython = spec.python or "python" 99 | args = ["vagrant", "ssh", spec.vagrant_ssh, "--", "-C"] 100 | if spec.ssh_config is not None: 101 | args.extend(["-F", str(spec.ssh_config)]) 102 | remotecmd = f'{remotepython} -c "{popen_bootstrapline}"' 103 | args.extend([remotecmd]) 104 | return args 105 | 106 | 107 | def create_io(spec: XSpec, execmodel: ExecModel) -> Popen2IOMaster: 108 | if spec.popen: 109 | args = popen_args(spec) 110 | return Popen2IOMaster(args, execmodel) 111 | if spec.ssh: 112 | args = ssh_args(spec) 113 | io = Popen2IOMaster(args, execmodel) 114 | io.remoteaddress = spec.ssh 115 | return io 116 | if spec.vagrant_ssh: 117 | args = vagrant_ssh_args(spec) 118 | io = Popen2IOMaster(args, execmodel) 119 | io.remoteaddress = spec.vagrant_ssh 120 | return io 121 | assert False 122 | 123 | 124 | # 125 | # Proxy Gateway handling code 126 | # 127 | # master: proxy initiator 128 | # forwarder: forwards between master and sub 129 | # sub: sub process that is proxied to the initiator 130 | 131 | RIO_KILL = 1 132 | RIO_WAIT = 2 133 | RIO_REMOTEADDRESS = 3 134 | RIO_CLOSE_WRITE = 4 135 | 136 | 137 | class ProxyIO: 138 | """A Proxy IO object allows to instantiate a Gateway 139 | through another "via" gateway. 140 | 141 | A master:ProxyIO object provides an IO object effectively connected to the 142 | sub via the forwarder. To achieve this, master:ProxyIO interacts with 143 | forwarder:serve_proxy_io() which itself instantiates and interacts with the 144 | sub. 145 | """ 146 | 147 | def __init__(self, proxy_channel: Channel, execmodel: ExecModel) -> None: 148 | # after exchanging the control channel we use proxy_channel 149 | # for messaging IO 150 | self.controlchan = proxy_channel.gateway.newchannel() 151 | proxy_channel.send(self.controlchan) 152 | self.iochan = proxy_channel 153 | self.iochan_file = self.iochan.makefile("r") 154 | self.execmodel = execmodel 155 | 156 | def read(self, nbytes: int) -> bytes: 157 | # TODO(typing): The IO protocol requires bytes here but ChannelFileRead 158 | # returns str. 
159 | return self.iochan_file.read(nbytes) # type: ignore[return-value] 160 | 161 | def write(self, data: bytes) -> None: 162 | self.iochan.send(data) 163 | 164 | def _controll(self, event: int) -> object: 165 | self.controlchan.send(event) 166 | return self.controlchan.receive() 167 | 168 | def close_write(self) -> None: 169 | self._controll(RIO_CLOSE_WRITE) 170 | 171 | def close_read(self) -> None: 172 | raise NotImplementedError() 173 | 174 | def kill(self) -> None: 175 | self._controll(RIO_KILL) 176 | 177 | def wait(self) -> int | None: 178 | response = self._controll(RIO_WAIT) 179 | assert response is None or isinstance(response, int) 180 | return response 181 | 182 | @property 183 | def remoteaddress(self) -> str: 184 | response = self._controll(RIO_REMOTEADDRESS) 185 | assert isinstance(response, str) 186 | return response 187 | 188 | def __repr__(self) -> str: 189 | return f"" 190 | 191 | 192 | class PseudoSpec: 193 | def __init__(self, vars) -> None: 194 | self.__dict__.update(vars) 195 | 196 | def __getattr__(self, name: str) -> None: 197 | return None 198 | 199 | 200 | def serve_proxy_io(proxy_channelX: Channel) -> None: 201 | execmodel = proxy_channelX.gateway.execmodel 202 | log = partial( 203 | proxy_channelX.gateway._trace, "serve_proxy_io:%s" % proxy_channelX.id 204 | ) 205 | spec = cast("XSpec", PseudoSpec(proxy_channelX.receive())) 206 | # create sub IO object which we will proxy back to our proxy initiator 207 | sub_io = create_io(spec, execmodel) 208 | control_chan = cast("Channel", proxy_channelX.receive()) 209 | log("got control chan", control_chan) 210 | 211 | # read data from master, forward it to the sub 212 | # XXX writing might block, thus blocking the receiver thread 213 | def forward_to_sub(data: bytes) -> None: 214 | log("forward data to sub, size %s" % len(data)) 215 | sub_io.write(data) 216 | 217 | proxy_channelX.setcallback(forward_to_sub) 218 | 219 | def control(data: int) -> None: 220 | if data == RIO_WAIT: 221 | control_chan.send(sub_io.wait()) 222 | elif data == RIO_KILL: 223 | sub_io.kill() 224 | control_chan.send(None) 225 | elif data == RIO_REMOTEADDRESS: 226 | control_chan.send(sub_io.remoteaddress) 227 | elif data == RIO_CLOSE_WRITE: 228 | sub_io.close_write() 229 | control_chan.send(None) 230 | 231 | control_chan.setcallback(control) 232 | 233 | # write data to the master coming from the sub 234 | forward_to_master_file = proxy_channelX.makefile("w") 235 | 236 | # read bootstrap byte from sub, send it on to master 237 | log("reading bootstrap byte from sub", spec.id) 238 | initial = sub_io.read(1) 239 | assert initial == b"1", initial 240 | log("forwarding bootstrap byte from sub", spec.id) 241 | forward_to_master_file.write(initial) 242 | 243 | # enter message forwarding loop 244 | while True: 245 | try: 246 | message = Message.from_io(sub_io) 247 | except EOFError: 248 | log("EOF from sub, terminating proxying loop", spec.id) 249 | break 250 | message.to_io(forward_to_master_file) 251 | # proxy_channelX will be closed from remote_exec's finalization code 252 | 253 | 254 | if __name__ == "__channelexec__": 255 | serve_proxy_io(channel) # type: ignore[name-defined] # noqa:F821 256 | -------------------------------------------------------------------------------- /src/execnet/gateway_socket.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import sys 4 | from typing import cast 5 | 6 | from execnet.gateway import Gateway 7 | from execnet.gateway_base import ExecModel 
8 | from execnet.gateway_bootstrap import HostNotFound 9 | from execnet.multi import Group 10 | from execnet.xspec import XSpec 11 | 12 | 13 | class SocketIO: 14 | remoteaddress: str 15 | 16 | def __init__(self, sock, execmodel: ExecModel) -> None: 17 | self.sock = sock 18 | self.execmodel = execmodel 19 | socket = execmodel.socket 20 | try: 21 | # IPTOS_LOWDELAY 22 | sock.setsockopt(socket.SOL_IP, socket.IP_TOS, 0x10) 23 | sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) 24 | except (AttributeError, OSError): 25 | sys.stderr.write("WARNING: cannot set socketoption") 26 | 27 | def read(self, numbytes: int) -> bytes: 28 | "Read exactly 'bytes' bytes from the socket." 29 | buf = b"" 30 | while len(buf) < numbytes: 31 | t = self.sock.recv(numbytes - len(buf)) 32 | if not t: 33 | raise EOFError 34 | buf += t 35 | return buf 36 | 37 | def write(self, data: bytes) -> None: 38 | self.sock.sendall(data) 39 | 40 | def close_read(self) -> None: 41 | try: 42 | self.sock.shutdown(0) 43 | except self.execmodel.socket.error: 44 | pass 45 | 46 | def close_write(self) -> None: 47 | try: 48 | self.sock.shutdown(1) 49 | except self.execmodel.socket.error: 50 | pass 51 | 52 | def wait(self) -> None: 53 | pass 54 | 55 | def kill(self) -> None: 56 | pass 57 | 58 | 59 | def start_via( 60 | gateway: Gateway, hostport: tuple[str, int] | None = None 61 | ) -> tuple[str, int]: 62 | """Instantiate a socketserver on the given gateway. 63 | 64 | Returns a host, port tuple. 65 | """ 66 | if hostport is None: 67 | host, port = ("localhost", 0) 68 | else: 69 | host, port = hostport 70 | 71 | from execnet.script import socketserver 72 | 73 | # execute the above socketserverbootstrap on the other side 74 | channel = gateway.remote_exec(socketserver) 75 | channel.send((host, port)) 76 | realhost, realport = cast("tuple[str, int]", channel.receive()) 77 | # self._trace("new_remote received" 78 | # "port=%r, hostname = %r" %(realport, hostname)) 79 | if not realhost or realhost == "0.0.0.0": 80 | realhost = "localhost" 81 | return realhost, realport 82 | 83 | 84 | def create_io(spec: XSpec, group: Group, execmodel: ExecModel) -> SocketIO: 85 | assert spec.socket is not None 86 | assert not spec.python, "socket: specifying python executables not yet supported" 87 | gateway_id = spec.installvia 88 | if gateway_id: 89 | host, port = start_via(group[gateway_id]) 90 | else: 91 | host, port_str = spec.socket.split(":") 92 | port = int(port_str) 93 | 94 | socket = execmodel.socket 95 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 96 | io = SocketIO(sock, execmodel) 97 | io.remoteaddress = "%s:%d" % (host, port) 98 | try: 99 | sock.connect((host, port)) 100 | except execmodel.socket.gaierror as e: 101 | raise HostNotFound() from e 102 | return io 103 | -------------------------------------------------------------------------------- /src/execnet/multi.py: -------------------------------------------------------------------------------- 1 | """ 2 | Managing Gateway Groups and interactions with multiple channels. 3 | 4 | (c) 2008-2014, Holger Krekel and others 5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import atexit 10 | import types 11 | from functools import partial 12 | from threading import Lock 13 | from typing import TYPE_CHECKING 14 | from typing import Any 15 | from typing import Callable 16 | from typing import Iterable 17 | from typing import Iterator 18 | from typing import Literal 19 | from typing import Sequence 20 | from typing import overload 21 | 22 | from . 
import gateway_bootstrap 23 | from . import gateway_io 24 | from .gateway_base import Channel 25 | from .gateway_base import ExecModel 26 | from .gateway_base import WorkerPool 27 | from .gateway_base import get_execmodel 28 | from .gateway_base import trace 29 | from .xspec import XSpec 30 | 31 | if TYPE_CHECKING: 32 | from .gateway import Gateway 33 | 34 | 35 | NO_ENDMARKER_WANTED = object() 36 | 37 | 38 | class Group: 39 | """Gateway Group.""" 40 | 41 | defaultspec = "popen" 42 | 43 | def __init__( 44 | self, xspecs: Iterable[XSpec | str | None] = (), execmodel: str = "thread" 45 | ) -> None: 46 | """Initialize a group and make gateways as specified. 47 | 48 | execmodel can be one of the supported execution models. 49 | """ 50 | self._gateways: list[Gateway] = [] 51 | self._autoidcounter = 0 52 | self._autoidlock = Lock() 53 | self._gateways_to_join: list[Gateway] = [] 54 | # we use the same execmodel for all of the Gateway objects 55 | # we spawn on our side. Probably we should not allow different 56 | # execmodels between different groups but not clear. 57 | # Note that "other side" execmodels may differ and is typically 58 | # specified by the spec passed to makegateway. 59 | self.set_execmodel(execmodel) 60 | for xspec in xspecs: 61 | self.makegateway(xspec) 62 | atexit.register(self._cleanup_atexit) 63 | 64 | @property 65 | def execmodel(self) -> ExecModel: 66 | return self._execmodel 67 | 68 | @property 69 | def remote_execmodel(self) -> ExecModel: 70 | return self._remote_execmodel 71 | 72 | def set_execmodel( 73 | self, execmodel: str, remote_execmodel: str | None = None 74 | ) -> None: 75 | """Set the execution model for local and remote site. 76 | 77 | execmodel can be one of the supported execution models. 78 | It determines the execution model for any newly created gateway. 79 | If remote_execmodel is not specified it takes on the value of execmodel. 80 | 81 | NOTE: Execution models can only be set before any gateway is created. 82 | """ 83 | if self._gateways: 84 | raise ValueError( 85 | "can not set execution models if gateways have been created already" 86 | ) 87 | if remote_execmodel is None: 88 | remote_execmodel = execmodel 89 | self._execmodel = get_execmodel(execmodel) 90 | self._remote_execmodel = get_execmodel(remote_execmodel) 91 | 92 | def __repr__(self) -> str: 93 | idgateways = [gw.id for gw in self] 94 | return "" % idgateways 95 | 96 | def __getitem__(self, key: int | str | Gateway) -> Gateway: 97 | if isinstance(key, int): 98 | return self._gateways[key] 99 | for gw in self._gateways: 100 | if gw == key or gw.id == key: 101 | return gw 102 | raise KeyError(key) 103 | 104 | def __contains__(self, key: str) -> bool: 105 | try: 106 | self[key] 107 | return True 108 | except KeyError: 109 | return False 110 | 111 | def __len__(self) -> int: 112 | return len(self._gateways) 113 | 114 | def __iter__(self) -> Iterator[Gateway]: 115 | return iter(list(self._gateways)) 116 | 117 | def makegateway(self, spec: XSpec | str | None = None) -> Gateway: 118 | """Create and configure a gateway to a Python interpreter. 119 | 120 | The ``spec`` string encodes the target gateway type 121 | and configuration information. The general format is:: 122 | 123 | key1=value1//key2=value2//... 124 | 125 | If you leave out the ``=value`` part a True value is assumed. 126 | Valid types: ``popen``, ``ssh=hostname``, ``socket=host:port``. 
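        For example (``myhost`` being an illustrative placeholder):
        ``"popen//id=sub1"`` or ``"ssh=myhost//python=python3"``;
        the supported configuration keys are listed below.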
127 | Valid configuration:: 128 | 129 | id= specifies the gateway id 130 | python= specifies which python interpreter to execute 131 | execmodel=model 'thread', 'main_thread_only', 'eventlet', 'gevent' execution model 132 | chdir= specifies to which directory to change 133 | nice= specifies process priority of new process 134 | env:NAME=value specifies a remote environment variable setting. 135 | 136 | If no spec is given, self.defaultspec is used. 137 | """ 138 | if not spec: 139 | spec = self.defaultspec 140 | if not isinstance(spec, XSpec): 141 | spec = XSpec(spec) 142 | self.allocate_id(spec) 143 | if spec.execmodel is None: 144 | spec.execmodel = self.remote_execmodel.backend 145 | if spec.via: 146 | assert not spec.socket 147 | master = self[spec.via] 148 | proxy_channel = master.remote_exec(gateway_io) 149 | proxy_channel.send(vars(spec)) 150 | proxy_io_master = gateway_io.ProxyIO(proxy_channel, self.execmodel) 151 | gw = gateway_bootstrap.bootstrap(proxy_io_master, spec) 152 | elif spec.popen or spec.ssh or spec.vagrant_ssh: 153 | io = gateway_io.create_io(spec, execmodel=self.execmodel) 154 | gw = gateway_bootstrap.bootstrap(io, spec) 155 | elif spec.socket: 156 | from . import gateway_socket 157 | 158 | sio = gateway_socket.create_io(spec, self, execmodel=self.execmodel) 159 | gw = gateway_bootstrap.bootstrap(sio, spec) 160 | else: 161 | raise ValueError(f"no gateway type found for {spec._spec!r}") 162 | gw.spec = spec 163 | self._register(gw) 164 | if spec.chdir or spec.nice or spec.env: 165 | channel = gw.remote_exec( 166 | """ 167 | import os 168 | path, nice, env = channel.receive() 169 | if path: 170 | if not os.path.exists(path): 171 | os.mkdir(path) 172 | os.chdir(path) 173 | if nice and hasattr(os, 'nice'): 174 | os.nice(nice) 175 | if env: 176 | for name, value in env.items(): 177 | os.environ[name] = value 178 | """ 179 | ) 180 | nice = (spec.nice and int(spec.nice)) or 0 181 | channel.send((spec.chdir, nice, spec.env)) 182 | channel.waitclose() 183 | return gw 184 | 185 | def allocate_id(self, spec: XSpec) -> None: 186 | """(re-entrant) allocate id for the given xspec object.""" 187 | if spec.id is None: 188 | with self._autoidlock: 189 | id = "gw" + str(self._autoidcounter) 190 | self._autoidcounter += 1 191 | if id in self: 192 | raise ValueError(f"already have gateway with id {id!r}") 193 | spec.id = id 194 | 195 | def _register(self, gateway: Gateway) -> None: 196 | assert not hasattr(gateway, "_group") 197 | assert gateway.id 198 | assert gateway.id not in self 199 | self._gateways.append(gateway) 200 | gateway._group = self 201 | 202 | def _unregister(self, gateway: Gateway) -> None: 203 | self._gateways.remove(gateway) 204 | self._gateways_to_join.append(gateway) 205 | 206 | def _cleanup_atexit(self) -> None: 207 | trace(f"=== atexit cleanup {self!r} ===") 208 | self.terminate(timeout=1.0) 209 | 210 | def terminate(self, timeout: float | None = None) -> None: 211 | """Trigger exit of member gateways and wait for termination 212 | of member gateways and associated subprocesses. 213 | 214 | After waiting timeout seconds try to to kill local sub processes of 215 | popen- and ssh-gateways. 216 | 217 | Timeout defaults to None meaning open-ended waiting and no kill 218 | attempts. 
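For example (illustrative call): ``group.terminate(timeout=1.0)`` waits up to one second for each member gateway to shut down before killing its local subprocess.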
219 | """ 220 | while self: 221 | vias: set[str] = set() 222 | for gw in self: 223 | if gw.spec.via: 224 | vias.add(gw.spec.via) 225 | for gw in self: 226 | if gw.id not in vias: 227 | gw.exit() 228 | 229 | def join_wait(gw: Gateway) -> None: 230 | gw.join() 231 | gw._io.wait() 232 | 233 | def kill(gw: Gateway) -> None: 234 | trace("Gateways did not come down after timeout: %r" % gw) 235 | gw._io.kill() 236 | 237 | safe_terminate( 238 | self.execmodel, 239 | timeout, 240 | [ 241 | (partial(join_wait, gw), partial(kill, gw)) 242 | for gw in self._gateways_to_join 243 | ], 244 | ) 245 | self._gateways_to_join[:] = [] 246 | 247 | def remote_exec( 248 | self, 249 | source: str | types.FunctionType | Callable[..., object] | types.ModuleType, 250 | **kwargs, 251 | ) -> MultiChannel: 252 | """remote_exec source on all member gateways and return 253 | a MultiChannel connecting to all sub processes.""" 254 | channels = [] 255 | for gw in self: 256 | channels.append(gw.remote_exec(source, **kwargs)) 257 | return MultiChannel(channels) 258 | 259 | 260 | class MultiChannel: 261 | def __init__(self, channels: Sequence[Channel]) -> None: 262 | self._channels = channels 263 | 264 | def __len__(self) -> int: 265 | return len(self._channels) 266 | 267 | def __iter__(self) -> Iterator[Channel]: 268 | return iter(self._channels) 269 | 270 | def __getitem__(self, key: int) -> Channel: 271 | return self._channels[key] 272 | 273 | def __contains__(self, chan: Channel) -> bool: 274 | return chan in self._channels 275 | 276 | def send_each(self, item: object) -> None: 277 | for ch in self._channels: 278 | ch.send(item) 279 | 280 | @overload 281 | def receive_each(self, withchannel: Literal[False] = ...) -> list[Any]: 282 | pass 283 | 284 | @overload 285 | def receive_each(self, withchannel: Literal[True]) -> list[tuple[Channel, Any]]: 286 | pass 287 | 288 | def receive_each( 289 | self, withchannel: bool = False 290 | ) -> list[tuple[Channel, Any]] | list[Any]: 291 | assert not hasattr(self, "_queue") 292 | l: list[object] = [] 293 | for ch in self._channels: 294 | obj = ch.receive() 295 | if withchannel: 296 | l.append((ch, obj)) 297 | else: 298 | l.append(obj) 299 | return l 300 | 301 | def make_receive_queue(self, endmarker: object = NO_ENDMARKER_WANTED): 302 | try: 303 | return self._queue # type: ignore[has-type] 304 | except AttributeError: 305 | self._queue = None 306 | for ch in self._channels: 307 | if self._queue is None: 308 | self._queue = ch.gateway.execmodel.queue.Queue() 309 | 310 | def putreceived(obj, channel: Channel = ch) -> None: 311 | self._queue.put((channel, obj)) # type: ignore[union-attr] 312 | 313 | if endmarker is NO_ENDMARKER_WANTED: 314 | ch.setcallback(putreceived) 315 | else: 316 | ch.setcallback(putreceived, endmarker=endmarker) 317 | return self._queue 318 | 319 | def waitclose(self) -> None: 320 | first = None 321 | for ch in self._channels: 322 | try: 323 | ch.waitclose() 324 | except ch.RemoteError as exc: 325 | if first is None: 326 | first = exc 327 | if first: 328 | raise first 329 | 330 | 331 | def safe_terminate( 332 | execmodel: ExecModel, timeout: float | None, list_of_paired_functions 333 | ) -> None: 334 | workerpool = WorkerPool(execmodel) 335 | 336 | def termkill(termfunc, killfunc) -> None: 337 | termreply = workerpool.spawn(termfunc) 338 | try: 339 | termreply.get(timeout=timeout) 340 | except OSError: 341 | killfunc() 342 | 343 | replylist = [] 344 | for termfunc, killfunc in list_of_paired_functions: 345 | reply = workerpool.spawn(termkill, termfunc, killfunc) 
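# Note: for each pair, termkill() runs the terminate function in the worker
# pool; if waiting on its reply raises OSError, the paired kill function is
# called instead.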
346 | replylist.append(reply) 347 | for reply in replylist: 348 | reply.get() 349 | workerpool.waitall(timeout=timeout) 350 | 351 | 352 | default_group = Group() 353 | makegateway = default_group.makegateway 354 | set_execmodel = default_group.set_execmodel 355 | -------------------------------------------------------------------------------- /src/execnet/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pytest-dev/execnet/e0d703541453d4d7a623e749184b73440f63d525/src/execnet/py.typed -------------------------------------------------------------------------------- /src/execnet/rsync.py: -------------------------------------------------------------------------------- 1 | """ 2 | 1:N rsync implementation on top of execnet. 3 | 4 | (c) 2006-2009, Armin Rigo, Holger Krekel, Maciej Fijalkowski 5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import os 10 | import stat 11 | from hashlib import md5 12 | from queue import Queue 13 | from typing import Callable 14 | from typing import Literal 15 | 16 | import execnet.rsync_remote 17 | from execnet.gateway import Gateway 18 | from execnet.gateway_base import BaseGateway 19 | from execnet.gateway_base import Channel 20 | 21 | 22 | class RSync: 23 | """This class allows to send a directory structure (recursively) 24 | to one or multiple remote filesystems. 25 | 26 | There is limited support for symlinks, which means that symlinks 27 | pointing to the sourcetree will be send "as is" while external 28 | symlinks will be just copied (regardless of existence of such 29 | a path on remote side). 30 | """ 31 | 32 | def __init__(self, sourcedir, callback=None, verbose: bool = True) -> None: 33 | self._sourcedir = str(sourcedir) 34 | self._verbose = verbose 35 | assert callback is None or callable(callback) 36 | self._callback = callback 37 | self._channels: dict[Channel, Callable[[], None] | None] = {} 38 | self._receivequeue: Queue[ 39 | tuple[ 40 | Channel, 41 | ( 42 | None 43 | | tuple[Literal["send"], tuple[list[str], bytes]] 44 | | tuple[Literal["list_done"], None] 45 | | tuple[Literal["ack"], str] 46 | | tuple[Literal["links"], None] 47 | | tuple[Literal["done"], None] 48 | ), 49 | ] 50 | ] = Queue() 51 | self._links: list[tuple[Literal["linkbase", "link"], str, str]] = [] 52 | 53 | def filter(self, path: str) -> bool: 54 | return True 55 | 56 | def _end_of_channel(self, channel: Channel) -> None: 57 | if channel in self._channels: 58 | # too early! 
we must have got an error 59 | channel.waitclose() 60 | # or else we raise one 61 | raise OSError(f"connection unexpectedly closed: {channel.gateway} ") 62 | 63 | def _process_link(self, channel: Channel) -> None: 64 | for link in self._links: 65 | channel.send(link) 66 | # completion marker, this host is done 67 | channel.send(42) 68 | 69 | def _done(self, channel: Channel) -> None: 70 | """Call all callbacks.""" 71 | finishedcallback = self._channels.pop(channel) 72 | if finishedcallback: 73 | finishedcallback() 74 | channel.waitclose() 75 | 76 | def _list_done(self, channel: Channel) -> None: 77 | # sum up all to send 78 | if self._callback: 79 | s = sum([self._paths[i] for i in self._to_send[channel]]) 80 | self._callback("list", s, channel) 81 | 82 | def _send_item( 83 | self, 84 | channel: Channel, 85 | modified_rel_path_components: list[str], 86 | checksum: bytes, 87 | ) -> None: 88 | """Send one item.""" 89 | modifiedpath = os.path.join(self._sourcedir, *modified_rel_path_components) 90 | try: 91 | f = open(modifiedpath, "rb") 92 | data = f.read() 93 | except OSError: 94 | data = None 95 | 96 | # provide info to progress callback function 97 | modified_rel_path = "/".join(modified_rel_path_components) 98 | if data is not None: 99 | self._paths[modified_rel_path] = len(data) 100 | else: 101 | self._paths[modified_rel_path] = 0 102 | if channel not in self._to_send: 103 | self._to_send[channel] = [] 104 | self._to_send[channel].append(modified_rel_path) 105 | # print "sending", modified_rel_path, data and len(data) or 0, checksum 106 | 107 | if data is not None: 108 | f.close() 109 | if checksum is not None and checksum == md5(data).digest(): 110 | data = None # not really modified 111 | else: 112 | self._report_send_file(channel.gateway, modified_rel_path) 113 | channel.send(data) 114 | 115 | def _report_send_file(self, gateway: BaseGateway, modified_rel_path: str) -> None: 116 | if self._verbose: 117 | print(f"{gateway} <= {modified_rel_path}") 118 | 119 | def send(self, raises: bool = True) -> None: 120 | """Sends a sourcedir to all added targets. 121 | 122 | raises indicates whether to raise an error or return in case of lack of 123 | targets. 124 | """ 125 | if not self._channels: 126 | if raises: 127 | raise OSError( 128 | "no targets available, maybe you are trying call send() twice?" 
129 | ) 130 | return 131 | # normalize a trailing '/' away 132 | self._sourcedir = os.path.dirname(os.path.join(self._sourcedir, "x")) 133 | # send directory structure and file timestamps/sizes 134 | self._send_directory_structure(self._sourcedir) 135 | 136 | # paths and to_send are only used for doing 137 | # progress-related callbacks 138 | self._paths: dict[str, int] = {} 139 | self._to_send: dict[Channel, list[str]] = {} 140 | 141 | # send modified file to clients 142 | while self._channels: 143 | channel, req = self._receivequeue.get() 144 | if req is None: 145 | self._end_of_channel(channel) 146 | else: 147 | if req[0] == "links": 148 | self._process_link(channel) 149 | elif req[0] == "done": 150 | self._done(channel) 151 | elif req[0] == "ack": 152 | if self._callback: 153 | self._callback("ack", self._paths[req[1]], channel) 154 | elif req[0] == "list_done": 155 | self._list_done(channel) 156 | elif req[0] == "send": 157 | self._send_item(channel, req[1][0], req[1][1]) 158 | else: 159 | assert "Unknown command %s" % req[0] # type: ignore[unreachable] 160 | 161 | def add_target( 162 | self, 163 | gateway: Gateway, 164 | destdir: str | os.PathLike[str], 165 | finishedcallback: Callable[[], None] | None = None, 166 | **options, 167 | ) -> None: 168 | """Add a remote target specified via a gateway and a remote destination 169 | directory.""" 170 | for name in options: 171 | assert name in ("delete",) 172 | 173 | def itemcallback(req) -> None: 174 | self._receivequeue.put((channel, req)) 175 | 176 | channel = gateway.remote_exec(execnet.rsync_remote) 177 | channel.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False) 178 | channel.setcallback(itemcallback, endmarker=None) 179 | channel.send((str(destdir), options)) 180 | self._channels[channel] = finishedcallback 181 | 182 | def _broadcast(self, msg: object) -> None: 183 | for channel in self._channels: 184 | channel.send(msg) 185 | 186 | def _send_link( 187 | self, 188 | linktype: Literal["linkbase", "link"], 189 | basename: str, 190 | linkpoint: str, 191 | ) -> None: 192 | self._links.append((linktype, basename, linkpoint)) 193 | 194 | def _send_directory(self, path: str) -> None: 195 | # dir: send a list of entries 196 | names = [] 197 | subpaths = [] 198 | for name in os.listdir(path): 199 | p = os.path.join(path, name) 200 | if self.filter(p): 201 | names.append(name) 202 | subpaths.append(p) 203 | mode = os.lstat(path).st_mode 204 | self._broadcast([mode, *names]) 205 | for p in subpaths: 206 | self._send_directory_structure(p) 207 | 208 | def _send_link_structure(self, path: str) -> None: 209 | sourcedir = self._sourcedir 210 | basename = path[len(self._sourcedir) + 1 :] 211 | linkpoint = os.readlink(path) 212 | # On Windows, readlink returns an extended path (//?/) for 213 | # absolute links, but relpath doesn't like mixing extended 214 | # and non-extended paths. So fix it up ourselves. 
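# Illustration (assumed values): with linkpoint '\\?\C:\src\tree\lib' and
# self._sourcedir 'C:\src\tree', the two forms are mixed; prefixing the
# sourcedir below lets os.path.relpath() yield 'lib' as intended.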
215 | if ( 216 | os.path.__name__ == "ntpath" 217 | and linkpoint.startswith("\\\\?\\") 218 | and not self._sourcedir.startswith("\\\\?\\") 219 | ): 220 | sourcedir = "\\\\?\\" + self._sourcedir 221 | try: 222 | relpath = os.path.relpath(linkpoint, sourcedir) 223 | except ValueError: 224 | relpath = None 225 | if ( 226 | relpath is not None 227 | and relpath not in (os.curdir, os.pardir) 228 | and not relpath.startswith(os.pardir + os.sep) 229 | ): 230 | self._send_link("linkbase", basename, relpath) 231 | else: 232 | # relative or absolute link, just send it 233 | self._send_link("link", basename, linkpoint) 234 | self._broadcast(None) 235 | 236 | def _send_directory_structure(self, path: str) -> None: 237 | try: 238 | st = os.lstat(path) 239 | except OSError: 240 | self._broadcast((None, 0, 0)) 241 | return 242 | if stat.S_ISREG(st.st_mode): 243 | # regular file: send a mode/timestamp/size pair 244 | self._broadcast((st.st_mode, st.st_mtime, st.st_size)) 245 | elif stat.S_ISDIR(st.st_mode): 246 | self._send_directory(path) 247 | elif stat.S_ISLNK(st.st_mode): 248 | self._send_link_structure(path) 249 | else: 250 | raise ValueError(f"cannot sync {path!r}") 251 | -------------------------------------------------------------------------------- /src/execnet/rsync_remote.py: -------------------------------------------------------------------------------- 1 | """ 2 | (c) 2006-2013, Armin Rigo, Holger Krekel, Maciej Fijalkowski 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | from typing import TYPE_CHECKING 8 | from typing import Literal 9 | from typing import cast 10 | 11 | if TYPE_CHECKING: 12 | from execnet.gateway_base import Channel 13 | 14 | 15 | def serve_rsync(channel: Channel) -> None: 16 | import os 17 | import shutil 18 | import stat 19 | from hashlib import md5 20 | 21 | destdir, options = cast("tuple[str, dict[str, object]]", channel.receive()) 22 | modifiedfiles = [] 23 | 24 | def remove(path: str) -> None: 25 | assert path.startswith(destdir) 26 | try: 27 | os.unlink(path) 28 | except OSError: 29 | # assume it's a dir 30 | shutil.rmtree(path, True) 31 | 32 | def receive_directory_structure(path: str, relcomponents: list[str]) -> None: 33 | try: 34 | st = os.lstat(path) 35 | except OSError: 36 | st = None 37 | msg = channel.receive() 38 | if isinstance(msg, list): 39 | if st and not stat.S_ISDIR(st.st_mode): 40 | os.unlink(path) 41 | st = None 42 | if not st: 43 | os.makedirs(path) 44 | mode = msg.pop(0) 45 | if mode: 46 | # Ensure directories are writable, otherwise a 47 | # permission denied error (EACCES) would be raised 48 | # when attempting to receive read-only directory 49 | # structures. 
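# For instance (illustrative permissions): a directory announced as
# read-only for the owner (e.g. 0o500) gets owner rwx OR'ed in here, so
# files can still be written into it while syncing.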
50 | os.chmod(path, mode | 0o700) 51 | entrynames = {} 52 | for entryname in msg: 53 | destpath = os.path.join(path, entryname) 54 | receive_directory_structure(destpath, [*relcomponents, entryname]) 55 | entrynames[entryname] = True 56 | if options.get("delete"): 57 | for othername in os.listdir(path): 58 | if othername not in entrynames: 59 | otherpath = os.path.join(path, othername) 60 | remove(otherpath) 61 | elif msg is not None: 62 | assert isinstance(msg, tuple) 63 | checksum = None 64 | if st: 65 | if stat.S_ISREG(st.st_mode): 66 | msg_mode, msg_mtime, msg_size = msg 67 | if msg_size != st.st_size: 68 | pass 69 | elif msg_mtime != st.st_mtime: 70 | f = open(path, "rb") 71 | checksum = md5(f.read()).digest() 72 | f.close() 73 | elif msg_mode and msg_mode != st.st_mode: 74 | os.chmod(path, msg_mode | 0o700) 75 | return 76 | else: 77 | return # already fine 78 | else: 79 | remove(path) 80 | channel.send(("send", (relcomponents, checksum))) 81 | modifiedfiles.append((path, msg)) 82 | 83 | receive_directory_structure(destdir, []) 84 | 85 | STRICT_CHECK = False # seems most useful this way for py.test 86 | channel.send(("list_done", None)) 87 | 88 | for path, (mode, time, size) in modifiedfiles: 89 | data = cast(bytes, channel.receive()) 90 | channel.send(("ack", path[len(destdir) + 1 :])) 91 | if data is not None: 92 | if STRICT_CHECK and len(data) != size: 93 | raise OSError(f"file modified during rsync: {path!r}") 94 | f = open(path, "wb") 95 | f.write(data) 96 | f.close() 97 | try: 98 | if mode: 99 | os.chmod(path, mode) 100 | os.utime(path, (time, time)) 101 | except OSError: 102 | pass 103 | del data 104 | channel.send(("links", None)) 105 | 106 | msg = channel.receive() 107 | while msg != 42: 108 | # we get symlink 109 | _type, relpath, linkpoint = cast( 110 | "tuple[Literal['linkbase', 'link'], str, str]", msg 111 | ) 112 | path = os.path.join(destdir, relpath) 113 | try: 114 | remove(path) 115 | except OSError: 116 | pass 117 | if _type == "linkbase": 118 | src = os.path.join(destdir, linkpoint) 119 | else: 120 | assert _type == "link", _type 121 | src = linkpoint 122 | os.symlink(src, path) 123 | msg = channel.receive() 124 | channel.send(("done", None)) 125 | 126 | 127 | if __name__ == "__channelexec__": 128 | serve_rsync(channel) # type: ignore[name-defined] # noqa:F821 129 | -------------------------------------------------------------------------------- /src/execnet/script/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | -------------------------------------------------------------------------------- /src/execnet/script/loop_socketserver.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | directory = os.path.dirname(os.path.abspath(sys.argv[0])) 7 | script = os.path.join(directory, "socketserver.py") 8 | while 1: 9 | cmdlist = ["python", script] 10 | cmdlist.extend(sys.argv[1:]) 11 | text = "starting subcommand: " + " ".join(cmdlist) 12 | print(text) 13 | process = subprocess.Popen(cmdlist) 14 | process.wait() 15 | -------------------------------------------------------------------------------- /src/execnet/script/quitserver.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | send a "quit" signal to a remote server 4 | 5 | """ 6 | 7 | from __future__ import annotations 8 | 9 | import socket 10 | import sys 11 | 12 | host, port = sys.argv[1].split(":") 
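# sys.argv[1] is expected to be of the form host:port, for example
# (illustrative) 'localhost:8888'.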
13 | hostport = (host, int(port)) 14 | 15 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 16 | sock.connect(hostport) 17 | sock.sendall(b'"raise KeyboardInterrupt"\n') 18 | -------------------------------------------------------------------------------- /src/execnet/script/shell.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """ 3 | a remote python shell 4 | 5 | for injection into startserver.py 6 | """ 7 | 8 | import os 9 | import select 10 | import socket 11 | import sys 12 | from threading import Thread 13 | from traceback import print_exc 14 | from typing import NoReturn 15 | 16 | 17 | def clientside() -> NoReturn: 18 | print("client side starting") 19 | host, portstr = sys.argv[1].split(":") 20 | port = int(portstr) 21 | myself = open(os.path.abspath(sys.argv[0])).read() 22 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 23 | sock.connect((host, port)) 24 | sock.sendall((repr(myself) + "\n").encode()) 25 | print("send boot string") 26 | inputlist = [sock, sys.stdin] 27 | try: 28 | while 1: 29 | r, w, e = select.select(inputlist, [], []) 30 | if sys.stdin in r: 31 | line = input() 32 | sock.sendall((line + "\n").encode()) 33 | if sock in r: 34 | line = sock.recv(4096).decode() 35 | sys.stdout.write(line) 36 | sys.stdout.flush() 37 | except BaseException: 38 | import traceback 39 | 40 | traceback.print_exc() 41 | 42 | sys.exit(1) 43 | 44 | 45 | class promptagent(Thread): 46 | def __init__(self, clientsock) -> None: 47 | print("server side starting") 48 | super().__init__() # type: ignore[call-overload] 49 | self.clientsock = clientsock 50 | 51 | def run(self) -> None: 52 | print("Entering thread prompt loop") 53 | clientfile = self.clientsock.makefile("w") 54 | 55 | filein = self.clientsock.makefile("r") 56 | loc = self.clientsock.getsockname() 57 | 58 | while 1: 59 | try: 60 | clientfile.write("{} {} >>> ".format(*loc)) 61 | clientfile.flush() 62 | line = filein.readline() 63 | if not line: 64 | raise EOFError("nothing") 65 | if line.strip(): 66 | oldout, olderr = sys.stdout, sys.stderr 67 | sys.stdout, sys.stderr = clientfile, clientfile 68 | try: 69 | try: 70 | exec(compile(line + "\n", "", "single")) 71 | except BaseException: 72 | print_exc() 73 | finally: 74 | sys.stdout = oldout 75 | sys.stderr = olderr 76 | clientfile.flush() 77 | except EOFError: 78 | sys.stderr.write("connection close, prompt thread returns") 79 | break 80 | 81 | self.clientsock.close() 82 | 83 | 84 | sock = globals().get("clientsock") 85 | if sock is not None: 86 | prompter = promptagent(sock) 87 | prompter.start() 88 | print("promptagent - thread started") 89 | else: 90 | clientside() 91 | -------------------------------------------------------------------------------- /src/execnet/script/socketserver.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | """ 3 | start socket based minimal readline exec server 4 | 5 | it can exeuted in 2 modes of operation 6 | 7 | 1. as normal script, that listens for new connections 8 | 9 | 2. 
via existing_gateway.remote_exec (as imported module) 10 | 11 | """ 12 | 13 | # this part of the program only executes on the server side 14 | # 15 | from __future__ import annotations 16 | 17 | import os 18 | import sys 19 | from typing import TYPE_CHECKING 20 | 21 | try: 22 | import fcntl 23 | except ImportError: 24 | fcntl = None # type: ignore[assignment] 25 | 26 | if TYPE_CHECKING: 27 | from execnet.gateway_base import Channel 28 | from execnet.gateway_base import ExecModel 29 | 30 | progname = "socket_readline_exec_server-1.2" 31 | 32 | 33 | debug = 0 34 | 35 | if debug: # and not os.isatty(sys.stdin.fileno()) 36 | f = open("/tmp/execnet-socket-pyout.log", "w") 37 | old = sys.stdout, sys.stderr 38 | sys.stdout = sys.stderr = f 39 | 40 | 41 | def print_(*args) -> None: 42 | print(" ".join(str(arg) for arg in args)) 43 | 44 | 45 | exec( 46 | """def exec_(source, locs): 47 | exec(source, locs)""" 48 | ) 49 | 50 | 51 | def exec_from_one_connection(serversock) -> None: 52 | print_(progname, "Entering Accept loop", serversock.getsockname()) 53 | clientsock, address = serversock.accept() 54 | print_(progname, "got new connection from {} {}".format(*address)) 55 | clientfile = clientsock.makefile("rb") 56 | print_("reading line") 57 | # rstrip so that we can use \r\n for telnet testing 58 | source = clientfile.readline().rstrip() 59 | clientfile.close() 60 | g = {"clientsock": clientsock, "address": address, "execmodel": execmodel} 61 | source = eval(source) 62 | if source: 63 | co = compile(source + "\n", "", "exec") 64 | print_(progname, "compiled source, executing") 65 | try: 66 | exec_(co, g) # type: ignore[name-defined] # noqa: F821 67 | finally: 68 | print_(progname, "finished executing code") 69 | # background thread might hold a reference to this (!?) 
70 | # clientsock.close() 71 | 72 | 73 | def bind_and_listen(hostport: str | tuple[str, int], execmodel: ExecModel): 74 | socket = execmodel.socket 75 | if isinstance(hostport, str): 76 | host, port = hostport.split(":") 77 | hostport = (host, int(port)) 78 | serversock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 79 | # set close-on-exec 80 | if hasattr(fcntl, "FD_CLOEXEC"): 81 | old = fcntl.fcntl(serversock.fileno(), fcntl.F_GETFD) 82 | fcntl.fcntl(serversock.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC) 83 | # allow the address to be reused in a reasonable amount of time 84 | if os.name == "posix" and sys.platform != "cygwin": 85 | serversock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 86 | 87 | serversock.bind(hostport) 88 | serversock.listen(5) 89 | return serversock 90 | 91 | 92 | def startserver(serversock, loop: bool = False) -> None: 93 | execute_path = os.getcwd() 94 | try: 95 | while 1: 96 | try: 97 | exec_from_one_connection(serversock) 98 | except (KeyboardInterrupt, SystemExit): 99 | raise 100 | except BaseException as exc: 101 | if debug: 102 | import traceback 103 | 104 | traceback.print_exc() 105 | else: 106 | print_("got exception", exc) 107 | os.chdir(execute_path) 108 | if not loop: 109 | break 110 | finally: 111 | print_("leaving socketserver execloop") 112 | serversock.shutdown(2) 113 | 114 | 115 | if __name__ == "__main__": 116 | import sys 117 | 118 | if len(sys.argv) > 1: 119 | hostport = sys.argv[1] 120 | else: 121 | hostport = ":8888" 122 | from execnet.gateway_base import get_execmodel 123 | 124 | execmodel = get_execmodel("thread") 125 | serversock = bind_and_listen(hostport, execmodel) 126 | startserver(serversock, loop=True) 127 | 128 | elif __name__ == "__channelexec__": 129 | chan: Channel = globals()["channel"] 130 | execmodel = chan.gateway.execmodel 131 | bindname = chan.receive() 132 | assert isinstance(bindname, (str, tuple)) 133 | sock = bind_and_listen(bindname, execmodel) 134 | port = sock.getsockname() 135 | chan.send(port) 136 | startserver(sock) 137 | -------------------------------------------------------------------------------- /src/execnet/script/socketserverservice.py: -------------------------------------------------------------------------------- 1 | """ 2 | A windows service wrapper for the py.execnet socketserver. 3 | 4 | To use, run: 5 | python socketserverservice.py register 6 | net start ExecNetSocketServer 7 | """ 8 | 9 | import sys 10 | import threading 11 | 12 | import servicemanager 13 | import win32event 14 | import win32evtlogutil 15 | import win32service 16 | import win32serviceutil 17 | 18 | from execnet.gateway_base import get_execmodel 19 | 20 | from . import socketserver 21 | 22 | appname = "ExecNetSocketServer" 23 | 24 | 25 | class SocketServerService(win32serviceutil.ServiceFramework): 26 | _svc_name_ = appname 27 | _svc_display_name_ = "%s" % appname 28 | _svc_deps_ = ["EventLog"] 29 | 30 | def __init__(self, args) -> None: 31 | # The exe-file has messages for the Event Log Viewer. 32 | # Register the exe-file as event source. 33 | # 34 | # Probably it would be better if this is done at installation time, 35 | # so that it also could be removed if the service is uninstalled. 36 | # Unfortunately it cannot be done in the 'if __name__ == "__main__"' 37 | # block below, because the 'frozen' exe-file does not run this code. 
38 | # 39 | win32evtlogutil.AddSourceToRegistry( 40 | self._svc_display_name_, servicemanager.__file__, "Application" 41 | ) 42 | super().__init__(args) 43 | self.hWaitStop = win32event.CreateEvent(None, 0, 0, None) 44 | self.WAIT_TIME = 1000 # in milliseconds 45 | 46 | def SvcStop(self) -> None: 47 | self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) 48 | win32event.SetEvent(self.hWaitStop) 49 | 50 | def SvcDoRun(self) -> None: 51 | # Redirect stdout and stderr to prevent "IOError: [Errno 9] 52 | # Bad file descriptor". Windows services don't have functional 53 | # output streams. 54 | sys.stdout = sys.stderr = open("nul", "w") 55 | 56 | # Write a 'started' event to the event log... 57 | win32evtlogutil.ReportEvent( 58 | self._svc_display_name_, 59 | servicemanager.PYS_SERVICE_STARTED, 60 | 0, # category 61 | servicemanager.EVENTLOG_INFORMATION_TYPE, 62 | (self._svc_name_, ""), 63 | ) 64 | print("Begin: %s" % self._svc_display_name_) 65 | 66 | hostport = ":8888" 67 | print("Starting py.execnet SocketServer on %s" % hostport) 68 | exec_model = get_execmodel("thread") 69 | serversock = socketserver.bind_and_listen(hostport, exec_model) 70 | thread = threading.Thread( 71 | target=socketserver.startserver, args=(serversock,), kwargs={"loop": True} 72 | ) 73 | thread.setDaemon(True) 74 | thread.start() 75 | 76 | # wait to be stopped or self.WAIT_TIME to pass 77 | while True: 78 | result = win32event.WaitForSingleObject(self.hWaitStop, self.WAIT_TIME) 79 | if result == win32event.WAIT_OBJECT_0: 80 | break 81 | 82 | # write a 'stopped' event to the event log. 83 | win32evtlogutil.ReportEvent( 84 | self._svc_display_name_, 85 | servicemanager.PYS_SERVICE_STOPPED, 86 | 0, # category 87 | servicemanager.EVENTLOG_INFORMATION_TYPE, 88 | (self._svc_name_, ""), 89 | ) 90 | print("End: %s" % appname) 91 | 92 | 93 | if __name__ == "__main__": 94 | # Note that this code will not be run in the 'frozen' exe-file!!! 95 | win32serviceutil.HandleCommandLine(SocketServerService) 96 | -------------------------------------------------------------------------------- /src/execnet/xspec.py: -------------------------------------------------------------------------------- 1 | """ 2 | (c) 2008-2013, holger krekel 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | 8 | class XSpec: 9 | """Execution Specification: key1=value1//key2=value2 ... 
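For example (illustrative spec): ``ssh=wyvern//python=python3//chdir=/tmp``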
10 |
11 | * Keys need to be unique within the specification scope
12 | * Neither key nor value is allowed to contain "//"
13 | * Keys are not allowed to contain "="
14 | * Keys are not allowed to start with underscore
15 | * If no "=value" is given, assume a boolean True value
16 | """
17 |
18 | # XXX allow customization, e.g. to only allow specific key names
19 | chdir: str | None = None
20 | dont_write_bytecode: bool | None = None
21 | execmodel: str | None = None
22 | id: str | None = None
23 | installvia: str | None = None
24 | nice: str | None = None
25 | popen: bool | None = None
26 | python: str | None = None
27 | socket: str | None = None
28 | ssh: str | None = None
29 | ssh_config: str | None = None
30 | vagrant_ssh: str | None = None
31 | via: str | None = None
32 |
33 | def __init__(self, string: str) -> None:
34 | self._spec = string
35 | self.env = {}
36 | for keyvalue in string.split("//"):
37 | i = keyvalue.find("=")
38 | value: str | bool
39 | if i == -1:
40 | key, value = keyvalue, True
41 | else:
42 | key, value = keyvalue[:i], keyvalue[i + 1 :]
43 | if key[0] == "_":
44 | raise AttributeError("%r not a valid XSpec key" % key)
45 | if key in self.__dict__:
46 | raise ValueError(f"duplicate key: {key!r} in {string!r}")
47 | if key.startswith("env:"):
48 | self.env[key[4:]] = value
49 | else:
50 | setattr(self, key, value)
51 |
52 | def __getattr__(self, name: str) -> None | bool | str:
53 | if name[0] == "_":
54 | raise AttributeError(name)
55 | return None
56 |
57 | def __repr__(self) -> str:
58 | return f"<XSpec {self._spec!r}>"
59 |
60 | def __str__(self) -> str:
61 | return self._spec
62 |
63 | def __hash__(self) -> int:
64 | return hash(self._spec)
65 |
66 | def __eq__(self, other: object) -> bool:
67 | return self._spec == getattr(other, "_spec", None)
68 |
69 | def __ne__(self, other: object) -> bool:
70 | return self._spec != getattr(other, "_spec", None)
71 |
72 | def _samefilesystem(self) -> bool:
73 | return self.popen is not None and self.chdir is None
74 | -------------------------------------------------------------------------------- /testing/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations
2 |
3 | import shutil
4 | import sys
5 | from functools import lru_cache
6 | from typing import Callable
7 | from typing import Generator
8 | from typing import Iterator
9 |
10 | import pytest
11 |
12 | import execnet
13 | from execnet.gateway import Gateway
14 | from execnet.gateway_base import ExecModel
15 | from execnet.gateway_base import WorkerPool
16 | from execnet.gateway_base import get_execmodel
17 |
18 | collect_ignore = ["build", "doc/_build"]
19 |
20 | rsyncdirs = ["conftest.py", "execnet", "testing", "doc"]
21 |
22 |
23 | @pytest.hookimpl(hookwrapper=True)
24 | def pytest_runtest_setup(item: pytest.Item) -> Generator[None, None, None]:
25 | if item.fspath.purebasename in ("test_group", "test_info"):
26 | getspecssh(item.config) # will skip if no gx given
27 | yield
28 | if "pypy" in item.keywords and not item.config.option.pypy:
29 | pytest.skip("pypy tests skipped, use --pypy to run them.")
30 |
31 |
32 | @pytest.fixture
33 | def group_function() -> Iterator[execnet.Group]:
34 | group = execnet.Group()
35 | yield group
36 | group.terminate(0.5)
37 |
38 |
39 | @pytest.fixture
40 | def makegateway(group_function: execnet.Group) -> Callable[[str], Gateway]:
41 | return group_function.makegateway
42 |
43 |
44 | pytest_plugins = ["pytester", "doctest"]
45 |
46 |
47 | # configuration information for tests
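# The --gx option below feeds XSpec strings to the ssh/socket gateway tests;
# an example invocation (hypothetical hosts):
#
#   pytest --gx ssh=user@somehost --gx socket=192.168.0.5:8888
#
# Without a matching spec, getspecssh()/getsocketspec() skip those tests.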
48 | def pytest_addoption(parser: pytest.Parser) -> None: 49 | group = parser.getgroup("execnet", "execnet testing options") 50 | group.addoption( 51 | "--gx", 52 | action="append", 53 | dest="gspecs", 54 | default=None, 55 | help="add a global test environment, XSpec-syntax. ", 56 | ) 57 | group.addoption( 58 | "--pypy", 59 | action="store_true", 60 | dest="pypy", 61 | help="run some tests also against pypy", 62 | ) 63 | group.addoption( 64 | "--broken-isp", 65 | action="store_true", 66 | dest="broken_isp", 67 | help=( 68 | "Skips tests that assume your ISP doesn't put up a landing " 69 | "page on invalid addresses" 70 | ), 71 | ) 72 | 73 | 74 | @pytest.fixture 75 | def specssh(request: pytest.FixtureRequest) -> execnet.XSpec: 76 | return getspecssh(request.config) 77 | 78 | 79 | @pytest.fixture 80 | def specsocket(request: pytest.FixtureRequest) -> execnet.XSpec: 81 | return getsocketspec(request.config) 82 | 83 | 84 | def getgspecs(config: pytest.Config) -> list[execnet.XSpec]: 85 | return [execnet.XSpec(gspec) for gspec in config.getvalueorskip("gspecs")] 86 | 87 | 88 | def getspecssh(config: pytest.Config) -> execnet.XSpec: 89 | xspecs = getgspecs(config) 90 | for spec in xspecs: 91 | if spec.ssh: 92 | if not shutil.which("ssh"): 93 | pytest.skip("command not found: ssh") 94 | return spec 95 | pytest.skip("need '--gx ssh=...'") 96 | 97 | 98 | def getsocketspec(config: pytest.Config) -> execnet.XSpec: 99 | xspecs = getgspecs(config) 100 | for spec in xspecs: 101 | if spec.socket: 102 | return spec 103 | pytest.skip("need '--gx socket=...'") 104 | 105 | 106 | def pytest_generate_tests(metafunc: pytest.Metafunc) -> None: 107 | if "gw" in metafunc.fixturenames: 108 | assert "anypython" not in metafunc.fixturenames, "need combine?" 109 | if hasattr(metafunc.function, "gwtypes"): 110 | gwtypes = metafunc.function.gwtypes 111 | elif hasattr(metafunc.cls, "gwtype"): 112 | gwtypes = [metafunc.cls.gwtype] 113 | else: 114 | gwtypes = ["popen", "socket", "ssh", "proxy"] 115 | metafunc.parametrize("gw", gwtypes, indirect=True) 116 | 117 | 118 | @lru_cache 119 | def getexecutable(name: str) -> str | None: 120 | if name == "sys.executable": 121 | return sys.executable 122 | return shutil.which(name) 123 | 124 | 125 | @pytest.fixture(params=("sys.executable", "pypy3")) 126 | def anypython(request: pytest.FixtureRequest) -> str: 127 | name = request.param 128 | executable = getexecutable(name) 129 | if executable is None: 130 | pytest.skip(f"no {name} found") 131 | if "execmodel" in request.fixturenames and name != "sys.executable": 132 | backend = request.getfixturevalue("execmodel").backend 133 | if backend not in ("thread", "main_thread_only"): 134 | pytest.xfail(f"cannot run {backend!r} execmodel with bare {name}") 135 | return executable 136 | 137 | 138 | @pytest.fixture(scope="session") 139 | def group() -> Iterator[execnet.Group]: 140 | g = execnet.Group() 141 | yield g 142 | g.terminate(timeout=1) 143 | 144 | 145 | @pytest.fixture 146 | def gw( 147 | request: pytest.FixtureRequest, 148 | execmodel: ExecModel, 149 | group: execnet.Group, 150 | ) -> Gateway: 151 | try: 152 | return group[request.param] 153 | except KeyError: 154 | if request.param == "popen": 155 | gw = group.makegateway("popen//id=popen//execmodel=%s" % execmodel.backend) 156 | elif request.param == "socket": 157 | # if execmodel.backend != "thread": 158 | # pytest.xfail( 159 | # "cannot set remote non-thread execmodel for sockets") 160 | pname = "sproxy1" 161 | if pname not in group: 162 | proxygw = 
group.makegateway("popen//id=%s" % pname) 163 | # assert group['proxygw'].remote_status().receiving 164 | gw = group.makegateway( 165 | f"socket//id=socket//installvia={pname}//execmodel={execmodel.backend}" 166 | ) 167 | # TODO(typing): Clarify this assignment. 168 | gw.proxygw = proxygw # type: ignore[attr-defined] 169 | assert pname in group 170 | elif request.param == "ssh": 171 | sshhost = request.getfixturevalue("specssh").ssh 172 | # we don't use execmodel.backend here 173 | # but you can set it when specifying the ssh spec 174 | gw = group.makegateway(f"ssh={sshhost}//id=ssh") 175 | elif request.param == "proxy": 176 | group.makegateway("popen//id=proxy-transport") 177 | gw = group.makegateway( 178 | "popen//via=proxy-transport//id=proxy//execmodel=%s" % execmodel.backend 179 | ) 180 | else: 181 | assert 0, f"unknown execmodel: {request.param}" 182 | return gw 183 | 184 | 185 | @pytest.fixture( 186 | params=["thread", "main_thread_only", "eventlet", "gevent"], scope="session" 187 | ) 188 | def execmodel(request: pytest.FixtureRequest) -> ExecModel: 189 | if request.param not in ("thread", "main_thread_only"): 190 | pytest.importorskip(request.param) 191 | if request.param in ("eventlet", "gevent") and sys.platform == "win32": 192 | pytest.xfail(request.param + " does not work on win32") 193 | return get_execmodel(request.param) 194 | 195 | 196 | @pytest.fixture 197 | def pool(execmodel: ExecModel) -> WorkerPool: 198 | return WorkerPool(execmodel=execmodel) 199 | -------------------------------------------------------------------------------- /testing/test_channel.py: -------------------------------------------------------------------------------- 1 | """ 2 | mostly functional tests of gateways. 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import time 8 | 9 | import pytest 10 | 11 | from execnet.gateway import Gateway 12 | from execnet.gateway_base import Channel 13 | 14 | needs_early_gc = pytest.mark.skipif("not hasattr(sys, 'getrefcount')") 15 | needs_osdup = pytest.mark.skipif("not hasattr(os, 'dup')") 16 | TESTTIMEOUT = 10.0 # seconds 17 | 18 | 19 | class TestChannelBasicBehaviour: 20 | def test_serialize_error(self, gw: Gateway) -> None: 21 | ch = gw.remote_exec("channel.send(ValueError(42))") 22 | excinfo = pytest.raises(ch.RemoteError, ch.receive) 23 | assert "can't serialize" in str(excinfo.value) 24 | 25 | def test_channel_close_and_then_receive_error(self, gw: Gateway) -> None: 26 | channel = gw.remote_exec("raise ValueError") 27 | pytest.raises(channel.RemoteError, channel.receive) 28 | 29 | def test_channel_finish_and_then_EOFError(self, gw: Gateway) -> None: 30 | channel = gw.remote_exec("channel.send(42)") 31 | x = channel.receive() 32 | assert x == 42 33 | pytest.raises(EOFError, channel.receive) 34 | pytest.raises(EOFError, channel.receive) 35 | pytest.raises(EOFError, channel.receive) 36 | 37 | def test_waitclose_timeouterror(self, gw: Gateway) -> None: 38 | channel = gw.remote_exec("channel.receive()") 39 | pytest.raises(channel.TimeoutError, channel.waitclose, 0.02) 40 | channel.send(1) 41 | channel.waitclose(timeout=TESTTIMEOUT) 42 | 43 | def test_channel_receive_timeout(self, gw: Gateway) -> None: 44 | channel = gw.remote_exec("channel.send(channel.receive())") 45 | with pytest.raises(channel.TimeoutError): 46 | channel.receive(timeout=0.2) 47 | channel.send(1) 48 | channel.receive(timeout=TESTTIMEOUT) 49 | 50 | def test_channel_receive_internal_timeout( 51 | self, gw: Gateway, monkeypatch: pytest.MonkeyPatch 52 | ) -> None: 53 | channel = 
gw.remote_exec( 54 | """ 55 | import time 56 | time.sleep(0.5) 57 | channel.send(1) 58 | """ 59 | ) 60 | monkeypatch.setattr(channel.__class__, "_INTERNALWAKEUP", 0.2) 61 | channel.receive() 62 | 63 | def test_channel_close_and_then_receive_error_multiple(self, gw: Gateway) -> None: 64 | channel = gw.remote_exec("channel.send(42) ; raise ValueError") 65 | x = channel.receive() 66 | assert x == 42 67 | pytest.raises(channel.RemoteError, channel.receive) 68 | 69 | def test_channel__local_close(self, gw: Gateway) -> None: 70 | channel = gw._channelfactory.new() 71 | gw._channelfactory._local_close(channel.id) 72 | channel.waitclose(0.1) 73 | 74 | def test_channel__local_close_error(self, gw: Gateway) -> None: 75 | channel = gw._channelfactory.new() 76 | gw._channelfactory._local_close(channel.id, channel.RemoteError("error")) 77 | pytest.raises(channel.RemoteError, channel.waitclose, 0.01) 78 | 79 | def test_channel_error_reporting(self, gw: Gateway) -> None: 80 | channel = gw.remote_exec("def foo():\n return foobar()\nfoo()\n") 81 | excinfo = pytest.raises(channel.RemoteError, channel.receive) 82 | msg = str(excinfo.value) 83 | assert msg.startswith("Traceback (most recent call last):") 84 | assert "NameError" in msg 85 | assert "foobar" in msg 86 | 87 | def test_channel_syntax_error(self, gw: Gateway) -> None: 88 | # missing colon 89 | channel = gw.remote_exec("def foo()\n return 1\nfoo()\n") 90 | excinfo = pytest.raises(channel.RemoteError, channel.receive) 91 | msg = str(excinfo.value) 92 | assert msg.startswith("Traceback (most recent call last):") 93 | assert "SyntaxError" in msg 94 | 95 | def test_channel_iter(self, gw: Gateway) -> None: 96 | channel = gw.remote_exec( 97 | """ 98 | for x in range(3): 99 | channel.send(x) 100 | """ 101 | ) 102 | l = list(channel) 103 | assert l == [0, 1, 2] 104 | 105 | def test_channel_pass_in_structure(self, gw: Gateway) -> None: 106 | channel = gw.remote_exec( 107 | """ 108 | ch1, ch2 = channel.receive() 109 | data = ch1.receive() 110 | ch2.send(data+1) 111 | """ 112 | ) 113 | newchan1 = gw.newchannel() 114 | newchan2 = gw.newchannel() 115 | channel.send((newchan1, newchan2)) 116 | newchan1.send(1) 117 | data = newchan2.receive() 118 | assert data == 2 119 | 120 | def test_channel_multipass(self, gw: Gateway) -> None: 121 | channel = gw.remote_exec( 122 | """ 123 | channel.send(channel) 124 | xchan = channel.receive() 125 | assert xchan == channel 126 | """ 127 | ) 128 | newchan = channel.receive() 129 | assert newchan == channel 130 | channel.send(newchan) 131 | channel.waitclose() 132 | 133 | def test_channel_passing_over_channel(self, gw: Gateway) -> None: 134 | channel = gw.remote_exec( 135 | """ 136 | c = channel.gateway.newchannel() 137 | channel.send(c) 138 | c.send(42) 139 | """ 140 | ) 141 | c = channel.receive() 142 | assert isinstance(c, Channel) 143 | x = c.receive() 144 | assert x == 42 145 | 146 | # check that the both sides previous channels are really gone 147 | channel.waitclose(TESTTIMEOUT) 148 | # assert c.id not in gw._channelfactory 149 | newchan = gw.remote_exec( 150 | """ 151 | assert %d not in channel.gateway._channelfactory._channels 152 | """ 153 | % channel.id 154 | ) 155 | newchan.waitclose(TESTTIMEOUT) 156 | assert channel.id not in gw._channelfactory._channels 157 | 158 | def test_channel_receiver_callback(self, gw: Gateway) -> None: 159 | l: list[int] = [] 160 | # channel = gw.newchannel(receiver=l.append) 161 | channel = gw.remote_exec( 162 | source=""" 163 | channel.send(42) 164 | channel.send(13) 165 | 
channel.send(channel.gateway.newchannel()) 166 | """ 167 | ) 168 | channel.setcallback(callback=l.append) 169 | pytest.raises(IOError, channel.receive) 170 | channel.waitclose(TESTTIMEOUT) 171 | assert len(l) == 3 172 | assert l[:2] == [42, 13] 173 | assert isinstance(l[2], channel.__class__) 174 | 175 | def test_channel_callback_after_receive(self, gw: Gateway) -> None: 176 | l: list[int] = [] 177 | channel = gw.remote_exec( 178 | source=""" 179 | channel.send(42) 180 | channel.send(13) 181 | channel.send(channel.gateway.newchannel()) 182 | """ 183 | ) 184 | x = channel.receive() 185 | assert x == 42 186 | channel.setcallback(callback=l.append) 187 | pytest.raises(IOError, channel.receive) 188 | channel.waitclose(TESTTIMEOUT) 189 | assert len(l) == 2 190 | assert l[0] == 13 191 | assert isinstance(l[1], channel.__class__) 192 | 193 | def test_waiting_for_callbacks(self, gw: Gateway) -> None: 194 | l = [] 195 | 196 | def callback(msg) -> None: 197 | import time 198 | 199 | time.sleep(0.2) 200 | l.append(msg) 201 | 202 | channel = gw.remote_exec( 203 | source=""" 204 | channel.send(42) 205 | """ 206 | ) 207 | channel.setcallback(callback) 208 | channel.waitclose(TESTTIMEOUT) 209 | assert l == [42] 210 | 211 | def test_channel_callback_stays_active(self, gw: Gateway) -> None: 212 | self.check_channel_callback_stays_active(gw, earlyfree=True) 213 | 214 | def check_channel_callback_stays_active( 215 | self, gw: Gateway, earlyfree: bool = True 216 | ) -> Channel | None: 217 | if gw.spec.execmodel == "gevent": 218 | pytest.xfail("investigate gevent failure") 219 | # with 'earlyfree==True', this tests the "sendonly" channel state. 220 | l: list[int] = [] 221 | channel = gw.remote_exec( 222 | source=""" 223 | import _thread 224 | import time 225 | def producer(subchannel): 226 | for i in range(5): 227 | time.sleep(0.15) 228 | subchannel.send(i*100) 229 | channel2 = channel.receive() 230 | _thread.start_new_thread(producer, (channel2,)) 231 | del channel2 232 | """ 233 | ) 234 | subchannel = gw.newchannel() 235 | subchannel.setcallback(l.append) 236 | channel.send(subchannel) 237 | subchan = None if earlyfree else subchannel 238 | counter = 100 239 | while len(l) < 5: 240 | if subchan and subchan.isclosed(): 241 | break 242 | counter -= 1 243 | print(counter) 244 | if not counter: 245 | pytest.fail("timed out waiting for the answer[%d]" % len(l)) 246 | time.sleep(0.04) # busy-wait 247 | assert l == [0, 100, 200, 300, 400] 248 | return subchan 249 | 250 | @needs_early_gc 251 | def test_channel_callback_remote_freed(self, gw: Gateway) -> None: 252 | channel = self.check_channel_callback_stays_active(gw, earlyfree=False) 253 | assert channel is not None 254 | # freed automatically at the end of producer() 255 | channel.waitclose(TESTTIMEOUT) 256 | 257 | def test_channel_endmarker_callback(self, gw: Gateway) -> None: 258 | l: list[int | Channel] = [] 259 | channel = gw.remote_exec( 260 | source=""" 261 | channel.send(42) 262 | channel.send(13) 263 | channel.send(channel.gateway.newchannel()) 264 | """ 265 | ) 266 | channel.setcallback(l.append, 999) 267 | pytest.raises(IOError, channel.receive) 268 | channel.waitclose(TESTTIMEOUT) 269 | assert len(l) == 4 270 | assert l[:2] == [42, 13] 271 | assert isinstance(l[2], channel.__class__) 272 | assert l[3] == 999 273 | 274 | def test_channel_endmarker_callback_error(self, gw: Gateway) -> None: 275 | q = gw.execmodel.queue.Queue() 276 | channel = gw.remote_exec( 277 | source=""" 278 | raise ValueError() 279 | """ 280 | ) 281 | channel.setcallback(q.put, 
endmarker=999) 282 | val = q.get(TESTTIMEOUT) 283 | assert val == 999 284 | err = channel._getremoteerror() 285 | assert err 286 | assert str(err).find("ValueError") != -1 287 | 288 | def test_channel_callback_error(self, gw: Gateway) -> None: 289 | channel = gw.remote_exec( 290 | """ 291 | def f(item): 292 | raise ValueError(42) 293 | ch = channel.gateway.newchannel() 294 | ch.setcallback(f) 295 | channel.send(ch) 296 | channel.receive() 297 | assert ch.isclosed() 298 | """ 299 | ) 300 | subchan = channel.receive() 301 | assert isinstance(subchan, Channel) 302 | subchan.send(1) 303 | with pytest.raises(subchan.RemoteError) as excinfo: 304 | subchan.waitclose(TESTTIMEOUT) 305 | assert "42" in excinfo.value.formatted 306 | channel.send(1) 307 | channel.waitclose() 308 | 309 | 310 | class TestChannelFile: 311 | def test_channel_file_write(self, gw: Gateway) -> None: 312 | channel = gw.remote_exec( 313 | """ 314 | f = channel.makefile() 315 | f.write("hello world\\n") 316 | f.close() 317 | channel.send(42) 318 | """ 319 | ) 320 | first = channel.receive() 321 | assert isinstance(first, str) 322 | assert first.strip() == "hello world" 323 | second = channel.receive() 324 | assert second == 42 325 | 326 | def test_channel_file_write_error(self, gw: Gateway) -> None: 327 | channel = gw.remote_exec("pass") 328 | f = channel.makefile() 329 | assert not f.isatty() 330 | channel.waitclose(TESTTIMEOUT) 331 | with pytest.raises(IOError): 332 | f.write(b"hello") 333 | 334 | def test_channel_file_proxyclose(self, gw: Gateway) -> None: 335 | channel = gw.remote_exec( 336 | """ 337 | f = channel.makefile(proxyclose=True) 338 | f.write("hello world") 339 | f.close() 340 | channel.send(42) 341 | """ 342 | ) 343 | first = channel.receive() 344 | assert isinstance(first, str) 345 | assert first.strip() == "hello world" 346 | pytest.raises(channel.RemoteError, channel.receive) 347 | 348 | def test_channel_file_read(self, gw: Gateway) -> None: 349 | channel = gw.remote_exec( 350 | """ 351 | f = channel.makefile(mode='r') 352 | s = f.read(2) 353 | channel.send(s) 354 | s = f.read(5) 355 | channel.send(s) 356 | """ 357 | ) 358 | channel.send("xyabcde") 359 | s1 = channel.receive() 360 | s2 = channel.receive() 361 | assert s1 == "xy" 362 | assert s2 == "abcde" 363 | 364 | def test_channel_file_read_empty(self, gw: Gateway) -> None: 365 | channel = gw.remote_exec("pass") 366 | f = channel.makefile(mode="r") 367 | s = f.read(3) 368 | assert s == "" 369 | s = f.read(5) 370 | assert s == "" 371 | 372 | def test_channel_file_readline_remote(self, gw: Gateway) -> None: 373 | channel = gw.remote_exec( 374 | """ 375 | channel.send('123\\n45') 376 | """ 377 | ) 378 | channel.waitclose(TESTTIMEOUT) 379 | f = channel.makefile(mode="r") 380 | s = f.readline() 381 | assert s == "123\n" 382 | s = f.readline() 383 | assert s == "45" 384 | 385 | def test_channel_makefile_incompatmode(self, gw: Gateway) -> None: 386 | channel = gw.newchannel() 387 | with pytest.raises(ValueError): 388 | channel.makefile("rw") # type: ignore[call-overload] 389 | -------------------------------------------------------------------------------- /testing/test_compatibility_regressions.py: -------------------------------------------------------------------------------- 1 | from execnet import gateway_base 2 | 3 | 4 | def test_opcodes() -> None: 5 | data = vars(gateway_base.opcode) 6 | computed = {k: v for k, v in data.items() if "__" not in k} 7 | assert computed == { 8 | "BUILDTUPLE": b"@", 9 | "BYTES": b"A", 10 | "CHANNEL": b"B", 11 | "FALSE": b"C", 
12 | "FLOAT": b"D", 13 | "FROZENSET": b"E", 14 | "INT": b"F", 15 | "LONG": b"G", 16 | "LONGINT": b"H", 17 | "LONGLONG": b"I", 18 | "NEWDICT": b"J", 19 | "NEWLIST": b"K", 20 | "NONE": b"L", 21 | "PY2STRING": b"M", 22 | "PY3STRING": b"N", 23 | "SET": b"O", 24 | "SETITEM": b"P", 25 | "STOP": b"Q", 26 | "TRUE": b"R", 27 | "UNICODE": b"S", 28 | # added in 1.4 29 | # causes a regression since it was ordered in 30 | # between CHANNEL and FALSE as "C" moving the other items 31 | "COMPLEX": b"T", 32 | } 33 | -------------------------------------------------------------------------------- /testing/test_multi.py: -------------------------------------------------------------------------------- 1 | """ 2 | tests for multi channels and gateway Groups 3 | """ 4 | 5 | from __future__ import annotations 6 | 7 | import gc 8 | from time import sleep 9 | from typing import Callable 10 | 11 | import pytest 12 | 13 | import execnet 14 | from execnet import XSpec 15 | from execnet.gateway import Gateway 16 | from execnet.gateway_base import Channel 17 | from execnet.gateway_base import ExecModel 18 | from execnet.multi import Group 19 | from execnet.multi import safe_terminate 20 | 21 | 22 | class TestMultiChannelAndGateway: 23 | def test_multichannel_container_basics( 24 | self, gw: Gateway, execmodel: ExecModel 25 | ) -> None: 26 | mch = execnet.MultiChannel([Channel(gw, i) for i in range(3)]) 27 | assert len(mch) == 3 28 | channels = list(mch) 29 | assert len(channels) == 3 30 | # ordering 31 | for i in range(3): 32 | assert channels[i].id == i 33 | assert channels[i] == mch[i] 34 | assert channels[0] in mch 35 | assert channels[1] in mch 36 | assert channels[2] in mch 37 | 38 | def test_multichannel_receive_each(self) -> None: 39 | class pseudochannel: 40 | def receive(self) -> object: 41 | return 12 42 | 43 | pc1 = pseudochannel() 44 | pc2 = pseudochannel() 45 | multichannel = execnet.MultiChannel([pc1, pc2]) # type: ignore[list-item] 46 | l = multichannel.receive_each(withchannel=True) 47 | assert len(l) == 2 48 | assert l == [(pc1, 12), (pc2, 12)] # type: ignore[comparison-overlap] 49 | l2 = multichannel.receive_each(withchannel=False) 50 | assert l2 == [12, 12] 51 | 52 | def test_multichannel_send_each(self) -> None: 53 | gm = execnet.Group(["popen"] * 2) 54 | mc = gm.remote_exec( 55 | """ 56 | import os 57 | channel.send(channel.receive() + 1) 58 | """ 59 | ) 60 | mc.send_each(41) 61 | l = mc.receive_each() 62 | assert l == [42, 42] 63 | 64 | def test_Group_execmodel_setting(self) -> None: 65 | gm = execnet.Group() 66 | gm.set_execmodel("thread") 67 | assert gm.execmodel.backend == "thread" 68 | assert gm.remote_execmodel.backend == "thread" 69 | gm._gateways.append(1) # type: ignore[arg-type] 70 | try: 71 | with pytest.raises(ValueError): 72 | gm.set_execmodel("eventlet") 73 | assert gm.execmodel.backend == "thread" 74 | finally: 75 | gm._gateways.pop() 76 | 77 | def test_multichannel_receive_queue_for_two_subprocesses(self) -> None: 78 | gm = execnet.Group(["popen"] * 2) 79 | mc = gm.remote_exec( 80 | """ 81 | import os 82 | channel.send(os.getpid()) 83 | """ 84 | ) 85 | queue = mc.make_receive_queue() 86 | ch, item = queue.get(timeout=10) 87 | ch2, item2 = queue.get(timeout=10) 88 | assert ch != ch2 89 | assert ch.gateway != ch2.gateway 90 | assert item != item2 91 | mc.waitclose() 92 | 93 | def test_multichannel_waitclose(self) -> None: 94 | l = [] 95 | 96 | class pseudochannel: 97 | def waitclose(self) -> None: 98 | l.append(0) 99 | 100 | multichannel = execnet.MultiChannel([pseudochannel(), 
pseudochannel()]) # type: ignore[list-item] 101 | multichannel.waitclose()
102 | assert len(l) == 2
103 |
104 |
105 | class TestGroup:
106 | def test_basic_group(self, monkeypatch: pytest.MonkeyPatch) -> None:
107 | import atexit
108 |
109 | atexitlist: list[Callable[[], object]] = []
110 | monkeypatch.setattr(atexit, "register", atexitlist.append)
111 | group = Group()
112 | assert atexitlist == [group._cleanup_atexit]
113 | exitlist = []
114 | joinlist = []
115 |
116 | class PseudoIO:
117 | def wait(self) -> None:
118 | pass
119 |
120 | class PseudoSpec:
121 | via = None
122 |
123 | class PseudoGW:
124 | id = "9999"
125 | _io = PseudoIO()
126 | spec = PseudoSpec()
127 |
128 | def exit(self) -> None:
129 | exitlist.append(self)
130 | group._unregister(self) # type: ignore[arg-type]
131 |
132 | def join(self) -> None:
133 | joinlist.append(self)
134 |
135 | gw = PseudoGW()
136 | group._register(gw) # type: ignore[arg-type]
137 | assert len(exitlist) == 0
138 | assert len(joinlist) == 0
139 | group._cleanup_atexit()
140 | assert len(exitlist) == 1
141 | assert exitlist == [gw]
142 | assert len(joinlist) == 1
143 | assert joinlist == [gw]
144 | group._cleanup_atexit()
145 | assert len(exitlist) == 1
146 | assert len(joinlist) == 1
147 |
148 | def test_group_default_spec(self) -> None:
149 | group = Group()
150 | group.defaultspec = "not-existing-type"
151 | pytest.raises(ValueError, group.makegateway)
152 |
153 | def test_group_PopenGateway(self) -> None:
154 | group = Group()
155 | gw = group.makegateway("popen")
156 | assert list(group) == [gw]
157 | assert group[0] == gw
158 | assert len(group) == 1
159 | group._cleanup_atexit()
160 | assert not group._gateways
161 |
162 | def test_group_ordering_and_termination(self) -> None:
163 | group = Group()
164 | group.makegateway("popen//id=3")
165 | group.makegateway("popen//id=2")
166 | group.makegateway("popen//id=5")
167 | gwlist = list(group)
168 | assert len(gwlist) == 3
169 | idlist = [x.id for x in gwlist]
170 | assert idlist == list("325")
171 | print(group)
172 | group.terminate()
173 | print(group)
174 | assert not group
175 | assert repr(group) == "<Group []>"
176 |
177 | def test_group_id_allocation(self) -> None:
178 | group = Group()
179 | specs = [XSpec("popen"), XSpec("popen//id=hello")]
180 | group.allocate_id(specs[0])
181 | group.allocate_id(specs[1])
182 | gw = group.makegateway(specs[1])
183 | assert gw.id == "hello"
184 | gw = group.makegateway(specs[0])
185 | assert gw.id == "gw0"
186 | # pytest.raises(ValueError,
187 | # group.allocate_id, XSpec("popen//id=hello"))
188 | group.terminate()
189 |
190 | def test_gateway_and_id(self) -> None:
191 | group = Group()
192 | gw = group.makegateway("popen//id=hello")
193 | assert group["hello"] == gw
194 | with pytest.raises((TypeError, AttributeError)):
195 | del group["hello"] # type: ignore[attr-defined]
196 | with pytest.raises((TypeError, AttributeError)):
197 | group["hello"] = 5 # type: ignore[index]
198 | assert "hello" in group
199 | assert gw in group
200 | assert len(group) == 1
201 | gw.exit()
202 | assert "hello" not in group
203 | with pytest.raises(KeyError):
204 | _ = group["hello"]
205 |
206 | def test_default_group(self) -> None:
207 | oldlist = list(execnet.default_group)
208 | gw = execnet.makegateway("popen")
209 | try:
210 | newlist = list(execnet.default_group)
211 | assert len(newlist) == len(oldlist) + 1
212 | assert gw in newlist
213 | assert gw not in oldlist
214 | finally:
215 | gw.exit()
216 |
217 | def test_remote_exec_args(self) -> None:
218 | group = Group()
219 | group.makegateway("popen") 220 | 221 | def fun(channel, arg) -> None: 222 | channel.send(arg) 223 | 224 | mch = group.remote_exec(fun, arg=1) 225 | result = mch.receive_each() 226 | assert result == [1] 227 | 228 | def test_terminate_with_proxying(self) -> None: 229 | group = Group() 230 | group.makegateway("popen//id=master") 231 | group.makegateway("popen//via=master//id=worker") 232 | group.terminate(1.0) 233 | 234 | 235 | @pytest.mark.xfail(reason="active_count() has been broken for some time") 236 | def test_safe_terminate(execmodel: ExecModel) -> None: 237 | if execmodel.backend not in ("thread", "main_thread_only"): 238 | pytest.xfail( 239 | "execution model %r does not support task count" % execmodel.backend 240 | ) 241 | import threading 242 | 243 | active = threading.active_count() 244 | l = [] 245 | 246 | def term() -> None: 247 | sleep(3) 248 | 249 | def kill() -> None: 250 | l.append(1) 251 | 252 | safe_terminate(execmodel, 1, [(term, kill)] * 10) 253 | assert len(l) == 10 254 | sleep(0.1) 255 | gc.collect() 256 | assert execmodel.active_count() == active # type: ignore[attr-defined] 257 | 258 | 259 | @pytest.mark.xfail(reason="active_count() has been broken for some time") 260 | def test_safe_terminate2(execmodel: ExecModel) -> None: 261 | if execmodel.backend not in ("thread", "main_thread_only"): 262 | pytest.xfail( 263 | "execution model %r does not support task count" % execmodel.backend 264 | ) 265 | import threading 266 | 267 | active = threading.active_count() 268 | l = [] 269 | 270 | def term() -> None: 271 | return 272 | 273 | def kill() -> None: 274 | l.append(1) 275 | 276 | safe_terminate(execmodel, 3, [(term, kill)] * 10) 277 | assert len(l) == 0 278 | sleep(0.1) 279 | gc.collect() 280 | assert threading.active_count() == active 281 | -------------------------------------------------------------------------------- /testing/test_rsync.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import platform 4 | import sys 5 | import types 6 | 7 | import pytest 8 | 9 | import execnet 10 | from execnet import RSync 11 | from execnet.gateway import Gateway 12 | 13 | 14 | @pytest.fixture(scope="module") 15 | def group(request: pytest.FixtureRequest) -> execnet.Group: 16 | group = execnet.Group() 17 | request.addfinalizer(group.terminate) 18 | return group 19 | 20 | 21 | @pytest.fixture(scope="module") 22 | def gw1(request: pytest.FixtureRequest, group: execnet.Group) -> Gateway: 23 | gw = group.makegateway("popen//id=gw1") 24 | request.addfinalizer(gw.exit) 25 | return gw 26 | 27 | 28 | @pytest.fixture(scope="module") 29 | def gw2(request: pytest.FixtureRequest, group: execnet.Group) -> Gateway: 30 | gw = group.makegateway("popen//id=gw2") 31 | request.addfinalizer(gw.exit) 32 | return gw 33 | 34 | 35 | needssymlink = pytest.mark.skipif( 36 | not hasattr(os, "symlink") 37 | or (platform.python_implementation() == "PyPy" and sys.platform == "win32"), 38 | reason="os.symlink not available", 39 | ) 40 | 41 | 42 | class _dirs(types.SimpleNamespace): 43 | source: pathlib.Path 44 | dest1: pathlib.Path 45 | dest2: pathlib.Path 46 | 47 | 48 | @pytest.fixture 49 | def dirs(tmp_path: pathlib.Path) -> _dirs: 50 | dirs = _dirs( 51 | source=tmp_path / "source", 52 | dest1=tmp_path / "dest1", 53 | dest2=tmp_path / "dest2", 54 | ) 55 | dirs.source.mkdir() 56 | dirs.dest1.mkdir() 57 | dirs.dest2.mkdir() 58 | return dirs 59 | 60 | 61 | def are_paths_equal(path1: pathlib.Path, path2: pathlib.Path) -> bool: 62 | if 
os.path.__name__ == "ntpath": 63 | # On Windows, os.readlink returns an extended path (\\?\) 64 | # for absolute symlinks. However, extended does not compare 65 | # equal to non-extended, even when they refer to the same 66 | # path otherwise. So we have to fix it up ourselves... 67 | is_extended1 = str(path1).startswith("\\\\?\\") 68 | is_extended2 = str(path2).startswith("\\\\?\\") 69 | if is_extended1 and not is_extended2: 70 | path2 = pathlib.Path("\\\\?\\" + str(path2)) 71 | if not is_extended1 and is_extended2: 72 | path1 = pathlib.Path("\\\\?\\" + str(path1)) 73 | return path1 == path2 74 | 75 | 76 | class TestRSync: 77 | def test_notargets(self, dirs: _dirs) -> None: 78 | rsync = RSync(dirs.source) 79 | with pytest.raises(IOError): 80 | rsync.send() 81 | assert rsync.send(raises=False) is None # type: ignore[func-returns-value] 82 | 83 | def test_dirsync(self, dirs: _dirs, gw1: Gateway, gw2: Gateway) -> None: 84 | dest = dirs.dest1 85 | dest2 = dirs.dest2 86 | source = dirs.source 87 | 88 | for s in ("content1", "content2", "content2-a-bit-longer"): 89 | subdir = source / "subdir" 90 | subdir.mkdir(exist_ok=True) 91 | subdir.joinpath("file1").write_text(s) 92 | rsync = RSync(dirs.source) 93 | rsync.add_target(gw1, dest) 94 | rsync.add_target(gw2, dest2) 95 | rsync.send() 96 | assert dest.joinpath("subdir").is_dir() 97 | assert dest.joinpath("subdir", "file1").is_file() 98 | assert dest.joinpath("subdir", "file1").read_text() == s 99 | assert dest2.joinpath("subdir").is_dir() 100 | assert dest2.joinpath("subdir", "file1").is_file() 101 | assert dest2.joinpath("subdir", "file1").read_text() == s 102 | for x in dest, dest2: 103 | fn = x.joinpath("subdir", "file1") 104 | os.utime(fn, (0, 0)) 105 | 106 | source.joinpath("subdir", "file1").unlink() 107 | rsync = RSync(source) 108 | rsync.add_target(gw2, dest2) 109 | rsync.add_target(gw1, dest) 110 | rsync.send() 111 | assert dest.joinpath("subdir", "file1").is_file() 112 | assert dest2.joinpath("subdir", "file1").is_file() 113 | rsync = RSync(source) 114 | rsync.add_target(gw1, dest, delete=True) 115 | rsync.add_target(gw2, dest2) 116 | rsync.send() 117 | assert not dest.joinpath("subdir", "file1").exists() 118 | assert dest2.joinpath("subdir", "file1").exists() 119 | 120 | def test_dirsync_twice(self, dirs: _dirs, gw1: Gateway, gw2: Gateway) -> None: 121 | source = dirs.source 122 | source.joinpath("hello").touch() 123 | rsync = RSync(source) 124 | rsync.add_target(gw1, dirs.dest1) 125 | rsync.send() 126 | assert dirs.dest1.joinpath("hello").exists() 127 | with pytest.raises(IOError): 128 | rsync.send() 129 | assert rsync.send(raises=False) is None # type: ignore[func-returns-value] 130 | rsync.add_target(gw1, dirs.dest2) 131 | rsync.send() 132 | assert dirs.dest2.joinpath("hello").exists() 133 | with pytest.raises(IOError): 134 | rsync.send() 135 | assert rsync.send(raises=False) is None # type: ignore[func-returns-value] 136 | 137 | def test_rsync_default_reporting( 138 | self, capsys: pytest.CaptureFixture[str], dirs: _dirs, gw1: Gateway 139 | ) -> None: 140 | source = dirs.source 141 | source.joinpath("hello").touch() 142 | rsync = RSync(source) 143 | rsync.add_target(gw1, dirs.dest1) 144 | rsync.send() 145 | out, err = capsys.readouterr() 146 | assert out.find("hello") != -1 147 | 148 | def test_rsync_non_verbose( 149 | self, capsys: pytest.CaptureFixture[str], dirs: _dirs, gw1: Gateway 150 | ) -> None: 151 | source = dirs.source 152 | source.joinpath("hello").touch() 153 | rsync = RSync(source, verbose=False) 154 | 
rsync.add_target(gw1, dirs.dest1) 155 | rsync.send() 156 | out, err = capsys.readouterr() 157 | assert not out 158 | assert not err 159 | 160 | @pytest.mark.skipif( 161 | sys.platform == "win32" or getattr(os, "_name", "") == "nt", 162 | reason="irrelevant on windows", 163 | ) 164 | def test_permissions(self, dirs: _dirs, gw1: Gateway, gw2: Gateway) -> None: 165 | source = dirs.source 166 | dest = dirs.dest1 167 | onedir = dirs.source / "one" 168 | onedir.mkdir() 169 | onedir.chmod(448) 170 | onefile = dirs.source / "file" 171 | onefile.touch() 172 | onefile.chmod(504) 173 | onefile_mtime = onefile.stat().st_mtime 174 | 175 | rsync = RSync(source) 176 | rsync.add_target(gw1, dest) 177 | rsync.send() 178 | 179 | destdir = dirs.dest1 / onedir.name 180 | destfile = dirs.dest1 / onefile.name 181 | assert destfile.stat().st_mode & 511 == 504 182 | mode = destdir.stat().st_mode 183 | assert mode & 511 == 448 184 | 185 | # transfer again with changed permissions 186 | onedir.chmod(504) 187 | onefile.chmod(448) 188 | os.utime(onefile, (onefile_mtime, onefile_mtime)) 189 | 190 | rsync = RSync(source) 191 | rsync.add_target(gw1, dest) 192 | rsync.send() 193 | 194 | mode = destfile.stat().st_mode 195 | assert mode & 511 == 448, mode 196 | mode = destdir.stat().st_mode 197 | assert mode & 511 == 504 198 | 199 | @pytest.mark.skipif( 200 | sys.platform == "win32" or getattr(os, "_name", "") == "nt", 201 | reason="irrelevant on windows", 202 | ) 203 | def test_read_only_directories(self, dirs: _dirs, gw1: Gateway) -> None: 204 | source = dirs.source 205 | dest = dirs.dest1 206 | sub = source / "sub" 207 | sub.mkdir() 208 | subsub = sub / "subsub" 209 | subsub.mkdir() 210 | sub.chmod(0o500) 211 | subsub.chmod(0o500) 212 | 213 | # The destination directories should be created with the write 214 | # permission forced, to avoid raising an EACCES error. 
215 | rsync = RSync(source) 216 | rsync.add_target(gw1, dest) 217 | rsync.send() 218 | 219 | assert dest.joinpath("sub").stat().st_mode & 0o700 220 | assert dest.joinpath("sub", "subsub").stat().st_mode & 0o700 221 | 222 | @needssymlink 223 | def test_symlink_rsync(self, dirs: _dirs, gw1: Gateway) -> None: 224 | source = dirs.source 225 | dest = dirs.dest1 226 | subdir = dirs.source / "subdir" 227 | subdir.mkdir() 228 | sourcefile = subdir / "existent" 229 | sourcefile.touch() 230 | source.joinpath("rellink").symlink_to(sourcefile.relative_to(source)) 231 | source.joinpath("abslink").symlink_to(sourcefile) 232 | 233 | rsync = RSync(source) 234 | rsync.add_target(gw1, dest) 235 | rsync.send() 236 | 237 | rellink = pathlib.Path(os.readlink(str(dest / "rellink"))) 238 | assert rellink == pathlib.Path("subdir/existent") 239 | 240 | abslink = pathlib.Path(os.readlink(str(dest / "abslink"))) 241 | expected = dest.joinpath(sourcefile.relative_to(source)) 242 | assert are_paths_equal(abslink, expected) 243 | 244 | @needssymlink 245 | def test_symlink2_rsync(self, dirs: _dirs, gw1: Gateway) -> None: 246 | source = dirs.source 247 | dest = dirs.dest1 248 | subdir = dirs.source / "subdir" 249 | subdir.mkdir() 250 | sourcefile = subdir / "somefile" 251 | sourcefile.touch() 252 | subdir.joinpath("link1").symlink_to( 253 | subdir.joinpath("link2").relative_to(subdir) 254 | ) 255 | subdir.joinpath("link2").symlink_to(sourcefile) 256 | subdir.joinpath("link3").symlink_to(source.parent) 257 | rsync = RSync(source) 258 | rsync.add_target(gw1, dest) 259 | rsync.send() 260 | expected = dest.joinpath(sourcefile.relative_to(dirs.source)) 261 | destsub = dest.joinpath("subdir") 262 | assert destsub.exists() 263 | link1 = pathlib.Path(os.readlink(str(destsub / "link1"))) 264 | assert are_paths_equal(link1, pathlib.Path("link2")) 265 | link2 = pathlib.Path(os.readlink(str(destsub / "link2"))) 266 | assert are_paths_equal(link2, expected) 267 | link3 = pathlib.Path(os.readlink(str(destsub / "link3"))) 268 | assert are_paths_equal(link3, source.parent) 269 | 270 | def test_callback(self, dirs: _dirs, gw1: Gateway) -> None: 271 | dest = dirs.dest1 272 | source = dirs.source 273 | source.joinpath("existent").write_text("a" * 100) 274 | source.joinpath("existant2").write_text("a" * 10) 275 | total = {} 276 | 277 | def callback(cmd, lgt, channel): 278 | total[(cmd, lgt)] = True 279 | 280 | rsync = RSync(source, callback=callback) 281 | # rsync = RSync() 282 | rsync.add_target(gw1, dest) 283 | rsync.send() 284 | 285 | assert total == {("list", 110): True, ("ack", 100): True, ("ack", 10): True} 286 | 287 | def test_file_disappearing(self, dirs: _dirs, gw1: Gateway) -> None: 288 | dest = dirs.dest1 289 | source = dirs.source 290 | source.joinpath("ex").write_text("a" * 100) 291 | source.joinpath("ex2").write_text("a" * 100) 292 | 293 | class DRsync(RSync): 294 | def filter(self, x: str) -> bool: 295 | assert x != str(source) 296 | if x.endswith("ex2"): 297 | self.x = 1 298 | source.joinpath("ex2").unlink() 299 | return True 300 | 301 | rsync = DRsync(source) 302 | rsync.add_target(gw1, dest) 303 | rsync.send() 304 | assert rsync.x == 1 305 | assert len(list(dest.iterdir())) == 1 306 | assert len(list(source.iterdir())) == 1 307 | -------------------------------------------------------------------------------- /testing/test_serializer.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import os 4 | import subprocess 5 | import sys 6 | from 
pathlib import Path 7 | 8 | import pytest 9 | 10 | import execnet 11 | 12 | # We use the execnet folder in order to avoid triggering a missing apipkg. 13 | pyimportdir = os.fspath(Path(execnet.__file__).parent) 14 | 15 | 16 | class PythonWrapper: 17 | def __init__(self, executable: str, tmp_path: Path) -> None: 18 | self.executable = executable 19 | self.tmp_path = tmp_path 20 | 21 | def dump(self, obj_rep: str) -> bytes: 22 | script_file = self.tmp_path.joinpath("dump.py") 23 | script_file.write_text( 24 | f""" 25 | import sys 26 | sys.path.insert(0, {pyimportdir!r}) 27 | import gateway_base as serializer 28 | sys.stdout = sys.stdout.detach() 29 | sys.stdout.write(serializer.dumps_internal({obj_rep})) 30 | """ 31 | ) 32 | res = subprocess.run( 33 | [str(self.executable), str(script_file)], capture_output=True, check=True 34 | ) 35 | return res.stdout 36 | 37 | def load(self, data: bytes) -> list[str]: 38 | script_file = self.tmp_path.joinpath("load.py") 39 | script_file.write_text( 40 | rf""" 41 | import sys 42 | sys.path.insert(0, {pyimportdir!r}) 43 | import gateway_base as serializer 44 | from io import BytesIO 45 | data = {data!r} 46 | io = BytesIO(data) 47 | loader = serializer.Unserializer(io) 48 | obj = loader.load() 49 | sys.stdout.write(type(obj).__name__ + "\n") 50 | sys.stdout.write(repr(obj)) 51 | """ 52 | ) 53 | res = subprocess.run( 54 | [str(self.executable), str(script_file)], 55 | capture_output=True, 56 | check=True, 57 | ) 58 | 59 | return res.stdout.decode("ascii").splitlines() 60 | 61 | def __repr__(self) -> str: 62 | return f"<PythonWrapper for {self.executable}>" 63 | 64 | 65 | @pytest.fixture 66 | def py3(tmp_path: Path) -> PythonWrapper: 67 | return PythonWrapper(sys.executable, tmp_path) 68 | 69 | 70 | @pytest.fixture 71 | def dump(py3: PythonWrapper): 72 | return py3.dump 73 | 74 | 75 | @pytest.fixture 76 | def load(py3: PythonWrapper): 77 | return py3.load 78 | 79 | 80 | simple_tests = [ 81 | # expected before/after repr 82 | ("int", "4"), 83 | ("float", "3.25"), 84 | ("complex", "(1.78+3.25j)"), 85 | ("list", "[1, 2, 3]"), 86 | ("tuple", "(1, 2, 3)"), 87 | ("dict", "{(1, 2, 3): 32}"), 88 | ] 89 | 90 | 91 | @pytest.mark.parametrize(["tp_name", "repr"], simple_tests) 92 | def test_simple(tp_name, repr, dump, load) -> None: 93 | p = dump(repr) 94 | tp, v = load(p) 95 | assert tp == tp_name 96 | assert v == repr 97 | 98 | 99 | def test_set(load, dump) -> None: 100 | p = dump("set((1, 2, 3))") 101 | 102 | tp, v = load(p) 103 | assert tp == "set" 104 | # assert v == "{1, 2, 3}" # ordering prevents this assertion 105 | assert v.startswith("{") and v.endswith("}") 106 | assert "1" in v and "2" in v and "3" in v 107 | p = dump("set()") 108 | tp, v = load(p) 109 | assert tp == "set" 110 | assert v == "set()" 111 | 112 | 113 | def test_frozenset(load, dump): 114 | p = dump("frozenset((1, 2, 3))") 115 | tp, v = load(p) 116 | assert tp == "frozenset" 117 | assert v == "frozenset({1, 2, 3})" 118 | 119 | 120 | def test_long(load, dump) -> None: 121 | really_big = "9223372036854775807324234" 122 | p = dump(really_big) 123 | tp, v = load(p) 124 | assert tp == "int" 125 | assert v == really_big 126 | 127 | 128 | def test_bytes(dump, load) -> None: 129 | p = dump("b'hi'") 130 | tp, v = load(p) 131 | assert tp == "bytes" 132 | assert v == "b'hi'" 133 | 134 | 135 | def test_str(dump, load) -> None: 136 | p = dump("'xyz'") 137 | tp, s = load(p) 138 | assert tp == "str" 139 | assert s == "'xyz'" 140 | 141 | 142 | def test_unicode(load, dump) -> None: 143 | p = dump("u'hi'") 144 | tp, s = load(p) 145 | assert tp ==
"str" 146 | assert s == "'hi'" 147 | 148 | 149 | def test_bool(dump, load) -> None: 150 | p = dump("True") 151 | tp, s = load(p) 152 | assert s == "True" 153 | assert tp == "bool" 154 | 155 | 156 | def test_none(dump, load) -> None: 157 | p = dump("None") 158 | tp, s = load(p) 159 | assert s == "None" 160 | 161 | 162 | def test_tuple_nested_with_empty_in_between(dump, load) -> None: 163 | p = dump("(1, (), 3)") 164 | tp, s = load(p) 165 | assert tp == "tuple" 166 | assert s == "(1, (), 3)" 167 | 168 | 169 | def test_py2_string_loads() -> None: 170 | """Regression test for #267.""" 171 | assert execnet.loads(b"\x02M\x00\x00\x00\x01aQ") == b"a" 172 | -------------------------------------------------------------------------------- /testing/test_termination.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import shutil 4 | import signal 5 | import subprocess 6 | import sys 7 | from typing import Callable 8 | 9 | import pytest 10 | from test_gateway import TESTTIMEOUT 11 | 12 | import execnet 13 | from execnet.gateway import Gateway 14 | from execnet.gateway_base import ExecModel 15 | from execnet.gateway_base import WorkerPool 16 | 17 | execnetdir = pathlib.Path(execnet.__file__).parent.parent 18 | 19 | skip_win_pypy = pytest.mark.xfail( 20 | condition=hasattr(sys, "pypy_version_info") and sys.platform.startswith("win"), 21 | reason="failing on Windows on PyPy (#63)", 22 | ) 23 | 24 | 25 | def test_exit_blocked_worker_execution_gateway( 26 | anypython: str, makegateway: Callable[[str], Gateway], pool: WorkerPool 27 | ) -> None: 28 | gateway = makegateway("popen//python=%s" % anypython) 29 | gateway.remote_exec( 30 | """ 31 | import time 32 | time.sleep(10.0) 33 | """ 34 | ) 35 | 36 | def doit() -> int: 37 | gateway.exit() 38 | return 17 39 | 40 | reply = pool.spawn(doit) 41 | x = reply.get(timeout=5.0) 42 | assert x == 17 43 | 44 | 45 | def test_endmarker_delivery_on_remote_killterm( 46 | makegateway: Callable[[str], Gateway], execmodel: ExecModel 47 | ) -> None: 48 | if execmodel.backend not in ("thread", "main_thread_only"): 49 | pytest.xfail("test and execnet not compatible to greenlets yet") 50 | gw = makegateway("popen") 51 | q = execmodel.queue.Queue() 52 | channel = gw.remote_exec( 53 | source=""" 54 | import os, time 55 | channel.send(os.getpid()) 56 | time.sleep(100) 57 | """ 58 | ) 59 | pid = channel.receive() 60 | assert isinstance(pid, int) 61 | os.kill(pid, signal.SIGTERM) 62 | channel.setcallback(q.put, endmarker=999) 63 | val = q.get(TESTTIMEOUT) 64 | assert val == 999 65 | err = channel._getremoteerror() 66 | assert isinstance(err, EOFError) 67 | 68 | 69 | @skip_win_pypy 70 | def test_termination_on_remote_channel_receive( 71 | monkeypatch: pytest.MonkeyPatch, makegateway: Callable[[str], Gateway] 72 | ) -> None: 73 | if not shutil.which("ps"): 74 | pytest.skip("need 'ps' command to externally check process status") 75 | monkeypatch.setenv("EXECNET_DEBUG", "2") 76 | gw = makegateway("popen") 77 | pid = gw.remote_exec("import os ; channel.send(os.getpid())").receive() 78 | gw.remote_exec("channel.receive()") 79 | gw._group.terminate() 80 | command = ["ps", "-p", str(pid)] 81 | output = subprocess.run(command, capture_output=True, text=True, check=False) 82 | assert str(pid) not in output.stdout, output 83 | 84 | 85 | def test_close_initiating_remote_no_error( 86 | pytester: pytest.Pytester, anypython: str 87 | ) -> None: 88 | p = pytester.makepyfile( 89 | """ 90 | import sys 91 | sys.path.insert(0, 
sys.argv[1]) 92 | import execnet 93 | gw = execnet.makegateway("popen") 94 | print ("remote_exec1") 95 | ch1 = gw.remote_exec("channel.receive()") 96 | print ("remote_exec1") 97 | ch2 = gw.remote_exec("channel.receive()") 98 | print ("termination") 99 | execnet.default_group.terminate() 100 | """ 101 | ) 102 | popen = subprocess.Popen( 103 | [anypython, str(p), str(execnetdir)], stdout=None, stderr=subprocess.PIPE 104 | ) 105 | out, err = popen.communicate() 106 | print(err) 107 | errstr = err.decode("utf8") 108 | lines = [x for x in errstr.splitlines() if "*sys-package" not in x] 109 | assert not lines 110 | 111 | 112 | def test_terminate_implicit_does_trykill( 113 | pytester: pytest.Pytester, 114 | anypython: str, 115 | capfd: pytest.CaptureFixture[str], 116 | pool: WorkerPool, 117 | ) -> None: 118 | if pool.execmodel.backend not in ("thread", "main_thread_only"): 119 | pytest.xfail("only os threading model supported") 120 | if sys.version_info >= (3, 12): 121 | pytest.xfail( 122 | "since python3.12 this test triggers RuntimeError: can't create new thread at interpreter shutdown" 123 | ) 124 | p = pytester.makepyfile( 125 | """ 126 | import sys 127 | sys.path.insert(0, %r) 128 | import execnet 129 | group = execnet.Group() 130 | gw = group.makegateway("popen") 131 | ch = gw.remote_exec("import time ; channel.send(1) ; time.sleep(100)") 132 | ch.receive() # remote execution started 133 | sys.stdout.write("1\\n") 134 | sys.stdout.flush() 135 | sys.stdout.close() 136 | class FlushNoOp(object): 137 | def flush(self): 138 | pass 139 | # replace stdout since some python implementations 140 | # flush and print errors (for example 3.2) 141 | # see Issue #5319 (from the release notes of 3.2 Alpha 2) 142 | sys.stdout = FlushNoOp() 143 | 144 | # use process at-exit group.terminate call 145 | """ 146 | % str(execnetdir) 147 | ) 148 | popen = subprocess.Popen([str(anypython), str(p)], stdout=subprocess.PIPE) 149 | # sync with start-up 150 | assert popen.stdout is not None 151 | popen.stdout.readline() 152 | reply = pool.spawn(popen.communicate) 153 | reply.get(timeout=50) 154 | out, err = capfd.readouterr() 155 | lines = [x for x in err.splitlines() if "*sys-package" not in x] 156 | assert not lines or "Killed" in err 157 | -------------------------------------------------------------------------------- /testing/test_threadpool.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | from execnet.gateway_base import ExecModel 7 | from execnet.gateway_base import WorkerPool 8 | 9 | 10 | def test_execmodel(execmodel: ExecModel, tmp_path: Path) -> None: 11 | assert execmodel.backend 12 | p = tmp_path / "somefile" 13 | p.write_text("content") 14 | fd = os.open(p, os.O_RDONLY) 15 | f = execmodel.fdopen(fd, "r") 16 | assert f.read() == "content" 17 | f.close() 18 | 19 | 20 | def test_execmodel_basic_attrs(execmodel: ExecModel) -> None: 21 | m = execmodel 22 | assert callable(m.start) 23 | assert m.get_ident() 24 | 25 | 26 | def test_simple(pool: WorkerPool) -> None: 27 | reply = pool.spawn(lambda: 42) 28 | assert reply.get() == 42 29 | 30 | 31 | def test_some(pool: WorkerPool, execmodel: ExecModel) -> None: 32 | q = execmodel.queue.Queue() 33 | num = 4 34 | 35 | def f(i: int) -> None: 36 | q.put(i) 37 | while q.qsize(): 38 | execmodel.sleep(0.01) 39 | 40 | for i in range(num): 41 | pool.spawn(f, i) 42 | for i in range(num): 43 | q.get() 44 | # assert len(pool._running) == 4 45 | assert 
pool.waitall(timeout=1.0) 46 | # execmodel.sleep(1) helps on windows? 47 | assert len(pool._running) == 0 48 | 49 | 50 | def test_running_semnatics(pool: WorkerPool, execmodel: ExecModel) -> None: 51 | q = execmodel.queue.Queue() 52 | 53 | def first() -> None: 54 | q.get() 55 | 56 | reply = pool.spawn(first) 57 | assert reply.running 58 | assert pool.active_count() == 1 59 | q.put(1) 60 | assert pool.waitall() 61 | assert pool.active_count() == 0 62 | assert not reply.running 63 | 64 | 65 | def test_waitfinish_on_reply(pool: WorkerPool) -> None: 66 | l = [] 67 | reply = pool.spawn(lambda: l.append(1)) 68 | reply.waitfinish() 69 | assert l == [1] 70 | reply = pool.spawn(lambda: 0 / 0) 71 | reply.waitfinish() # no exception raised 72 | pytest.raises(ZeroDivisionError, reply.get) 73 | 74 | 75 | @pytest.mark.xfail(reason="WorkerPool does not implement limited size") 76 | def test_limited_size(execmodel: ExecModel) -> None: 77 | pool = WorkerPool(execmodel, size=1) # type: ignore[call-arg] 78 | q = execmodel.queue.Queue() 79 | q2 = execmodel.queue.Queue() 80 | q3 = execmodel.queue.Queue() 81 | 82 | def first() -> None: 83 | q.put(1) 84 | q2.get() 85 | 86 | pool.spawn(first) 87 | assert q.get() == 1 88 | 89 | def second() -> None: 90 | q3.put(3) 91 | 92 | # we spawn a second pool to spawn the second function 93 | # which should block 94 | pool2 = WorkerPool(execmodel) 95 | pool2.spawn(pool.spawn, second) 96 | assert not pool2.waitall(1.0) 97 | assert q3.qsize() == 0 98 | q2.put(2) 99 | assert pool2.waitall() 100 | assert pool.waitall() 101 | 102 | 103 | def test_get(pool: WorkerPool) -> None: 104 | def f() -> int: 105 | return 42 106 | 107 | reply = pool.spawn(f) 108 | result = reply.get() 109 | assert result == 42 110 | 111 | 112 | def test_get_timeout(execmodel: ExecModel, pool: WorkerPool) -> None: 113 | def f() -> int: 114 | execmodel.sleep(0.2) 115 | return 42 116 | 117 | reply = pool.spawn(f) 118 | with pytest.raises(IOError): 119 | reply.get(timeout=0.01) 120 | 121 | 122 | def test_get_excinfo(pool: WorkerPool) -> None: 123 | def f() -> None: 124 | raise ValueError("42") 125 | 126 | reply = pool.spawn(f) 127 | with pytest.raises(ValueError): 128 | reply.get(1.0) 129 | with pytest.raises(ValueError): 130 | reply.get(1.0) 131 | 132 | 133 | def test_waitall_timeout(pool: WorkerPool, execmodel: ExecModel) -> None: 134 | q = execmodel.queue.Queue() 135 | 136 | def f() -> None: 137 | q.get() 138 | 139 | reply = pool.spawn(f) 140 | assert not pool.waitall(0.01) 141 | q.put(None) 142 | reply.get(timeout=1.0) 143 | assert pool.waitall(timeout=0.1) 144 | 145 | 146 | @pytest.mark.skipif(not hasattr(os, "dup"), reason="no os.dup") 147 | def test_pool_clean_shutdown( 148 | pool: WorkerPool, capfd: pytest.CaptureFixture[str] 149 | ) -> None: 150 | q = pool.execmodel.queue.Queue() 151 | 152 | def f() -> None: 153 | q.get() 154 | 155 | pool.spawn(f) 156 | assert not pool.waitall(timeout=1.0) 157 | pool.trigger_shutdown() 158 | with pytest.raises(ValueError): 159 | pool.spawn(f) 160 | 161 | def wait_then_put() -> None: 162 | pool.execmodel.sleep(0.1) 163 | q.put(1) 164 | 165 | pool.execmodel.start(wait_then_put) 166 | assert pool.waitall() 167 | out, err = capfd.readouterr() 168 | assert err == "" 169 | 170 | 171 | def test_primary_thread_integration(execmodel: ExecModel) -> None: 172 | if execmodel.backend not in ("thread", "main_thread_only"): 173 | with pytest.raises(ValueError): 174 | WorkerPool(execmodel=execmodel, hasprimary=True) 175 | return 176 | pool = WorkerPool(execmodel=execmodel, 
hasprimary=True) 177 | queue = execmodel.queue.Queue() 178 | 179 | def do_integrate() -> None: 180 | queue.put(execmodel.get_ident()) 181 | pool.integrate_as_primary_thread() 182 | 183 | execmodel.start(do_integrate) 184 | 185 | def func() -> None: 186 | queue.put(execmodel.get_ident()) 187 | 188 | pool.spawn(func) 189 | ident1 = queue.get() 190 | ident2 = queue.get() 191 | assert ident1 == ident2 192 | pool.terminate() 193 | 194 | 195 | def test_primary_thread_integration_shutdown(execmodel: ExecModel) -> None: 196 | if execmodel.backend not in ("thread", "main_thread_only"): 197 | pytest.skip("can only run with threading") 198 | pool = WorkerPool(execmodel=execmodel, hasprimary=True) 199 | queue = execmodel.queue.Queue() 200 | 201 | def do_integrate() -> None: 202 | queue.put(execmodel.get_ident()) 203 | pool.integrate_as_primary_thread() 204 | 205 | execmodel.start(do_integrate) 206 | queue.get() 207 | 208 | queue2 = execmodel.queue.Queue() 209 | 210 | def get_two() -> None: 211 | queue.put(execmodel.get_ident()) 212 | queue2.get() 213 | 214 | reply = pool.spawn(get_two) 215 | # make sure get_two is running and blocked on queue2 216 | queue.get() 217 | # then shut down 218 | pool.trigger_shutdown() 219 | # and let get_two finish 220 | queue2.put(1) 221 | reply.get() 222 | assert pool.waitall(5.0) 223 | -------------------------------------------------------------------------------- /testing/test_xspec.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import os 4 | import shutil 5 | import subprocess 6 | import sys 7 | from pathlib import Path 8 | from typing import Callable 9 | 10 | import pytest 11 | 12 | import execnet 13 | from execnet import XSpec 14 | from execnet.gateway import Gateway 15 | from execnet.gateway_io import popen_args 16 | from execnet.gateway_io import ssh_args 17 | from execnet.gateway_io import vagrant_ssh_args 18 | 19 | skip_win_pypy = pytest.mark.xfail( 20 | condition=hasattr(sys, "pypy_version_info") and sys.platform.startswith("win"), 21 | reason="failing on Windows on PyPy (#63)", 22 | ) 23 | 24 | 25 | class TestXSpec: 26 | def test_norm_attributes(self) -> None: 27 | spec = XSpec( 28 | r"socket=192.168.102.2:8888//python=c:/this/python3.8//chdir=d:\hello" 29 | ) 30 | assert spec.socket == "192.168.102.2:8888" 31 | assert spec.python == "c:/this/python3.8" 32 | assert spec.chdir == r"d:\hello" 33 | assert spec.nice is None 34 | assert not hasattr(spec, "_xyz") 35 | 36 | with pytest.raises(AttributeError): 37 | spec._hello() # type: ignore[misc,operator] 38 | 39 | spec = XSpec("socket=192.168.102.2:8888//python=python2.5//nice=3") 40 | assert spec.socket == "192.168.102.2:8888" 41 | assert spec.python == "python2.5" 42 | assert spec.chdir is None 43 | assert spec.nice == "3" 44 | 45 | spec = XSpec("ssh=user@host//chdir=/hello/this//python=/usr/bin/python2.5") 46 | assert spec.ssh == "user@host" 47 | assert spec.python == "/usr/bin/python2.5" 48 | assert spec.chdir == "/hello/this" 49 | 50 | spec = XSpec("popen") 51 | assert spec.popen is True 52 | 53 | def test_ssh_options(self) -> None: 54 | spec = XSpec("ssh=-p 22100 user@host//python=python3") 55 | assert spec.ssh == "-p 22100 user@host" 56 | assert spec.python == "python3" 57 | 58 | spec = XSpec( 59 | "ssh=-i ~/.ssh/id_rsa-passwordless_login -p 22100 user@host//python=python3" 60 | ) 61 | assert spec.ssh == "-i ~/.ssh/id_rsa-passwordless_login -p 22100 user@host" 62 | assert spec.python == "python3" 63 | 64 | def 
test_execmodel(self) -> None: 65 | spec = XSpec("execmodel=thread") 66 | assert spec.execmodel == "thread" 67 | spec = XSpec("execmodel=eventlet") 68 | assert spec.execmodel == "eventlet" 69 | 70 | def test_ssh_options_and_config(self) -> None: 71 | spec = XSpec("ssh=-p 22100 user@host//python=python3") 72 | spec.ssh_config = "/home/user/ssh_config" 73 | assert ssh_args(spec)[:6] == ["ssh", "-C", "-F", spec.ssh_config, "-p", "22100"] 74 | 75 | def test_vagrant_options(self) -> None: 76 | spec = XSpec("vagrant_ssh=default//python=python3") 77 | assert vagrant_ssh_args(spec)[:-1] == ["vagrant", "ssh", "default", "--", "-C"] 78 | 79 | def test_popen_with_sudo_python(self) -> None: 80 | spec = XSpec("popen//python=sudo python3") 81 | assert popen_args(spec) == [ 82 | "sudo", 83 | "python3", 84 | "-u", 85 | "-c", 86 | "import sys;exec(eval(sys.stdin.readline()))", 87 | ] 88 | 89 | def test_env(self) -> None: 90 | xspec = XSpec("popen//env:NAME=value1") 91 | assert xspec.env["NAME"] == "value1" 92 | 93 | def test__samefilesystem(self) -> None: 94 | assert XSpec("popen")._samefilesystem() 95 | assert XSpec("popen//python=123")._samefilesystem() 96 | assert not XSpec("popen//chdir=hello")._samefilesystem() 97 | 98 | def test__spec_spec(self) -> None: 99 | for x in ("popen", "popen//python=this"): 100 | assert XSpec(x)._spec == x 101 | 102 | def test_samekeyword_twice_raises(self) -> None: 103 | pytest.raises(ValueError, XSpec, "popen//popen") 104 | pytest.raises(ValueError, XSpec, "popen//popen=123") 105 | 106 | def test_unknown_keys_allowed(self) -> None: 107 | xspec = XSpec("hello=3") 108 | assert xspec.hello == "3" 109 | 110 | def test_repr_and_string(self) -> None: 111 | for x in ("popen", "popen//python=this"): 112 | assert repr(XSpec(x)).find("popen") != -1 113 | assert str(XSpec(x)) == x 114 | 115 | def test_hash_equality(self) -> None: 116 | assert XSpec("popen") == XSpec("popen") 117 | assert hash(XSpec("popen")) == hash(XSpec("popen")) 118 | assert XSpec("popen//python=123") != XSpec("popen") 119 | assert hash(XSpec("socket=hello:8080")) != hash(XSpec("popen")) 120 | 121 | 122 | class TestMakegateway: 123 | def test_no_type(self, makegateway: Callable[[str], Gateway]) -> None: 124 | pytest.raises(ValueError, lambda: makegateway("hello")) 125 | 126 | @skip_win_pypy 127 | def test_popen_default(self, makegateway: Callable[[str], Gateway]) -> None: 128 | gw = makegateway("") 129 | assert gw.spec.popen 130 | assert gw.spec.python is None 131 | rinfo = gw._rinfo() 132 | # assert rinfo.executable == sys.executable 133 | assert rinfo.cwd == os.getcwd() 134 | assert rinfo.version_info == sys.version_info 135 | 136 | @pytest.mark.skipif("not hasattr(os, 'nice')") 137 | @pytest.mark.xfail(reason="fails due to timing problems on busy single-core VMs") 138 | def test_popen_nice(self, makegateway: Callable[[str], Gateway]) -> None: 139 | gw = makegateway("popen") 140 | 141 | def getnice(channel) -> None: 142 | import os 143 | 144 | if hasattr(os, "nice"): 145 | channel.send(os.nice(0)) 146 | else: 147 | channel.send(None) 148 | 149 | remotenice = gw.remote_exec(getnice).receive() 150 | assert isinstance(remotenice, int) 151 | gw.exit() 152 | if remotenice is not None: 153 | gw = makegateway("popen//nice=5") 154 | remotenice2 = gw.remote_exec(getnice).receive() 155 | assert remotenice2 == remotenice + 5 156 | 157 | def test_popen_env(self, makegateway: Callable[[str], Gateway]) -> None: 158 | gw = makegateway("popen//env:NAME123=123") 159 | ch = gw.remote_exec( 160 | """ 161 | import os 162 | 
channel.send(os.environ['NAME123']) 163 | """ 164 | ) 165 | value = ch.receive() 166 | assert value == "123" 167 | 168 | @skip_win_pypy 169 | def test_popen_explicit(self, makegateway: Callable[[str], Gateway]) -> None: 170 | gw = makegateway("popen//python=%s" % sys.executable) 171 | assert gw.spec.python == sys.executable 172 | rinfo = gw._rinfo() 173 | assert rinfo.executable == sys.executable 174 | assert rinfo.cwd == os.getcwd() 175 | assert rinfo.version_info == sys.version_info 176 | 177 | @skip_win_pypy 178 | def test_popen_chdir_absolute( 179 | self, tmp_path: Path, makegateway: Callable[[str], Gateway] 180 | ) -> None: 181 | gw = makegateway("popen//chdir=%s" % tmp_path) 182 | rinfo = gw._rinfo() 183 | assert rinfo.cwd == str(tmp_path.resolve()) 184 | 185 | @skip_win_pypy 186 | def test_popen_chdir_newsub( 187 | self, 188 | monkeypatch: pytest.MonkeyPatch, 189 | tmp_path: Path, 190 | makegateway: Callable[[str], Gateway], 191 | ) -> None: 192 | monkeypatch.chdir(tmp_path) 193 | gw = makegateway("popen//chdir=hello") 194 | rinfo = gw._rinfo() 195 | expected = str(tmp_path.joinpath("hello").resolve()).lower() 196 | assert rinfo.cwd.lower() == expected 197 | 198 | def test_ssh(self, specssh: XSpec, makegateway: Callable[[str], Gateway]) -> None: 199 | sshhost = specssh.ssh 200 | gw = makegateway("ssh=%s//id=ssh1" % sshhost) 201 | assert gw.id == "ssh1" 202 | 203 | def test_vagrant(self, makegateway: Callable[[str], Gateway]) -> None: 204 | vagrant_bin = shutil.which("vagrant") 205 | if vagrant_bin is None: 206 | pytest.skip("Vagrant binary not in PATH") 207 | res = subprocess.run( 208 | [vagrant_bin, "status", "default", "--machine-readable"], 209 | capture_output=True, 210 | encoding="utf-8", 211 | errors="replace", 212 | check=True, 213 | ).stdout 214 | print(res) 215 | if ",default,state,shutoff\n" in res: 216 | pytest.xfail("vm shutoff, run `vagrant up` first") 217 | if ",default,state,not_created\n" in res: 218 | pytest.xfail("vm not created, run `vagrant up` first") 219 | if ",default,state,running\n" not in res: 220 | pytest.fail("unknown vm state") 221 | 222 | gw = makegateway("vagrant_ssh=default//python=python3") 223 | rinfo = gw._rinfo() 224 | assert rinfo.cwd == "/home/vagrant" 225 | assert rinfo.executable == "/usr/bin/python" 226 | 227 | def test_socket( 228 | self, specsocket: XSpec, makegateway: Callable[[str], Gateway] 229 | ) -> None: 230 | gw = makegateway("socket=%s//id=sock1" % specsocket.socket) 231 | rinfo = gw._rinfo() 232 | assert rinfo.executable 233 | assert rinfo.cwd 234 | assert rinfo.version_info 235 | assert gw.id == "sock1" 236 | # we cannot instantiate a second gateway 237 | 238 | @pytest.mark.xfail(reason="we can't instantiate a second gateway") 239 | def test_socket_second( 240 | self, specsocket: XSpec, makegateway: Callable[[str], Gateway] 241 | ) -> None: 242 | gw = makegateway("socket=%s//id=sock1" % specsocket.socket) 243 | gw2 = makegateway("socket=%s//id=sock1" % specsocket.socket) 244 | rinfo = gw._rinfo() 245 | rinfo2 = gw2._rinfo() 246 | assert rinfo.executable == rinfo2.executable 247 | assert rinfo.cwd == rinfo2.cwd 248 | assert rinfo.version_info == rinfo2.version_info 249 | 250 | def test_socket_installvia(self) -> None: 251 | group = execnet.Group() 252 | group.makegateway("popen//id=p1") 253 | gw = group.makegateway("socket//installvia=p1//id=s1") 254 | assert gw.id == "s1" 255 | assert gw.remote_status() 256 | group.terminate() 257 | -------------------------------------------------------------------------------- /tox.ini: 
-------------------------------------------------------------------------------- 1 | [tox] 2 | envlist=py{38,39,310,311,312,pypy38},docs,linting 3 | isolated_build = true 4 | 5 | [testenv] 6 | usedevelop=true 7 | setenv = 8 | PYTHONWARNDEFAULTENCODING = 1 9 | deps= 10 | pytest 11 | pytest-timeout 12 | passenv = GITHUB_ACTIONS, HOME, USER, XDG_* 13 | commands= 14 | python -m pytest {posargs:testing} 15 | 16 | [testenv:docs] 17 | skipsdist = True 18 | usedevelop = True 19 | changedir = doc 20 | deps = 21 | sphinx 22 | PyYAML 23 | commands = 24 | sphinx-build -W -b html . _build 25 | 26 | [testenv:linting] 27 | skip_install = True 28 | deps = pre-commit>=1.11.0 29 | commands = pre-commit run --all-files --show-diff-on-failure 30 | 31 | [pytest] 32 | timeout = 20 33 | addopts = -ra 34 | testpaths = testing 35 | --------------------------------------------------------------------------------
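The tox configuration above closes out the repository: each environment runs "python -m pytest testing", and the [pytest] section applies a 20-second per-test timeout via pytest-timeout. The suites it drives revolve around one core pattern: build a Group of gateways, broadcast code with remote_exec(), and collect the results through the returned MultiChannel. The sketch below illustrates that round trip; it mirrors test_multichannel_send_each in testing/test_multi.py, is not a file of this repository, and assumes execnet is installed and local "popen" gateways can be spawned.

import execnet

# Two local subprocess ("popen") gateways managed as one Group.
group = execnet.Group(["popen"] * 2)
try:
    # remote_exec() on the Group returns a MultiChannel spanning every gateway.
    mch = group.remote_exec(
        """
        channel.send(channel.receive() + 1)
        """
    )
    mch.send_each(41)                      # broadcast 41 to each remote channel
    assert mch.receive_each() == [42, 42]  # one incremented reply per gateway
finally:
    group.terminate(1.0)                   # give gateways up to 1s to exit before termination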