├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── benchmarks
├── mapper.py
├── multiprocessing_vs_zproc.py
└── webpage_downloader.py
├── docs
├── Makefile
├── api.rst
├── api
│ ├── context.rst
│ ├── context
│ │ ├── call_when.rst
│ │ ├── call_when_change.rst
│ │ ├── call_when_equality.rst
│ │ ├── call_when_xxx_header.rst
│ │ └── params
│ │ │ ├── process_kwargs.rst
│ │ │ └── return.rst
│ ├── exceptions.rst
│ ├── functions.rst
│ ├── process.rst
│ ├── snippets
│ │ ├── backend.rst
│ │ └── server_address.rst
│ ├── state.rst
│ └── state
│ │ ├── get_raw_update.rst
│ │ ├── get_when.rst
│ │ ├── get_when_change.rst
│ │ ├── get_when_equality.rst
│ │ └── params
│ │ ├── duplicate_okay.rst
│ │ ├── exclude.rst
│ │ ├── key.rst
│ │ ├── keys.rst
│ │ ├── live.rst
│ │ ├── return.rst
│ │ ├── test_fn.rst
│ │ ├── timeout.rst
│ │ └── value.rst
├── build.sh
├── conf.py
├── index.rst
├── make.bat
├── user.rst
└── user
│ ├── atomicity.rst
│ ├── communication.rst
│ ├── distributed.rst
│ ├── introduction.rst
│ ├── security.rst
│ ├── spec.py
│ └── state_watching.rst
├── examples
├── atomicity.py
├── chain_reaction.py
├── cookie_eater.py
├── cookie_eater_extreme.py
├── luck_test.py
├── nested_procs.py
├── peanut_processor.py
├── remote_exceptions.py
└── state_watchers.py
├── mypy.ini
├── readthedocs.yml
├── requirements.in
├── requirements.txt
├── setup.py
├── tests
├── resillience_tests
│ ├── README.md
│ ├── nested_process.py
│ ├── process_wait.py
│ ├── start_server.py
│ ├── state_watchers.py
│ └── swarm.py
├── test_atomic_contract.py
├── test_dict_api.py
├── test_liveness.py
├── test_maps.py
├── test_namespaces.py
├── test_ping.py
├── test_process_kwargs.py
├── test_process_wait.py
├── test_retries.py
├── test_server_address.py
└── test_state_watchers.py
└── zproc
├── __init__.py
├── __version__.py
├── child.py
├── consts.py
├── context.py
├── exceptions.py
├── process.py
├── serializer.py
├── server
├── __init__.py
├── main.py
└── tools.py
├── state
├── __init__.py
├── _type.py
├── _type.pyi
├── server.py
└── state.py
├── task
├── __init__.py
├── map_plus.py
├── result.py
├── server.py
├── swarm.py
└── worker.py
└── util.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | zproc/*.c
3 | zproc/*.so
4 |
5 | # Created by https://www.gitignore.io/api/python,pycharm
6 |
7 | ### PyCharm ###
8 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
9 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
10 |
11 | # User-specific stuff
12 | .idea/**/workspace.xml
13 | .idea/**/tasks.xml
14 | .idea/**/usage.statistics.xml
15 | .idea/**/dictionaries
16 | .idea/**/shelf
17 |
18 | # Sensitive or high-churn files
19 | .idea/**/dataSources/
20 | .idea/**/dataSources.ids
21 | .idea/**/dataSources.local.xml
22 | .idea/**/sqlDataSources.xml
23 | .idea/**/dynamic.xml
24 | .idea/**/uiDesigner.xml
25 | .idea/**/dbnavigator.xml
26 |
27 | # Gradle
28 | .idea/**/gradle.xml
29 | .idea/**/libraries
30 |
31 | # Gradle and Maven with auto-import
32 | # When using Gradle or Maven with auto-import, you should exclude module files,
33 | # since they will be recreated, and may cause churn. Uncomment if using
34 | # auto-import.
35 | # .idea/modules.xml
36 | # .idea/*.iml
37 | # .idea/modules
38 |
39 | # CMake
40 | cmake-build-*/
41 |
42 | # Mongo Explorer plugin
43 | .idea/**/mongoSettings.xml
44 |
45 | # File-based project format
46 | *.iws
47 |
48 | # IntelliJ
49 | out/
50 |
51 | # mpeltonen/sbt-idea plugin
52 | .idea_modules/
53 |
54 | # JIRA plugin
55 | atlassian-ide-plugin.xml
56 |
57 | # Cursive Clojure plugin
58 | .idea/replstate.xml
59 |
60 | # Crashlytics plugin (for Android Studio and IntelliJ)
61 | com_crashlytics_export_strings.xml
62 | crashlytics.properties
63 | crashlytics-build.properties
64 | fabric.properties
65 |
66 | # Editor-based Rest Client
67 | .idea/httpRequests
68 |
69 | ### PyCharm Patch ###
70 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
71 |
72 | # *.iml
73 | # modules.xml
74 | # .idea/misc.xml
75 | # *.ipr
76 |
77 | # Sonarlint plugin
78 | .idea/sonarlint
79 |
80 | ### Python ###
81 | # Byte-compiled / optimized / DLL files
82 | __pycache__/
83 | *.py[cod]
84 | *$py.class
85 |
86 | # C extensions
87 | *.so
88 |
89 | # Distribution / packaging
90 | .Python
91 | build/
92 | develop-eggs/
93 | dist/
94 | downloads/
95 | eggs/
96 | .eggs/
97 | lib/
98 | lib64/
99 | parts/
100 | sdist/
101 | var/
102 | wheels/
103 | *.egg-info/
104 | .installed.cfg
105 | *.egg
106 | MANIFEST
107 |
108 | # PyInstaller
109 | # Usually these files are written by a python script from a template
110 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
111 | *.manifest
112 | *.spec
113 |
114 | # Installer logs
115 | pip-log.txt
116 | pip-delete-this-directory.txt
117 |
118 | # Unit test / coverage reports
119 | htmlcov/
120 | .tox/
121 | .coverage
122 | .coverage.*
123 | .cache
124 | nosetests.xml
125 | coverage.xml
126 | *.cover
127 | .hypothesis/
128 | .pytest_cache/
129 |
130 | # Translations
131 | *.mo
132 | *.pot
133 |
134 | # Django stuff:
135 | *.log
136 | local_settings.py
137 | db.sqlite3
138 |
139 | # Flask stuff:
140 | instance/
141 | .webassets-cache
142 |
143 | # Scrapy stuff:
144 | .scrapy
145 |
146 | # Sphinx documentation
147 | docs/_build/
148 |
149 | # PyBuilder
150 | target/
151 |
152 | # Jupyter Notebook
153 | .ipynb_checkpoints
154 |
155 | # pyenv
156 | .python-version
157 |
158 | # celery beat schedule file
159 | celerybeat-schedule
160 |
161 | # SageMath parsed files
162 | *.sage.py
163 |
164 | # Environments
165 | .env
166 | .venv
167 | env/
168 | venv/
169 | ENV/
170 | env.bak/
171 | venv.bak/
172 |
173 | # Spyder project settings
174 | .spyderproject
175 | .spyproject
176 |
177 | # Rope project settings
178 | .ropeproject
179 |
180 | # mkdocs documentation
181 | /site
182 |
183 | # mypy
184 | .mypy_cache/
185 |
186 | ### Python Patch ###
187 | .venv/
188 |
189 | ### Python.VirtualEnv Stack ###
190 | # Virtualenv
191 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/
192 | [Bb]in
193 | [Ii]nclude
194 | [Ll]ib
195 | [Ll]ib64
196 | [Ll]ocal
197 | [Ss]cripts
198 | pyvenv.cfg
199 | pip-selfcheck.json
200 |
201 |
202 | # End of https://www.gitignore.io/api/python,pycharm
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Py Campers
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include zproc/_type.pyi
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [](https://pypi.org/project/zproc/)
6 |
7 |
8 |
9 | # The idea
10 |
11 | ZProc is an experiment that aims to unify how we program multitasking and distributed applications.
12 |
13 | If it succeeds, programmers can have a _single_ method to program in this general area of computing, at any level in the stack.
14 |
15 | ---
16 |
17 | Perhaps, the ethos of this project is best summarised by this quote from the late Joe Armstrong:
18 |
19 | > I want one way to program, not many.
20 |
21 | # Implementation
22 |
23 | The current solution is a centralized one.
24 |
25 | At the heart lies a Python program,
26 | that serves a data structure (A python dict),
27 | which supports event sourcing, time travel, task sequencing, etc.
28 |
29 | Processes simply mutate this remote data structure, and communicate using the events it emits.
30 |
31 | And it does this using [zeromq](http://zeromq.org/) — in a way that users don't need to concern themselves with the intricacies of networking and message passing — while still benefiting from the powers of [CSP](https://en.wikipedia.org/wiki/Communicating_sequential_processes).
32 |
33 | This project is currently understood to be at [TRL3](https://en.wikipedia.org/wiki/Technology_readiness_level).
34 |
--------------------------------------------------------------------------------
/benchmarks/mapper.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | from time import perf_counter
3 |
4 | import zproc
5 |
6 | ctx = zproc.Context()
7 | ctx.workers.start(2)
8 |
9 |
10 | def sq(x):
11 | return x ** 2
12 |
13 |
14 | SAMPLES = 10000000
15 |
16 | s = perf_counter()
17 | list(ctx.workers.map(sq, range(SAMPLES)))
18 | print(perf_counter() - s)
19 |
20 | with multiprocessing.Pool(2) as p:
21 | s = perf_counter()
22 | p.map(sq, range(SAMPLES))
23 | print(perf_counter() - s)
24 |
--------------------------------------------------------------------------------
/benchmarks/multiprocessing_vs_zproc.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import time
3 |
4 | import zproc
5 |
6 | ctx = zproc.Context()
7 | ctx.workers.start()
8 |
9 |
10 | def test123(x):
11 | return x * x
12 |
13 |
14 | with multiprocessing.Pool() as p:
15 | s = time.perf_counter()
16 | print(len(p.map(test123, range(10 ** 7))))
17 | e = time.perf_counter()
18 | print("Process Pool-", e - s)
19 |
20 |
21 | s = time.perf_counter()
22 | print(len(ctx.workers.map(test123, range(10 ** 7))))
23 | e = time.perf_counter()
24 | print("Zproc Worker-", e - s)
25 |
--------------------------------------------------------------------------------
/benchmarks/webpage_downloader.py:
--------------------------------------------------------------------------------
1 | """
2 | Downloads a couple of web-pages and compares their sizes.
3 |
4 | Prints out the time taken by different frameworks to complete the task
5 | (excluding warm-up/setup time)
6 | """
7 | import asyncio
8 | from time import time
9 |
10 | import zproc
11 |
12 | SAMPLES = 1
13 |
14 | sites = list(
15 | {
16 | "https://www.yahoo.com/",
17 | "http://www.cnn.com",
18 | "http://www.python.org",
19 | "http://www.jython.org",
20 | "http://www.pypy.org",
21 | "http://www.perl.org",
22 | "http://www.cisco.com",
23 | "http://www.facebook.com",
24 | "http://www.twitter.com",
25 | "http://www.macrumors.com/",
26 | "http://arstechnica.com/",
27 | "http://www.reuters.com/",
28 | "http://abcnews.go.com/",
29 | "http://www.cnbc.com/",
30 | "https://wordpress.org/",
31 | "https://lists.zeromq.org/",
32 | "https://news.ycombinator.com/",
33 | "https://open.spotify.com/",
34 | "https://keep.google.com/",
35 | "https://jsonformatter.curiousconcept.com/",
36 | "https://www.reddit.com/",
37 | "https://www.youtube.com/",
38 | "https://bitbucket.org/",
39 | "http://showrss.info/timeline",
40 | "https://dev.solus-project.com/",
41 | "https://webpack.js.org/",
42 | "https://github.com/",
43 | "https://stackoverflow.com/",
44 | "https://github.com/",
45 | }
46 | )
47 |
48 |
49 | def print_result(results):
50 | winner = max(results, key=lambda x: x[0])
51 | print()
52 | print("largest:", winner[1], winner[0] / 1024, "KB")
53 | print()
54 |
55 |
56 | ########
57 | # Zproc
58 | ########
59 |
60 | ctx = zproc.Context()
61 | ctx.state.setdefault("results", [])
62 |
63 |
64 | @zproc.atomic
65 | def save(snap, size, url):
66 | print(url, int(size / 1024), "KB")
67 |
68 | snap["results"].append((size, url))
69 |
70 |
71 | def downloader(state, url):
72 | size = 0
73 | for _ in range(SAMPLES):
74 | size += len(requests.get(url, headers={"Cache-Control": "no-cache"}).text)
75 | size /= SAMPLES
76 |
77 | save(state, size, url)
78 |
79 |
80 | s = time()
81 |
82 | for url in sites:
83 | ctx.spawn(downloader, args=[url])
84 | ctx.wait()
85 |
86 | print_result(ctx.state["results"])
87 |
88 | t = time() - s
89 |
90 | print("Pure ZProc took: {} sec".format(t))
91 |
92 | ##############
93 | # Worker Map
94 | ##############
95 |
96 |
97 | def map_downloader(url):
98 | size = 0
99 | for _ in range(SAMPLES):
100 | size += len(requests.get(url, headers={"Cache-Control": "no-cache"}).text)
101 | size /= SAMPLES
102 | print(url, int(size / 1024), "KB")
103 |
104 | return size, url
105 |
106 |
107 | s = time()
108 |
109 | print_result(ctx.worker_map(map_downloader, sites, count=len(sites)))
110 |
111 | t = time() - s
112 |
113 | print("ZProc Worker Map took: {} sec".format(t))
114 |
115 |
116 | ########
117 | # Async
118 | ########
119 |
120 | import grequests
121 |
122 |
123 | async def main():
124 | results = []
125 | for site in sites:
126 | size = 0
127 | for _ in range(SAMPLES):
128 | req = await grequests.get(site, headers={"Cache-Control": "no-cache"})
129 | size += len(req.text)
130 | size /= SAMPLES
131 |
132 | results.append((size, site))
133 |
134 | print_result(results)
135 |
136 |
137 | loop = asyncio.get_event_loop()
138 |
139 | s = time()
140 |
141 | loop.run_until_complete(main())
142 |
143 | t = time() - s
144 |
145 | print("Asyncio took: {} sec".format(t))
146 |
147 | #######
148 | # Trio
149 | #######
150 | #
151 | #
152 | # async def downloader():
153 | # size = 0
154 | # for _ in range(SAMPLES):
155 | # size += len(requests.get(url, headers={"Cache-Control": "no-cache"}).text)
156 | # size /= SAMPLES
157 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | .. _api-doc:
2 |
3 | The API Documentation
4 | ---------------------
5 |
6 | If you are looking for information on a specific function, class, or method,
7 | this part of the documentation is for you.
8 |
9 | .. toctree::
10 |
11 | api/process.rst
12 | api/state.rst
13 | api/context.rst
14 | api/functions.rst
15 | api/exceptions.rst
--------------------------------------------------------------------------------
/docs/api/context.rst:
--------------------------------------------------------------------------------
1 | Context
2 | -------
3 |
4 | .. autoclass:: zproc.Context
5 | :inherited-members:
--------------------------------------------------------------------------------
/docs/api/context/call_when.rst:
--------------------------------------------------------------------------------
1 | .. include:: /api/context/call_when_xxx_header.rst
2 |
3 | :param test_fn:
4 | .. include:: /api/state/params/test_fn.rst
5 |
6 | :param live:
7 | .. include:: /api/state/params/live.rst
8 |
9 | :param timeout:
10 | .. include:: /api/state/params/timeout.rst
11 |
12 | :param identical_okay:
13 | .. include:: /api/state/params/identical_okay.rst
14 |
15 | :param \*\*process_kwargs:
16 | .. include:: /api/context/params/process_kwargs.rst
17 |
18 | .. include:: /api/context/params/return.rst
19 |
--------------------------------------------------------------------------------
/docs/api/context/call_when_change.rst:
--------------------------------------------------------------------------------
1 | .. include:: /api/context/call_when_xxx_header.rst
2 |
3 | :param \*keys:
4 | .. include:: /api/state/params/keys.rst
5 |
6 | :param exclude:
7 | .. include:: /api/state/params/exclude.rst
8 |
9 | :param live:
10 | .. include:: /api/state/params/live.rst
11 |
12 | :param timeout:
13 | .. include:: /api/state/params/timeout.rst
14 |
15 | :param identical_okay:
16 | .. include:: /api/state/params/identical_okay.rst
17 |
18 | :param \*\*process_kwargs:
19 | .. include:: /api/context/params/process_kwargs.rst
20 |
21 | .. include:: /api/context/params/return.rst
22 |
--------------------------------------------------------------------------------
/docs/api/context/call_when_equality.rst:
--------------------------------------------------------------------------------
1 | .. include:: /api/context/call_when_xxx_header.rst
2 |
3 | :param key:
4 | .. include:: /api/state/params/key.rst
5 |
6 | :param value:
7 | .. include:: /api/state/params/value.rst
8 |
9 | :param live:
10 | .. include:: /api/state/params/live.rst
11 |
12 | :param timeout:
13 | .. include:: /api/state/params/timeout.rst
14 |
15 | :param identical_okay:
16 | .. include:: /api/state/params/identical_okay.rst
17 |
18 | :param \*\*process_kwargs:
19 | .. include:: /api/context/params/process_kwargs.rst
20 |
21 | .. include:: /api/context/params/return.rst
22 |
23 |
--------------------------------------------------------------------------------
/docs/api/context/call_when_xxx_header.rst:
--------------------------------------------------------------------------------
1 | Spawns a new :py:class:`Process`,
2 | and then calls the wrapped function inside of that new process.
3 |
4 | *The wrapped function is run with the following signature:*
5 |
6 | ``target(snap, state, *args, **kwargs)``
7 |
8 | *Where:*
9 |
10 | - ``target`` is the wrapped function.
11 |
12 | - ``snap`` is a ``dict`` containing a copy of the state.
13 |
14 |   It serves as a *snapshot* of the state,
15 | corresponding to the state-change for which the wrapped function is being called.
16 |
17 | - ``state`` is a :py:class:`State` instance.
18 |
19 | - ``*args`` and ``**kwargs`` are passed on from ``**process_kwargs``.
20 |
--------------------------------------------------------------------------------
/docs/api/context/params/process_kwargs.rst:
--------------------------------------------------------------------------------
1 | Keyword arguments that :py:class:`Process` takes, except ``address`` and ``target``.
2 |
3 | If provided, these shall override :py:attr:`process_kwargs`.
4 |
--------------------------------------------------------------------------------
/docs/api/context/params/return.rst:
--------------------------------------------------------------------------------
1 | :return:
2 | A decorator function
3 |
4 | The decorator function will return the :py:class:`Process` instance created.
5 |
--------------------------------------------------------------------------------
/docs/api/exceptions.rst:
--------------------------------------------------------------------------------
1 | Exceptions
2 | ----------
3 |
4 | .. autoexception:: zproc.ProcessWaitError
5 | .. autoexception:: zproc.RemoteException
6 | .. autoexception:: zproc.SignalException
7 | .. autoexception:: zproc.ProcessExit
8 |
--------------------------------------------------------------------------------
/docs/api/functions.rst:
--------------------------------------------------------------------------------
1 | Functions
2 | ---------
3 |
4 | .. autofunction:: zproc.ping
5 | .. autofunction:: zproc.atomic
6 | .. autofunction:: zproc.start_server
7 | .. autofunction:: zproc.signal_to_exception
8 | .. autofunction:: zproc.exception_to_signal
--------------------------------------------------------------------------------
/docs/api/process.rst:
--------------------------------------------------------------------------------
1 | Process
2 | -------
3 |
4 | .. autoclass:: zproc.Process
5 | :inherited-members:
--------------------------------------------------------------------------------
/docs/api/snippets/backend.rst:
--------------------------------------------------------------------------------
1 | The backend to use for launching the Processes.
2 |
3 | For example, you may use :py:class:`threading.Thread` as the backend.
4 |
5 | The ``backend`` must -
6 |
7 | - Be a Callable.
8 | - Accept ``target``, ``daemon``, ``args``, and ``kwargs`` as keyword arguments.
9 |
10 | .. warning::
11 |
12 | Not guaranteed to work with anything other than :py:class:`multiprocessing.Process`.
13 |
--------------------------------------------------------------------------------
/docs/api/snippets/server_address.rst:
--------------------------------------------------------------------------------
1 | The address of the ZProc server.
2 |
3 | Please read :ref:`server-address-spec` for a detailed explanation.
4 |
--------------------------------------------------------------------------------
/docs/api/state.rst:
--------------------------------------------------------------------------------
1 | State
2 | -----
3 |
4 | .. autoclass:: zproc.State
5 | :inherited-members:
6 | :exclude-members: clear, get, items, keys, pop, popitem, setdefault, update, values
--------------------------------------------------------------------------------
/docs/api/state/get_raw_update.rst:
--------------------------------------------------------------------------------
1 | It is preferable to use :py:meth:`get_when` over this,
2 | since it avoids common pitfalls associated with state-watching.
3 |
4 | *This is an advanced API, and should be used with caution.*
5 |
6 | Meant to be used as a Context Manager.
7 |
8 | .. code-block:: python
9 |
10 | with state.get_raw_update(identical_okay=True) as get:
11 | while True:
12 | before, after, identical = get()
13 | print("new update:", before, after, identical)
14 |
15 | :param live:
16 | .. include:: /api/state/params/live.rst
17 |
18 | :param timeout:
19 | .. include:: /api/state/params/timeout.rst
20 |
21 | :param identical_okay:
22 | .. include:: /api/state/params/identical_okay.rst
23 |
--------------------------------------------------------------------------------
/docs/api/state/get_when.rst:
--------------------------------------------------------------------------------
1 | :param test_fn:
2 | .. include:: /api/state/params/test_fn.rst
3 |
4 | :param live:
5 | .. include:: /api/state/params/live.rst
6 |
7 | :param timeout:
8 | .. include:: /api/state/params/timeout.rst
9 |
10 | :param identical_okay:
11 | .. include:: /api/state/params/identical_okay.rst
12 |
13 | .. include:: /api/state/params/return.rst
--------------------------------------------------------------------------------
/docs/api/state/get_when_change.rst:
--------------------------------------------------------------------------------
1 | :param \*keys:
2 | .. include:: /api/state/params/keys.rst
3 |
4 | :param exclude:
5 | .. include:: /api/state/params/exclude.rst
6 |
7 | :param live:
8 | .. include:: /api/state/params/live.rst
9 |
10 | :param timeout:
11 | .. include:: /api/state/params/timeout.rst
12 |
13 | .. include:: /api/state/params/return.rst
--------------------------------------------------------------------------------
/docs/api/state/get_when_equality.rst:
--------------------------------------------------------------------------------
1 | :param key:
2 | .. include:: /api/state/params/key.rst
3 |
4 | :param value:
5 | .. include:: /api/state/params/value.rst
6 |
7 | :param live:
8 | .. include:: /api/state/params/live.rst
9 |
10 | :param timeout:
11 | .. include:: /api/state/params/timeout.rst
12 |
13 | :param identical_okay:
14 | .. include:: /api/state/params/identical_okay.rst
15 |
16 | .. include:: /api/state/params/return.rst
--------------------------------------------------------------------------------
/docs/api/state/params/duplicate_okay.rst:
--------------------------------------------------------------------------------
1 | Whether it's okay to process duplicate updates.
2 |
3 | Please read :ref:`duplicate-events` for a detailed explanation.
--------------------------------------------------------------------------------
/docs/api/state/params/exclude.rst:
--------------------------------------------------------------------------------
1 | Reverse the lookup logic i.e.,
2 |
3 | Watch for all changes in the state *except* in ``*keys``.
4 |
5 | If ``*keys`` is not provided, then this has no effect. (default)
--------------------------------------------------------------------------------
/docs/api/state/params/key.rst:
--------------------------------------------------------------------------------
1 | Some key in the state ``dict``.
2 |
--------------------------------------------------------------------------------
/docs/api/state/params/keys.rst:
--------------------------------------------------------------------------------
1 | Watch for changes on these keys in the state ``dict``.
2 |
3 | If this is not provided, then all state-changes are respected. (default)
--------------------------------------------------------------------------------
/docs/api/state/params/live.rst:
--------------------------------------------------------------------------------
1 | Whether to get **live** updates.
2 |
3 | Please read :ref:`live-events` for a detailed explanation.
--------------------------------------------------------------------------------
/docs/api/state/params/return.rst:
--------------------------------------------------------------------------------
1 | :return:
2 | A ``dict`` containing a copy of the state.
3 |
4 | This copy serves as a *snapshot* of the state,
5 | corresponding to the state-change for which this state watcher was triggered.
6 |
7 |
--------------------------------------------------------------------------------
/docs/api/state/params/test_fn.rst:
--------------------------------------------------------------------------------
1 | A ``Callable``, which is called on each state-change.
2 |
--------------------------------------------------------------------------------
/docs/api/state/params/timeout.rst:
--------------------------------------------------------------------------------
1 | Sets the timeout in seconds.
2 |
3 | If the value is ``None``, it will block until an update is available.
4 |
5 | For all other values (``>=0``), it will wait for a state-change,
6 | for that amount of time before returning with a :py:class:`TimeoutError`.
--------------------------------------------------------------------------------
/docs/api/state/params/value.rst:
--------------------------------------------------------------------------------
1 | The value corresponding to the ``key`` in state ``dict``.
2 |
--------------------------------------------------------------------------------
/docs/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | rm -r _build/
4 |
5 | function build {
6 | sphinx-apidoc -o _build ../zproc
7 | sphinx-build . _build
8 | }
9 |
10 | if [ "$1" == "loop" ]; then
11 | while true; do
12 | build
13 | test $? -ne 0 && break
14 | sleep 1
15 | done
16 | else
17 | build
18 | fi
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | import os
16 | import sys
17 |
18 | sys.path.insert(0, os.path.abspath(".."))
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 |
23 | import datetime
24 |
25 | import zproc
26 |
27 | project = "ZProc"
28 | copyright = "{}, Dev Aggarwal".format(datetime.datetime.now().year)
29 | author = "Dev Aggarwal"
30 |
31 | # The short X.Y version
32 | version = zproc.__version__
33 | # The full version, including alpha/beta/rc tags
34 | release = zproc.__version__
35 |
36 | # -- General configuration ---------------------------------------------------
37 |
38 | # If your documentation needs a minimal Sphinx version, state it here.
39 | #
40 | # needs_sphinx = '1.0'
41 |
42 | # Add any Sphinx extension module names here, as strings. They can be
43 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
44 | # ones.
45 | extensions = [
46 | "sphinx.ext.autodoc",
47 | "sphinx.ext.doctest",
48 | "sphinx.ext.intersphinx",
49 | "sphinx.ext.todo",
50 | "sphinx.ext.coverage",
51 | "sphinx.ext.viewcode",
52 | ]
53 |
54 | # Add any paths that contain templates here, relative to this directory.
55 | templates_path = ["_templates"]
56 |
57 | # The suffix(es) of source filenames.
58 | # You can specify multiple suffix as a list of string:
59 | #
60 | # source_suffix = ['.rst', '.md']
61 | source_suffix = ".rst"
62 |
63 | # The master toctree document.
64 | master_doc = "index"
65 |
66 | # The language for content autogenerated by Sphinx. Refer to documentation
67 | # for a list of supported languages.
68 | #
69 | # This is also used if you do content translation via gettext catalogs.
70 | # Usually you set "language" from the command line for these cases.
71 | language = None
72 |
73 | # List of patterns, relative to source directory, that match files and
74 | # directories to ignore when looking for source files.
75 | # This pattern also affects html_static_path and html_extra_path .
76 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
77 |
78 | # The name of the Pygments (syntax highlighting) style to use.
79 | pygments_style = "monokai"
80 |
81 | # -- Options for HTML output -------------------------------------------------
82 |
83 | # The theme to use for HTML and HTML Help pages. See the documentation for
84 | # a list of builtin themes.
85 | #
86 | html_theme = "sphinx_rtd_theme"
87 |
88 | # Theme options are theme-specific and customize the look and feel of a theme
89 | # further. For a list of options available for each theme, see the
90 | # documentation.
91 |
92 | html_sidebars = {
93 | "**": [
94 | "about.html",
95 | "navigation.html",
96 | "relations.html",
97 | "searchbox.html",
98 | "donate.html",
99 | ]
100 | }
101 | html_theme_options = {
102 | "navigation_depth": 4,
103 | "sticky_navigation": False,
104 | "prev_next_buttons_location": "both",
105 | }
106 | # Add any paths that contain custom static files (such as style sheets) here,
107 | # relative to this directory. They are copied after the builtin static files,
108 | # so a file named "default.css" will overwrite the builtin "default.css".
109 | html_static_path = ["_static"]
110 |
111 | # Custom sidebar templates, must be a dictionary that maps document names
112 | # to template names.
113 | #
114 | # The default sidebars (for documents that don't match any pattern) are
115 | # defined by theme itself. Builtin themes are using these templates by
116 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
117 | # 'searchbox.html']``.
118 | #
119 | # html_sidebars = {}
120 |
121 |
122 | # -- Options for HTMLHelp output ---------------------------------------------
123 |
124 | # Output file base name for HTML help builder.
125 | htmlhelp_basename = "zprocdoc"
126 |
127 | # -- Options for LaTeX output ------------------------------------------------
128 |
129 | latex_elements = {
130 | # The paper size ('letterpaper' or 'a4paper').
131 | #
132 | # 'papersize': 'letterpaper',
133 | # The font size ('10pt', '11pt' or '12pt').
134 | #
135 | # 'pointsize': '10pt',
136 | # Additional stuff for the LaTeX preamble.
137 | #
138 | # 'preamble': '',
139 | # Latex figure (float) alignment
140 | #
141 | # 'figure_align': 'htbp',
142 | }
143 |
144 | # Grouping the document tree into LaTeX files. List of tuples
145 | # (source start file, target name, title,
146 | # author, documentclass [howto, manual, or own class]).
147 | latex_documents = [
148 | (master_doc, "zproc.tex", "zproc Documentation", "dev aggarwal", "manual")
149 | ]
150 |
151 | # -- Options for manual page output ------------------------------------------
152 |
153 | # One entry per manual page. List of tuples
154 | # (source start file, name, description, authors, manual section).
155 | man_pages = [(master_doc, "zproc", "zproc Documentation", [author], 1)]
156 |
157 | # -- Options for Texinfo output ----------------------------------------------
158 |
159 | # Grouping the document tree into Texinfo files. List of tuples
160 | # (source start file, target name, title, author,
161 | # dir menu entry, description, category)
162 | texinfo_documents = [
163 | (
164 | master_doc,
165 | "zproc",
166 | "zproc Documentation",
167 | author,
168 | "zproc",
169 | "Process on steroids",
170 | "Miscellaneous",
171 | )
172 | ]
173 |
174 | # -- Extension configuration -------------------------------------------------
175 |
176 | autoclass_content = "both"
177 | autodoc_member_order = "bysource"
178 |
179 | rst_prolog = """
180 | :github_url: https://github.com/pycampers/zproc
181 | """
182 |
183 | intersphinx_mapping = {"python": ("https://docs.python.org/3.5", None)}
184 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | ZProc: Multi Processing on steroids
2 | ===================================
3 |
4 | .. image:: https://img.shields.io/pypi/v/zproc.svg?style=for-the-badge
5 | :alt: PyPI
6 | :target: https://pypi.org/project/zproc/
7 |
8 | .. image:: https://img.shields.io/pypi/pyversions/zproc.svg?style=for-the-badge
9 | :alt: PyPI - Python Version
10 | :target: https://pypi.org/project/zproc/
11 |
12 | .. image:: https://img.shields.io/github/license/mashape/apistatus.svg?style=for-the-badge
13 | :alt: license
14 | :target: https://github.com/pycampers/zproc/blob/master/LICENSE
15 |
16 | .. image:: https://img.shields.io/github/stars/pycampers/zproc.svg?style=for-the-badge&label=Stars
17 | :alt: GitHub stars
18 | :target: https://github.com/pycampers/zproc
19 |
20 | Welcome to ZProc's docs! Glad that you made it here.
21 |
22 | - If you're unsure whether you really want to use zproc, read the :ref:`motivation`!
23 | - Head over to the :ref:`user-guide` if you're new.
24 | - Or, if you want information about something specific, use the :ref:`api-doc`!
25 |
26 |
27 | .. _motivation:
28 |
29 | Motivation
30 | -----------------
31 |
32 | Typically, when a Process is launched using Python's ``multiprocessing`` module,
33 | Python will spawn a new interpreter,
34 | that inherits resources (like variables, file descriptors etc.) from the parent.
35 |
36 | *However, it is not possible for you to update these resources from both Processes.*
37 |
38 | Let me explain with an example, where we try to update a ``dict`` from a child Process, but it doesn't really work.
39 |
40 | .. code-block:: python
41 | :caption: multiprocessing module
42 |
43 | import multiprocessing
44 |
45 |
46 | state = {"msg": "hello!"}
47 |
48 | def my_process():
49 | print(state["msg"])
50 |
51 | # this won't show up on global state
52 | state['msg'] = "bye!"
53 |
54 | p = multiprocessing.Process(target=my_process)
55 | p.start()
56 | p.join()
57 |
58 | print(state) # {"msg": "hello!"}
59 |
60 |
61 | **This is the problem zproc solves.**
62 |
63 | .. code-block:: python
64 | :caption: zproc
65 |
66 | import zproc
67 |
68 | ctx = zproc.Context()
69 | ctx.state = {"msg": "hello!"}
70 |
71 | def my_process(state):
72 | print(state["msg"])
73 |
74 | # this will show up on global state
75 | state['msg'] = "bye!"
76 |
77 | p = ctx.spawn(my_process)
78 | p.wait()
79 |
80 | print(ctx.state) # {"msg": "bye!"}
81 |
82 | ZProc achieves this by having `zeromq <http://zeromq.org/>`_ sockets communicate between Processes.
83 |
84 | *It cleverly hides away all these details, empowering you to build complex multi-tasking apps fast.*
85 |
86 | Also,
87 | it turns out that this model of passing messages fits quite well
88 | for doing a plethora of multitasking "feats",
89 | like reactive programming, atomic operations, distributed task queues, you name it.
90 |
91 |
92 | Indices
93 | ------------
94 |
95 | .. toctree::
96 | :maxdepth: 1
97 |
98 | user.rst
99 | api.rst
100 |
101 | - :ref:`genindex`
102 | - :ref:`modindex`
103 | - :ref:`search`
104 |
105 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/user.rst:
--------------------------------------------------------------------------------
1 | .. _user-guide:
2 |
3 | The User Guide
4 | --------------
5 |
6 | This part of the documentation, which is mostly prose, begins with some
7 | background information about ZProc, then focuses on step-by-step
8 | instructions for getting the most out of ZProc.
9 |
10 | .. toctree::
11 | :maxdepth: 1
12 |
13 | user/introduction.rst
14 | user/atomicity.rst
15 | user/state_watching.rst
16 | user/communication.rst
17 | user/security.rst
18 |
--------------------------------------------------------------------------------
/docs/user/atomicity.rst:
--------------------------------------------------------------------------------
1 | .. _atomicity:
2 |
3 | Atomicity and race conditions
4 | =============================
5 |
6 | When writing parallel/concurrent code, atomicity is a major concern.
7 |
8 | zproc provides a powerful construct, the :py:func:`~.atomic()` decorator
9 | to make it easy for you to write correct multi tasking code.
10 |
11 | The problem
12 | -----------
13 |
14 | .. sidebar:: Atomic operations
15 |
16 | In concurrent programming,
17 | an operation (or set of operations) is atomic, linearizable, indivisible or uninterruptible
18 | if it appears to the rest of the system to occur at once without being interrupted.
19 |
20 | If an operation can be divided into pieces, then other Processes might jump
21 | in and out between these pieces and try to meddle with each others' work, confusing everyone.
22 |
23 |
24 | .. code-block:: python
25 | :caption: non-atomic incrementer
26 |
27 | state['count'] += 5
28 |
29 | The above code might look like a single operation, but don't get fooled! (There are actually 2)
30 |
31 | 1. get ``'count'``, i.e. ``dict.__getitem__('count')``
32 | 2. set ``'count'`` to ``count + 5``, i.e. ``dict.__setitem__('count', count + 5)``
33 |
34 | The solution
35 | ------------
36 |
37 | zproc **guarantees™** that a single method call on a ``dict`` is atomic.
38 |
39 | This takes out a lot of guesswork in determining the atomicity of an operation.
40 | Just think in terms of ``dict`` methods.
41 |
42 | So, ``dict.__getitem__()`` and ``dict.__setitem__()`` are **guaranteed™**
43 | to be atomic on their own, but not in conjunction. *If these operations are not done atomically,
44 | it exposes the possibility of other Processes trying to do operations between "1" and "2"*
45 |
46 | zproc makes it dead simple to avoid such race conditions.
47 | Let's make some changes to our example...
48 |
49 | .. code-block:: python
50 | :caption: atomic incrementer
51 |
52 | @zproc.atomic
53 | def increment(snap, step):
54 | snap['count'] += step
55 |
56 | increment(state, 5)
57 |
58 | :py:func:`~.atomic()` transforms any arbitrary function into
59 | an atomic operation on the state.
60 |
61 | *Notice, the use of the word* ``snap`` *instead of* ``state`` *inside* ``increment()``.
62 |
63 | This is because the :py:func:`~.atomic()` wrapper provides only a *snapshot* of the state, as a ``dict`` object.
64 | It's important to realize that ``snap`` is not a :py:class:`State` object.
65 |
66 | This enables you to mutate ``snap`` freely
67 | and the changes will be reflected in the global state
68 | (after ``increment()`` returns).
69 | It also means that you cannot assign to ``snap``, only mutate.
70 |
71 | .. code-block:: python
72 | :caption: assignment not allowed
73 |
74 | @zproc.atomic
75 | def setcount(snap):
76 | snap = {'count': 10} # wrong!
77 |
78 | setcount(state)
79 |
80 |
81 | Sidenotes
82 | ---------
83 |
84 | - | While zproc does provide you a mechanism to avoid such race conditions,
85 | | you still need to identify the critical points where a race condition can occur,
86 | | and prevent it using :py:func:`~.atomic()`.
87 |
88 | - To preserve state consistency, update related State keys in a single atomic operation.
89 |
90 | - | If an error occurs while the function is running, the state will remain *unaffected*.
91 |
92 | - | The first argument to the atomic function must be a :py:class:`.State` object.
93 |
94 |
95 |
96 | A complete example is available `here `_.
97 |
98 |
--------------------------------------------------------------------------------
/docs/user/communication.rst:
--------------------------------------------------------------------------------
1 | How ZProc talks
2 | ===============
3 |
4 | While you don't need to do any communication on your own,
5 | ZProc is actively doing it behind the covers, using zmq sockets.
6 |
7 | Thanks to this,
8 | you can take the same code and run it in a different environment,
9 | with very little to no modifications.
10 |
11 | Furthermore, you can even take your existing code and scale it across
12 | multiple computers on your network.
13 |
14 | This is the benefit of message passing parallelism.
15 | Your whole stack is built on communication, and hence,
16 | becomes extremely scalable and flexible when you need it to be.
17 |
18 | .. _server-address-spec:
19 |
20 | The server address spec
21 | -----------------------
22 |
23 | An endpoint is a string consisting of two parts as follows: ``transport://address``.
24 | The transport part specifies the underlying transport protocol to use.
25 | The meaning of the address part is specific to the underlying transport protocol selected.
26 |
27 | The following transports may be used:
28 |
29 | - ipc
30 | local inter-process communication transport, see `zmq_ipc `_
31 |
32 | (``ipc://<path>``)
33 |
34 | - tcp
35 | unicast transport using TCP, see `zmq_tcp `_
36 |
37 | (``tcp://<host>:<port>``)
38 |
39 | .. code-block:: python
40 | :caption: Example
41 |
42 | server_address='tcp://0.0.0.0:50001'
43 |
44 | server_address='ipc:///home/username/my_endpoint'
45 |
46 |
47 | IPC or TCP?
48 | -----------
49 |
50 | If you have a POSIX, and don't need to communicate across multiple computers,
51 | you are better off reaping the performance benefits of IPC.
52 |
53 | For other use-cases, TCP.
54 |
55 | By default, zproc will use IPC if it is available, else TCP.
56 |
57 | .. _start-server:
58 |
59 | Starting the server manually
60 | ----------------------------
61 |
62 | When you create a :py:class:`.Context` object, ZProc will produce a random ``server_address``,
63 | and start a server.
64 |
65 | For advanced use-cases,
66 | you might want to use a well-known static address that all the services in your application are aware of.
67 |
68 | **This is quite useful when you want to access the same state across multiple nodes on a network,
69 | or in a different context on the same machine; anywhere communicating a "random" address would become an issue.**
70 |
71 | However, if you use a static address, :py:class:`.Context` won't start that
72 | server itself, and you have to do it manually, using :py:func:`.start_server`
73 | (This behavior enables us to spawn multiple :py:class:`.Context` objects with the same address).
74 |
75 | *All the classes in ZProc take* ``address`` *as their first argument.*
76 |
77 |
78 | >>> import zproc
79 | >>> ADDRESS = 'tcp://127.0.0.1:5000'
80 | >>> zproc.start_server(ADDRESS) # Important!
81 | (, 'tcp://127.0.0.1:5000')
82 | >>> zproc.Context(ADDRESS)
83 |
84 | >>> zproc.State(ADDRESS)
85 |
86 |
87 |
88 | The above example uses tcp, but ipc works just as well. (except across multiple machines)
89 |
90 | .. caution::
91 |
92 | Start the server *before* you access the :py:class:`.State` in *any* way; it solely depends on the server.
93 |
94 | TLDR; You can start the server from anywhere you wish, and then access it through the address.
--------------------------------------------------------------------------------
/docs/user/distributed.rst:
--------------------------------------------------------------------------------
1 | Distributed compute using ZProc
2 | ===============================
3 |
4 | *zproc makes distributed compute easy for you.*
5 |
6 |
7 | .. code-block:: python
8 |
9 | import zproc
10 |
11 |
12 | ctx = zproc.Context()
13 |
14 | ctx.workers.start()
15 | result = ctx.workers.map(pow, [1, 2, 3, 4])
16 |
17 | print(result, list(result))
18 |
19 |
20 | This will spawn workers,
21 | distribute the task,
22 | send the task over,
23 | and then pull the results from them.
24 |
25 |
26 | :py:meth:`.Swarm.map` runs across machines.
27 |
28 |
29 | .. code-block:: python
30 | :caption: 192.168.1.25
31 |
32 | ctx = zproc.Context("tcp://0.0.0.0:50001")
33 |
34 | ctx.workers.start()
35 |
36 |
37 | .. code-block:: python
38 | :caption: Computer 2
39 |
40 | ctx = zproc.Context("tcp://192.168.1.25:50001")
41 |
42 | ctx.workers.start()
43 |
44 | .. code-block:: python
45 | :caption: Computer 3
46 |
47 | ctx.workers.map(pow, [1, 2, 3, 4])
48 |
--------------------------------------------------------------------------------
/docs/user/introduction.rst:
--------------------------------------------------------------------------------
1 | Introduction to ZProc
2 | =====================
3 |
4 | The idea of zproc revolves around this funky :py:class:`.State` object.
5 |
6 | A :py:class:`.Context` is provided as a factory for creating objects.
7 | It's the easiest, most obvious way to use zproc.
8 |
9 | Each :py:class:`.Context` object must be associated with a server process,
10 | whose job is to manage the state of your application;
11 | anything that needs synchronization.
12 |
13 | Creating one is as simple as:
14 |
15 | .. code-block:: python
16 |
17 | import zproc
18 |
19 | ctx = zproc.Context()
20 |
21 | It makes the creation of objects explicit and bound to a specific Context,
22 | eliminating the need for various guessing games.
23 |
24 | The Context means just that.
25 | It's a collection of various parameters and flags that help the framework
26 | identify *where* the program currently is.
27 |
28 | Launching a Process
29 | ---------------------------------
30 |
31 | .. sidebar:: Decorators
32 |
33 | Function decorators are functions which
34 | accept other functions as arguments,
35 | and add some wrapper code around them.
36 |
37 | .. code-block:: python
38 |
39 | @decorator
40 | def func():
41 | pass
42 |
43 | # roughly equivalent to:
44 |
45 | func = decorator(func)
46 |
47 | The :py:meth:`.Context.spawn` function allows you to launch processes.
48 |
49 | .. code-block:: python
50 |
51 | def my_process(state):
52 | ...
53 |
54 | ctx.spawn(my_process)
55 |
56 | It works both as a function, and decorator.
57 |
58 | .. code-block:: python
59 |
60 | @ctx.process
61 | def my_process(state):
62 | ...
63 |
64 |
65 | The state
66 | ---------
67 |
68 | .. code-block:: python
69 |
70 | state = ctx.create_state()
71 |
72 |
73 |
74 | :py:meth:`~.Context.spawn` will launch a process, and provide it with ``state``.
75 |
76 | :py:class:`.State` is a *dict-like* object.
77 | *dict-like*, because it's not exactly a ``dict``.
78 |
79 | It supports common dictionary operations on the state.
80 |
81 | | However, you *cannot* actually operate on the underlying ``dict``.
82 | | It's guarded by a Process, whose sole job is to manage it.
83 | | The :py:class:`.State` object only *instructs* that Process to modify the ``dict``.
84 |
85 | You may also access it from the :py:class:`.Context` itself -- ``ctx.state``.
86 |
87 | Process arguments
88 | ------------------------------
89 |
90 | To supply arguments to a the Process's target function,
91 | you can use ``args`` or ``kwargs``:
92 |
93 | .. code-block:: python
94 |
95 | def my_process(state, num, exp):
96 | print(num, exp) # 2, 4
97 |
98 | ctx.spawn(my_process, args=[2], kwargs={'exp': 4})
99 |
100 | ``args`` is a sequence of positional arguments for the function;
101 | ``kwargs`` is a dict, which maps argument names and values.
102 |
103 |
104 | Waiting for a Process
105 | -----------------------------------
106 |
107 | Once you've launched a Process, you can wait for it to complete,
108 | and obtain the return value.
109 |
110 | .. code-block:: python
111 |
112 | from time import sleep
113 |
114 |
115 | def sleeper(state):
116 | sleep(5)
117 | return 'Hello There!'
118 |
119 | p = ctx.spawn(sleeper)
120 | result = p.wait()
121 |
122 | print(result) # Hello There!
123 |
124 |
125 | .. _process_factory:
126 |
127 | Process Factory
128 | --------------------------
129 |
130 | :py:meth:`~.Context.spawn` also lets you launch many processes at once.
131 |
132 | .. code-block:: python
133 |
134 | p_list = ctx.spawn(sleeper, count=10)
135 | p_list.wait()
136 |
137 |
138 | .. _worker_map:
139 |
140 | Worker Processes
141 | ----------------
142 |
143 | This feature lets you distribute a computation to a
144 | fixed number of workers.
145 |
146 | This is meant to be used for CPU bound tasks,
147 | since you can only have a limited number of CPU bound Processes working at any given time.
148 |
149 | :py:meth:`~.Context.worker_map` lets you use the built-in `map()` function in a parallel way.
150 |
151 | It divides up the sequence you provide into ``count`` number of pieces,
152 | and sends them to ``count`` number of workers.
153 |
154 | ---
155 |
156 | You first, need a :py:class:`.Swarm` object,
157 | which is the front-end for using worker Processes.
158 |
159 | .. code-block:: python
160 | :caption: obtaining workers
161 |
162 | ctx = zproc.Context()
163 |
164 | swarm = ctx.create_swarm(4)
165 |
166 | ---
167 |
168 | Now, we can start to use it.
169 |
170 | .. code-block:: python
171 | :caption: Works similar to ``map()``
172 |
173 | def square(num):
174 | return num * num
175 |
176 | # [1, 4, 9, 16]
177 | list(swarm.map(square, [1, 2, 3, 4]))
178 |
179 |
180 | .. code-block:: python
181 | :caption: Common Arguments.
182 |
183 | def power(num, exp):
184 | return num ** exp
185 |
186 | # [0, 1, 8, 27, 64, ... 941192, 970299]
187 | list(
188 | swarm.map(
189 | power,
190 | range(100),
191 | args=[3],
192 | count=10 # distribute among 10 workers.
193 | )
194 | )
195 |
196 | .. code-block:: python
197 | :caption: Mapped Positional Arguments.
198 |
199 | def power(num, exp):
200 | return num ** exp
201 |
202 | # [4, 9, 36, 256]
203 | list(
204 | swarm.map(
205 | power,
206 | map_args=[(2, 2), (3, 2), (6, 2), (2, 8)]
207 | )
208 | )
209 |
210 | .. code-block:: python
211 | :caption: Mapped Keyword Arguments.
212 |
213 | def my_thingy(seed, num, exp):
214 | return seed + num ** exp
215 |
216 | # [1007, 3132, 32, 736, 132, 65543, 8]
217 | list(
218 | ctx.worker_map(
219 | my_thingy,
220 | args=[7],
221 | map_kwargs=[
222 | {'num': 10, 'exp': 3},
223 | {'num': 5, 'exp': 5},
224 | {'num': 5, 'exp': 2},
225 | {'num': 9, 'exp': 3},
226 | {'num': 5, 'exp': 3},
227 | {'num': 4, 'exp': 8},
228 | {'num': 1, 'exp': 4},
229 | ],
230 | count=5
231 | )
232 | )
233 |
234 | ---
235 |
236 | What's interesting about :py:meth:`~.Context.worker_map` is that it returns a generator.
237 |
238 | The moment you call it, it will distribute the task to "count" number of workers.
239 |
240 | It will then return a generator,
241 | which in-turn will do the job of pulling out the results from these workers,
242 | and arranging them in order.
243 |
244 | ---
245 |
246 | The amount of time it takes for ``next(res)`` is non-linear,
247 | because all the blocking computation is being carried out in the background.
248 |
249 | >>> import zproc
250 | >>> import time
251 |
252 | >>> ctx = zproc.Context()
253 |
254 | >>> def blocking_func(x):
255 | ... time.sleep(5)
256 | ...
257 | ... return x * x
258 | ...
259 |
260 | >>> res = ctx.worker_map(blocking_func, range(10)) # returns immediately
261 | >>> res
262 |
263 |
264 | >>> next(res) # might block
265 | 0
266 | >>> next(res) # might block
267 | 1
268 | >>> next(res) # might block
269 | 4
270 | >>> next(res) # might block
271 | 9
272 | >>> next(res) # might block
273 | 16
274 | *and so on..*
275 |
276 |
277 | .. _process_map:
278 |
279 | Map Processes
280 | -------------
281 |
282 | This is meant to be used for I/O and network bound tasks,
283 | as you can have a greater number of Processes working together
284 | than the number of physical CPUs.
285 |
286 | This is because these kinds of tasks typically involve waiting for a resource,
287 | and are, as a result quite lax on CPU resources.
288 |
289 | :py:meth:`~.Context.map_process`
290 | has the exact same semantics for mapping sequences as :py:meth:`~.Context.worker_map`,
291 | except that it launches a new Process for each item in the sequence.
292 |
293 | Reactive programming
294 | --------------------
295 |
296 | .. sidebar:: Reactive Programming
297 |
298 | Reactive programming is a declarative programming
299 | paradigm concerned with data streams and the propagation of change.
300 |
301 |
302 | This is the part where you really start to see the benefits of a smart state.
303 | The state knows when it's being updated, and does the job of notifying everyone.
304 |
305 | State watching allows you to "react" to some change in the state in an efficient way.
306 |
307 | The problem
308 | +++++++++++
309 |
310 | .. sidebar:: Busy waiting
311 |
312 | busy-waiting
313 | is a technique in which a process repeatedly checks to see if a condition is true,
314 | such as whether keyboard input or a lock is available.
315 |
316 | *Busy waiting is expensive and quite tricky to get right.*
317 |
318 | Lets say, you want to wait for the number of ``"cookies"`` to be ``5``.
319 | Using busy-waiting, you might do it with something like this:
320 |
321 | .. code-block:: python
322 |
323 | while True:
324 | if cookies == 5:
325 | print('done!')
326 | break
327 |
328 | But then you find out that this eats too much CPU, so you put in some sleep.
329 |
330 | .. code-block:: python
331 |
332 | from time import sleep
333 |
334 | while True:
335 | if cookies == 5:
336 | print('done!')
337 | break
338 | sleep(1)
339 |
340 | And from there on, you try to manage the time for which your application sleeps (to arrive at a sweet spot).
341 |
342 | The solution
343 | ++++++++++++
344 |
345 | zproc provides an elegant, easy to use solution to this problem.
346 |
347 | .. code-block:: python
348 |
349 | def my_process(state):
350 | state.get_when_equal('cookies', 5)
351 | print('done with zproc!')
352 |
353 | This eats very little to no CPU, and is fast enough for almost everyone's needs.
354 |
355 |
356 | You can also provide a callable,
357 | which gets called on each state update
358 | to check whether the return value is *truthy*.
359 |
360 | .. code-block:: python
361 |
362 | state.get_when(lambda snap: snap.get('cookies') == 5)
363 |
364 |
365 | .. caution::
366 |
367 | Wrong use of state watchers!
368 |
369 | .. code-block:: python
370 |
371 | from time import time
372 |
373 | t = time()
374 | state.get_when(lambda _: time() > t + 5) # wrong!
375 |
376 | State only knows how to respond to *state* changes.
377 | Changing time doesn't signify a state update.
378 |
379 |
380 | Read more on the :ref:`state-watching`.
381 |
382 | Mutating objects inside state
383 | -----------------------------
384 |
385 | .. sidebar:: Mutation
386 |
387 | In computer science,
388 | mutation refers to the act of modifying an object in-place.
389 |
390 | When we say that an object is mutable,
391 | it implies that its in-place methods "mutate" the object's contents.
392 |
393 |
394 | Zproc does not allow one to mutate objects inside the state.
395 |
396 | .. code-block:: python
397 | :caption: incorrect mutation
398 |
399 | state['numbers'] = [1, 2, 3] # works
400 |
401 | state['numbers'].append(4) # doesn't work
402 |
403 |
404 | The *right* way to mutate objects in the state,
405 | is to do it using the :py:func:`~.atomic` decorator.
406 |
407 | .. code-block:: python
408 | :caption: correct mutation
409 |
410 | @zproc.atomic
411 | def add_a_number(snap, to_add):
412 | snap['numbers'].append(to_add)
413 |
414 |
415 | @ctx.process
416 | def my_process(state):
417 | add_a_number(state, 4)
418 |
419 | Read more about :ref:`atomicity`.
420 |
421 |
422 | Here be dragons
423 | ---------------
424 |
425 | .. sidebar:: Thread safety
426 |
427 | Thread-safe code only manipulates shared data structures in a manner that ensures that all
428 | threads behave properly and fulfill their design specifications without unintended interaction.
429 |
430 |
431 | Absolutely none of the classes in ZProc are Process or Thread safe.
432 | You must never attempt to share an object across multiple Processes.
433 |
434 | Create a new object for each Process.
435 | Communicate and synchronize using the :py:class:`.State` at all times.
436 |
437 | This is, in-general *very* good practice.
438 |
439 | Never attempt to directly share python objects across Processes,
440 | and the framework will reward you :).
441 |
442 | The problem
443 | +++++++++++
444 |
445 | .. code-block:: python
446 | :caption: incorrect use of the framework
447 |
448 | ctx = zproc.Context()
449 |
450 |
451 | def my_process(state):
452 | ctx.spawn(some_other_process) # very wrong!
453 |
454 | ctx.spawn(my_process)
455 |
456 | Here, the ``ctx`` object is shared between the parent and child Process.
457 | This is not allowed, and will inevitably lead to improper behavior.
458 |
459 | The solution
460 | ++++++++++++
461 |
462 | You can ask zproc to create new objects for you.
463 |
464 | .. code-block:: python
465 | :caption: correct use of the framework
466 |
467 | ctx = zproc.Context()
468 |
469 |
470 | def my_process(inner_ctx):
471 | inner_ctx.spawn(some_other_process) # correct.
472 |
473 | ctx.spawn(my_process, pass_context=True) # Notice "pass_context"
474 |
475 | ---
476 |
477 | Or, create new ones yourself.
478 |
479 | .. code-block:: python
480 | :caption: correct use of the framework
481 |
482 | ctx = zproc.Context()
483 |
484 |
485 | def my_process(state):
486 | inner_ctx = zproc.Context() # important!
487 | inner_ctx.spawn(some_other_process)
488 |
489 | ctx.spawn(my_process)
490 |
--------------------------------------------------------------------------------
/docs/user/security.rst:
--------------------------------------------------------------------------------
1 | .. _security:
2 |
3 | Security considerations
4 | =======================
5 |
6 | Cryptographic signing
7 | ---------------------
8 |
9 | Why?
10 | ++++
11 |
12 | Since un-pickling from an external source is considered dangerous,
13 | it becomes necessary to verify whether the other end is also a ZProc node,
14 | and not some attacker trying to exploit our application.
15 |
16 | Hence, ZProc provides cryptographic signing support using `itsdangerous <https://itsdangerous.palletsprojects.com/>`_.
17 |
18 | Just provide the `secret_key` parameter to :py:class:`.Context`, and you should be good to go!
19 |
20 | >>> import zproc
21 | >>> ctx = zproc.Context(secret_key="muchsecret")
22 | >>> ctx.secret_key
23 | 'muchsecret'
24 |
25 | Similarly, :py:class:`.State` also takes the ``secret_key`` parameter.
26 |
27 | By default, ``secret_key`` is set to ``None``, which implies that no cryptographic signing is performed.
28 |
29 | Yes, but why?
30 | +++++++++++++
31 |
32 | Here is an example demonstrating the usefulness of this feature.
33 |
34 |
35 | .. code-block:: python
36 |
37 | import zproc
38 |
39 | home = zproc.Context(secret_key="muchsecret")
40 | ADDRESS = home.address
41 |
42 | home.state['gold'] = 5
43 |
44 |
45 | An attacker somehow got to know our server's address.
46 | But since his secret key didn't match ours, their attempts to connect our server are futile.
47 |
48 | .. code-block:: python
49 |
50 | attacker = zproc.Context(ADDRESS) # blocks forever
51 |
52 |
53 | If however, you tell someone the secret key, then they are allowed to access the state.
54 |
55 | .. code-block:: python
56 |
57 | friend = zproc.Context(ADDRESS, secret_key="muchsecret")
58 | print(friend.state['gold']) # 5
59 |
--------------------------------------------------------------------------------
/docs/user/spec.py:
--------------------------------------------------------------------------------
1 | import zproc
2 |
3 | ctx = zproc.Context()
4 |
5 |
6 | def my_proc(ctx):
7 | state = ctx.create_state()
8 |
9 | for snap in state.when_available():
10 | pass
11 |
12 |
13 | ctx.start()
14 | ctx.spawn(my_proc)
15 |
16 |
17 | swarm = ctx.create_swarm()
18 |
19 | state = ctx.create_state()
20 |
21 |
22 | state["test"] = 5
23 |
--------------------------------------------------------------------------------
/docs/user/state_watching.rst:
--------------------------------------------------------------------------------
1 | .. _state-watching:
2 |
3 | The magic of state watching
4 | ===========================
5 | **Watch the state for events, as-if you were watching a youtube video!**
6 |
7 | zproc allows you to *watch* the state using these methods, @ the :py:class:`.State` API.
8 |
9 | - :py:meth:`~.State.get_when_change`
10 | - :py:meth:`~.State.get_when`
11 | - :py:meth:`~.State.get_when_equal`
12 | - :py:meth:`~.State.get_when_not_equal`
13 | - :py:meth:`~.State.get_when_none`
14 | - :py:meth:`~.State.get_when_not_none`
15 | - :py:meth:`~.State.get_when_available`
16 |
17 | For example, the following code will watch the state,
18 | and print out a message whenever the price of gold is below 40.
19 |
20 | .. code-block:: python
21 |
22 | while True:
23 | snap = state.get_when(lambda snap: snap['gold_price'] < 40)
24 |
25 | print('"gold_price" is below 40!!:', snap['gold_price'])
26 |
27 | ---
28 |
29 | There are also these utility methods in :py:class:`.Context` that are just wrappers
30 | over their counterparts in :py:class:`.State`.
31 |
32 | - :py:meth:`~.Context.call_when_change`
33 | - :py:meth:`~.Context.call_when`
34 | - :py:meth:`~.Context.call_when_equal`
35 | - :py:meth:`~.Context.call_when_not_equal`
36 | - :py:meth:`~.Context.call_when_none`
37 | - :py:meth:`~.Context.call_when_not_none`
38 | - :py:meth:`~.Context.call_when_available`
39 |
40 |
41 | For example, the function ``want_pizza()`` will be called every-time the ``"num_pizza"`` key in the state changes.
42 |
43 | .. code-block:: python
44 |
45 | @ctx.call_when_change("num_pizza")
46 | def want_pizza(snap, state):
47 | print("pizza be tasty!", snap['num_pizza'])
48 |
49 |
50 | .. note::
51 | All state-watchers are ``KeyError`` safe.
52 | That means, if the dict key you requested for isn't present, a ``KeyError`` won't be thrown.
53 |
54 | Snapshots
55 | ---------
56 |
57 | Notice, the use of the name ``snap`` in these examples, instead of ``state``
58 |
59 | All watchers return a *snapshot* of the state,
60 | corresponding to the state-change for which the state watcher was triggered.
61 |
62 | The *snapshot* is just a regular ``dict`` object.
63 |
64 | In practice, this helps avoid race conditions -- especially in cases where state keys are inter-dependent.
65 |
66 | .. _duplicate-events:
67 |
68 | Duplicate-ness of events
69 | ------------------------
70 |
71 | #TODO
72 |
73 |
74 | .. _live-events:
75 |
76 | Live-ness of events
77 | -------------------
78 |
79 | zproc provides 2 different "modes" for watching the state.
80 |
81 | By default, all state watchers will provide **buffered updates**.
82 |
83 | Let us see what that exactly means, in detail.
84 |
85 |
86 | Peanut generator
87 | ++++++++++++++++
88 |
89 |
90 | First, let us create a :py:class:`~Process` that will generate some peanuts, periodically.
91 |
92 | .. code-block:: python
93 |
94 | from time import sleep
95 | import zproc
96 |
97 |
98 | ctx = zproc.Context()
99 | state = ctx.state
100 | state["peanuts"] = 0
101 |
102 |
103 | @zproc.atomic
104 | def inc_peanuts(snap):
105 | snap['peanuts'] += 1
106 |
107 |
108 | @ctx.process
109 | def peanut_gen(state):
110 | while True:
111 | inc_peanuts(state)
112 | sleep(1)
113 |
114 |
115 |
116 | Live consumer
117 | +++++++++++++
118 |
119 | .. code-block:: python
120 |
121 | while True:
122 | num = state.get_when_change("peanuts", live=True)
123 | print("live consumer got:", num)
124 |
125 | sleep(2)
126 |
127 | The above code will miss any updates that happen while it is sleeping (``sleep(2)``).
128 |
129 | When consuming live updates, your code **can miss events**, if it's not paying attention.
130 |
131 | *like a live youtube video, you only see what's currently happening.*
132 |
133 | Buffered consumer
134 | +++++++++++++++++
135 |
136 | To modify this behaviour, you need to pass ``live=False``.
137 |
138 | .. code-block:: python
139 |
140 | while True:
141 | num = state.get_when_change("peanuts", live=False)
142 | print("non-live consumer got:", num)
143 |
144 | sleep(2)
145 |
146 | This way, the events are stored in a *queue*,
147 | so that your code **doesn't miss any events**.
148 |
149 | *like a normal youtube video, where you won't miss anything, since it's buffering.*
150 |
151 | Hybrid consumer
152 | ++++++++++++++++
153 |
154 | *But a live youtube video can be buffered as well!*
155 |
156 | Hence the need for a :py:meth:`~.State.go_live` method.
157 |
158 | It *clears* the outstanding queue (or buffer) -- deleting all previous events.
159 |
160 | *That's somewhat like the "LIVE" button on a live stream, that skips ahead to the live broadcast.*
161 |
162 |
163 | .. code-block:: python
164 |
165 | while True:
166 | num = state.get_when_change("peanuts", live=False)
167 | print("hybrid consumer got:", num)
168 |
169 | state.go_live()
170 |
171 | sleep(2)
172 |
173 |
174 | .. note::
175 | :py:meth:`~.State.go_live` only affects the behavior when ``live`` is set to ``False``.
176 |
177 | Has no effect when ``live`` is set to ``True``.
178 |
179 | A **live** state watcher is strictly **LIVE**.
180 |
181 |
182 | *A full example is available in* ``examples/peanut_processor.py`` *in the repository.*
183 |
184 |
185 | Decision making
186 | +++++++++++++++
187 |
188 | It's easy to decide whether you need live updates or not.
189 |
190 | - If you don't care about missing an update or two, and want the most up-to date state, use live mode.
191 |
192 | - If you care about each state update, at the cost of speed, and the recency of the updates, don't use live mode.
193 |
194 | Live mode is obviously faster (potentially), since it can miss an update or two,
195 | which eventually trickles down to less computation.
196 |
197 |
198 | Timeouts
199 | --------
200 |
201 | You can also provide timeouts while watching the state, using ``timeout`` parameter.
202 |
203 | If an update doesn't occur within the specified timeout, a :py:class:`TimeoutError` is raised.
204 |
205 | .. code-block:: python
206 |
207 | try:
208 | print(state.get_when_change(timeout=5)) # wait 5 seconds for an update
209 | except TimeoutError:
210 |         print('Waited too long!')
211 |
212 |
213 |
214 | Button Press
215 | ------------
216 |
217 | Let's take an example, to put what we learned into real world usage.
218 |
219 | Here, we want to watch a button press, and determine whether it was a long or a short press.
220 |
221 | Some assumptions:
222 |
223 | - If the value of ``'button'`` is ``True``, the button is pressed.
224 |
225 | - If the value of ``'button'`` is ``False``, the button is not pressed.
226 |
227 | - The ``Reader`` is any arbitrary source of a value, e.g. a GPIO pin or a socket connection, receiving the value from an IOT button.
228 |
229 | .. code-block:: python
230 |
231 | @ctx.process
232 | def reader(state):
233 | # reads the button value from a reader and stores it in the state
234 |
235 | reader = Reader()
236 | old_value = None
237 |
238 | while True:
239 | new_value = reader.read()
240 |
241 | # only update state when the value changes
242 | if old_value != new_value:
243 | state['button'] = new_value
244 | old_value = new_value
245 |
246 |
247 |
248 | # calls handle_press() whenever button is pressed
249 | @ctx.call_when_equal('button', True, live=True)
250 | def handle_press(_, state): # The first arg will be the value of "button". We don't need that.
251 |
252 | print("button pressed")
253 |
254 | try:
255 | # wait 0.5 sec for a button to be released
256 | state.get_when_equal('button', False, timeout=0.5)
257 |
258 | print('its a SHORT press')
259 |
260 | # give up waiting
261 | except TimeoutError as e:
262 |
263 | print('its a LONG press')
264 |
265 | # wait infinitely for button to be released
266 | state.get_when_equal('button', False)
267 |
268 | print("button is released")
269 |
270 |
271 | Here, passing ``live=True`` makes sense, since we don't care about a missed button press.
272 |
273 | It makes the software respond to the button in real-time.
274 |
275 | If ``live=False`` was passed, then it would not be real-time,
276 | and sometimes the application would lag behind the real world button state.
277 |
278 | This behavior is undesirable when making Human computer interfaces,
279 | where keeping stuff responsive is a priority.
280 |
281 |
282 | (The above code is a simplified version of code used in a real-world project.)
283 |
--------------------------------------------------------------------------------
/examples/atomicity.py:
--------------------------------------------------------------------------------
1 | """
2 | Shows how resilient ZProc is to race conditions.
3 | Complete with a fuzz! (Thanks to Raymond Hettinger's talk on concurrency)
4 |
5 | Spawns 10 processes that each do a non-atomic operation (incrementation) on the state.
6 | Since the operations are run using ".atomic()", they magically avoid race conditions!
7 |
8 | Expected Output:
9 |
10 | 1
11 | 2
12 | 3
13 | .
14 | .
15 |
16 | 10
17 | """
18 | from random import random
19 | from time import sleep
20 |
21 | import zproc
22 |
23 | ctx = zproc.Context(wait=True)
24 | ctx.state["count"] = 0
25 |
26 |
27 | @zproc.atomic
28 | def increment(snap):
29 | count = snap["count"]
30 |
31 | sleep(random())
32 |
33 | snap["count"] = count + 1
34 | print(snap["count"])
35 |
36 |
37 | def child1(state):
38 | increment(state)
39 |
40 |
41 | ctx.spawn(child1, count=10)
42 |
--------------------------------------------------------------------------------
/examples/chain_reaction.py:
--------------------------------------------------------------------------------
1 | """
2 | Demonstration of a chain reaction to a state change
3 |
4 | # Expected output
5 |
6 | main: I set foo to foobar
7 | child1: foo changed, so I wake, now foo = foobar
8 | child1: I set foo to bar
9 | child1: I exit
10 | child2: foo changed to bar, so I wake
11 | child2: I exit
12 |
13 | main: I exit
14 | """
15 | from time import sleep
16 |
17 | import zproc
18 |
19 |
20 | # define a child process
21 | def child1(state):
22 | val = state.when_change("foo") # wait for foo to change
23 | print("child1: foo changed, so I wake, now foo =", val)
24 |
25 | state["foo"] = "bar" # update bar
26 | print("child1: I set foo to bar")
27 | print("child1: I exit")
28 |
29 |
30 | # define another child process
31 | def child2(state):
32 | state.when(lambda s: s.get("foo") == "bar") # wait for bar_equals_bar
33 | print("child2: foo changed to bar, so I wake")
34 | print("child2: I exit")
35 |
36 |
37 | if __name__ == "__main__":
38 | ctx = zproc.Context(wait=True) # create a context for us to work with
39 | ctx.spawn(child1, child2) # give the context some processes to work with
40 |
41 | sleep(1) # sleep for no reason
42 |
43 | ctx.state["foo"] = "foobar" # set initial state
44 | print("main: I set foo to foobar")
45 |
46 | print("main: I exit")
47 |
--------------------------------------------------------------------------------
/examples/cookie_eater.py:
--------------------------------------------------------------------------------
1 | """
2 | Expected output:
3 |
4 | Process - pid: 10815 target: '__main__.cookie_eater' ppid: 10802 is_alive: True exitcode: None
5 | Here's a cookie!
6 | Here's a cookie!
7 | Here's a cookie!
8 | nom nom nom
9 | Here's a cookie!
10 | nom nom nom
11 | Here's a cookie!
12 | nom nom nom
13 | nom nom nom
14 | nom nom nom
15 | """
16 | import zproc
17 |
18 |
19 | @zproc.atomic
20 | def eat_cookie(snap):
21 | """Eat a cookie."""
22 | snap["cookies"] -= 1
23 | print("nom nom nom")
24 |
25 |
26 | @zproc.atomic
27 | def bake_cookie(snap):
28 | """Bake a cookie."""
29 | snap["cookies"] += 1
30 | print("Here's a cookie!")
31 |
32 |
33 | ctx = zproc.Context(wait=True)
34 | state = ctx.create_state()
35 | state["cookies"] = 0
36 |
37 |
38 | @ctx.spawn
39 | def cookie_eater(ctx):
40 | """Eat cookies as they're baked."""
41 | state = ctx.create_state()
42 | state["ready"] = True
43 |
44 | for _ in state.when_change("cookies"):
45 | eat_cookie(state)
46 |
47 |
48 | # wait for that process
49 | next(state.when_available("ready"))
50 |
51 | # finally, get to work.
52 | print(cookie_eater)
53 | for _ in range(5):
54 | bake_cookie(state)
55 |
56 |
--------------------------------------------------------------------------------
/examples/cookie_eater_extreme.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 |
4 | import zproc
5 |
6 | NUM_PROCS = 100
7 | NUM_COOKIES = 5
8 |
9 |
10 | #
11 | # define atomic operations
12 | #
13 |
14 |
15 | @zproc.atomic
16 | def decrement(state, key):
17 | state[key] -= 1
18 |
19 |
20 | @zproc.atomic
21 | def increment(state, key):
22 | state[key] += 1
23 |
24 |
25 | #
26 | # define cookie eater
27 | #
28 |
29 |
30 | def cookie_eater(ctx):
31 | state = ctx.create_state()
32 | increment(state, "ready")
33 |
34 | # some fuzzing
35 | time.sleep(random.random())
36 |
37 |     # passing `start_time=0` gives updates from the very beginning,
38 | # no matter in what space-time we currently are!
39 | for _ in state.when_change("cookies", count=NUM_COOKIES, start_time=0):
40 | decrement(state, "cookies")
41 |
42 | increment(state, "done")
43 |
44 |
45 | #
46 | # Here's where the magic happens
47 | #
48 |
49 | # boilerplate
50 | ctx = zproc.Context(wait=True)
51 | state = ctx.create_state({"cookies": 0, "ready": 0, "done": 0})
52 |
53 | # store a handle to receive "ready" from eater
54 | ready = state.when_change("ready", count=NUM_PROCS)
55 |
56 | # start some eater processes
57 | eaters = ctx.spawn(cookie_eater, count=NUM_PROCS)
58 | print(eaters)
59 |
60 | # wait for all to be ready
61 | ready = list(ready)
62 | print("ready:", ready)
63 | assert len(ready) == NUM_PROCS
64 |
65 | # store a handle to receive "done" from eater
66 | done = state.when_change("done", count=NUM_PROCS)
67 |
68 | # make some cookies
69 | for _ in range(NUM_COOKIES):
70 | increment(state, "cookies")
71 |
72 | # wait for all to complete
73 | done = list(done)
74 | print("done:", done)
75 | assert len(done) == NUM_PROCS
76 |
77 | # finally, check if right amount of cookies were consumed
78 | assert state["cookies"] == NUM_COOKIES - NUM_PROCS * NUM_COOKIES
79 |
--------------------------------------------------------------------------------
/examples/luck_test.py:
--------------------------------------------------------------------------------
1 | """
2 | A random sync test, by generating random numbers into the state.
3 |
4 | # Expected output
5 |
6 | num gen: 0.9335641557984383
7 |
8 | ......
9 |
10 | listener: foo is between 0.6 and 0.61, so I awake
11 | num gen:
12 | listener: I set STOP to True
13 | listener: I exit
14 | num gen: STOP was set to True, so lets exit
15 | num gen: I exit
16 |
17 | """
18 | import random
19 |
20 | import zproc
21 |
22 |
23 | def num_listener(state, low, high):
24 | # blocks until num is between the specified range
25 | state.when(lambda state: low < state.get("num") < high)
26 |
27 | print("listener: foo is between {0} and {1}, so I awake".format(low, high))
28 |
29 | state["STOP"] = True
30 | print("listener: I set STOP to True")
31 |
32 | print("listener: I exit")
33 |
34 |
35 | if __name__ == "__main__":
36 | ctx = zproc.Context() # create a context for us to work with
37 | state = ctx.state
38 |
39 | state.setdefault("num", 0) # set the default value, just to be safe
40 |
41 | # give the context some processes to work with
42 | # also give some props to the num listener
43 | ctx.spawn(num_listener, args=[0.6, 0.601])
44 |
45 | while True:
46 | if state.get("STOP"):
47 | print("num gen: STOP was set to True, so lets exit")
48 | break
49 | else:
50 | num = random.random()
51 | state["num"] = num
52 |
53 | print("num gen:", num)
54 |
55 | print("num gen: I exit")
56 |
--------------------------------------------------------------------------------
/examples/nested_procs.py:
--------------------------------------------------------------------------------
1 | """
2 | Demonstration of how to handle nested processes
3 | """
4 | import zproc
5 |
6 | ctx = zproc.Context(wait=True)
7 | print("level0", ctx.state)
8 |
9 | ctx.state["msg"] = "hello from level0"
10 |
11 |
12 | @ctx._process  # NOTE(review): `_process` is a private (underscore) API — confirm this example shouldn't use the public `.spawn`
13 | def child1(state):
14 |     print("level1:", state)
15 |     state["msg"] = "hello from level1"
16 |
17 |     ctx = zproc.Context(state.address, wait=True)  # nested Context, connected to the same server via the parent's address
18 |
19 |     @ctx._process
20 |     def child2(state):
21 |         print("level2:", state)
22 |         state["msg"] = "hello from level2"
23 |
24 |         ctx = zproc.Context(state.address, wait=True)  # one more level down — same connect-by-address pattern
25 |
26 |         @ctx._process
27 |         def child3(state):
28 |             print("level3:", state)
29 |
--------------------------------------------------------------------------------
/examples/peanut_processor.py:
--------------------------------------------------------------------------------
1 | """
2 | Demonstrates how live-ness of events works in ZProc.
3 |
4 | Example output:
5 |
6 | Consuming LIVE events:
7 |
8 | PEANUT GEN: 1
9 | live consumer got: 1
10 | PEANUT GEN: 2
11 | PEANUT GEN: 3
12 | live consumer got: 3
13 | PEANUT GEN: 4
14 | PEANUT GEN: 5
15 | live consumer got: 5
16 | PEANUT GEN: 6
17 | PEANUT GEN: 7
18 | live consumer got: 7
19 | PEANUT GEN: 8
20 | PEANUT GEN: 9
21 | PEANUT GEN: 10
22 | live consumer got: 10
23 | PEANUT GEN: 11
24 | PEANUT GEN: 12
25 | live consumer got: 12
26 | PEANUT GEN: 13
27 | PEANUT GEN: 14
28 | live consumer got: 14
29 | PEANUT GEN: 15
30 | PEANUT GEN: 16
31 | live consumer got: 16
32 | PEANUT GEN: 17
33 | PEANUT GEN: 18
34 | live consumer got: 18
35 | PEANUT GEN: 19
36 | PEANUT GEN: 20
37 | live consumer got: 20
38 | PEANUT GEN: 21
39 |
40 | Consuming non-LIVE events:
41 |
42 | non-live consumer got: 1
43 | PEANUT GEN: 22
44 | PEANUT GEN: 23
45 | non-live consumer got: 2
46 | PEANUT GEN: 24
47 | PEANUT GEN: 25
48 | non-live consumer got: 3
49 | PEANUT GEN: 26
50 | PEANUT GEN: 27
51 | non-live consumer got: 4
52 | PEANUT GEN: 28
53 | PEANUT GEN: 29
54 | non-live consumer got: 5
55 | PEANUT GEN: 30
56 | PEANUT GEN: 31
57 | non-live consumer got: 6
58 | PEANUT GEN: 32
59 | PEANUT GEN: 33
60 | non-live consumer got: 7
61 | PEANUT GEN: 34
62 | PEANUT GEN: 35
63 | non-live consumer got: 8
64 | PEANUT GEN: 36
65 | PEANUT GEN: 37
66 | non-live consumer got: 9
67 | PEANUT GEN: 38
68 | PEANUT GEN: 39
69 | non-live consumer got: 10
70 | PEANUT GEN: 40
71 | PEANUT GEN: 41
72 |
73 | Consuming a hybrid of LIVE and non-LIVE events:
74 |
75 | hybrid consumer got: 11
76 | PEANUT GEN: 42
77 | PEANUT GEN: 43
78 | hybrid consumer got: 42
79 | PEANUT GEN: 44
80 | PEANUT GEN: 45
81 | hybrid consumer got: 44
82 | PEANUT GEN: 46
83 | PEANUT GEN: 47
84 | hybrid consumer got: 46
85 | PEANUT GEN: 48
86 | PEANUT GEN: 49
87 | hybrid consumer got: 48
88 | PEANUT GEN: 50
89 | PEANUT GEN: 51
90 | hybrid consumer got: 50
91 | PEANUT GEN: 52
92 | PEANUT GEN: 53
93 | hybrid consumer got: 52
94 | PEANUT GEN: 54
95 | PEANUT GEN: 55
96 | hybrid consumer got: 54
97 | PEANUT GEN: 56
98 | PEANUT GEN: 57
99 | hybrid consumer got: 56
100 | PEANUT GEN: 58
101 | PEANUT GEN: 59
102 | hybrid consumer got: 58
103 | PEANUT GEN: 60
104 | PEANUT GEN: 61
105 | PEANUT GEN: 62
106 | PEANUT GEN: 63
107 | PEANUT GEN: 64
108 | PEANUT GEN: 65
109 | PEANUT GEN: 66
110 | PEANUT GEN: 67
111 | PEANUT GEN: 68
112 | ...
113 | """
114 | from time import sleep
115 |
116 | import zproc
117 |
118 | ctx = zproc.Context()
119 | state = ctx.state
120 |
121 | state["peanuts"] = 0
122 |
123 |
124 | @zproc.atomic
125 | def inc_peanuts(snap):
126 | snap["peanuts"] += 1
127 | print("PEANUT GEN:", snap["peanuts"])
128 |
129 |
130 | @ctx._process
131 | def peanut_gen(state):
132 | while True:
133 | inc_peanuts(state)
134 | sleep(1)
135 |
136 |
137 | print("\nConsuming LIVE events:\n")
138 |
139 | for _ in range(10):
140 | num = state.when_change("peanuts", live=True)
141 | print("live consumer got:", num)
142 |
143 | sleep(2)
144 |
145 | print("\nConsuming non-LIVE events:\n")
146 |
147 | for _ in range(10):
148 | num = state.when_change("peanuts", live=False)
149 | print("non-live consumer got:", num)
150 |
151 | sleep(2)
152 |
153 | print("\nConsuming a hybrid of LIVE and non-LIVE events:\n")
154 |
155 | for _ in range(10):
156 | num = state.when_change("peanuts", live=False)
157 | print("hybrid consumer got:", num)
158 |
159 | state.go_live()
160 |
161 | sleep(2)
162 |
163 | print("Exit")
164 |
--------------------------------------------------------------------------------
/examples/remote_exceptions.py:
--------------------------------------------------------------------------------
1 | from time import sleep
2 |
3 | import zproc
4 |
5 | ctx = zproc.Context()
6 |
7 |
8 | @ctx.spawn(pass_state=False)
9 | def my_process():
10 | sleep(1)
11 |
12 | try:
13 | raise ValueError("hello!")
14 | except Exception as e:
15 | print("encountered:", repr(e))
16 |
17 | # This serializes the current exception and sends it back to parent.
18 | return zproc.RemoteException()
19 |
20 |
21 | sleep(5)
22 |
23 | try:
24 | my_process.wait() # waits for a return value from the process.
25 | except Exception as e:
26 | print("caught it!", repr(e))
27 |
--------------------------------------------------------------------------------
/examples/state_watchers.py:
--------------------------------------------------------------------------------
1 | """
2 | A demonstration of the numerous ways of "watching" the state.
3 |
4 | # Expected output
5 |
6 | .get_when_not_equal('flag1', False) -> None
7 |
8 | main: I set flag1:
9 | .get_when(lambda s: s.get("flag1") is True) -> {'flag1': True}
10 | .get_when_equal('flag1', True) -> True
11 | .get_when_change() -> {'flag1': True}
12 |
13 | main: I set flag2:
14 | .get_when_change("flag1", exclude=True) -> {'flag1': True, 'flag2': True}
15 | .get_when_change("flag2") -> True
16 |
17 | """
18 | from time import sleep
19 |
20 | import zproc
21 |
22 |
23 | def child1(state):
24 | val = state.when(lambda s: s.get("flag1") is True)
25 | print('.get_when(lambda s: s.get("flag1") is True) ->', val)
26 |
27 |
28 | def child2(state):
29 | val = state.when_equal("flag1", True)
30 | print(".get_when_equal('flag1', True) ->", val)
31 |
32 |
33 | def child3(state):
34 | val = state.when_not_equal("flag1", False)
35 | print(".get_when_not_equal('flag1', False) ->", val)
36 |
37 |
38 | def child4(state):
39 | val = state.when_change()
40 | print(".get_when_change() ->", val)
41 |
42 |
43 | def child5(state):
44 | val = state.when_change("flag2")
45 | print('.get_when_change("flag2") ->', val)
46 |
47 |
48 | def child6(state):
49 | val = state.when_change("flag1", exclude=True)
50 | print('.get_when_change("flag1", exclude=True) ->', val)
51 |
52 |
53 | if __name__ == "__main__":
54 | ctx = zproc.Context(wait=True)
55 |
56 | ctx.spawn(child1, child2, child3, child4, child5, child6)
57 |
58 | sleep(1)
59 |
60 | print("\nmain: I set flag1:")
61 | ctx.state["flag1"] = True
62 |
63 | sleep(1)
64 |
65 | print("\nmain: I set flag2:")
66 | ctx.state["flag2"] = True
67 |
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | ignore_missing_imports = True
--------------------------------------------------------------------------------
/readthedocs.yml:
--------------------------------------------------------------------------------
1 | build:
2 | image: latest
3 |
4 | python:
5 |   version: 3.6
6 | pip_install: true
7 | setup_py_install: true
8 | formats: []
9 | extra_requirements:
10 | - docs
11 |
--------------------------------------------------------------------------------
/requirements.in:
--------------------------------------------------------------------------------
1 | pyzmq
2 | tblib
3 | psutil
4 | cloudpickle
5 | sphinx-rtd-theme
6 | ipython
7 | sphinx
8 | twine
9 | pytest
10 | yappi
11 | pytest-repeat
12 | mypy
13 | -e .
14 | pip-tools
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile
3 | # To update, run:
4 | #
5 | # pip-compile -v requirements.in
6 | #
7 | -e file:///home/dev/Projects/zproc
8 | alabaster==0.7.12 # via sphinx
9 | atomicwrites==1.3.0 # via pytest
10 | attrs==19.1.0 # via pytest
11 | babel==2.6.0 # via sphinx
12 | backcall==0.1.0 # via ipython
13 | bleach==3.1.0 # via readme-renderer
14 | certifi==2019.3.9 # via requests
15 | chardet==3.0.4 # via requests
16 | click==7.0 # via pip-tools
17 | cloudpickle==0.8.0
18 | decorator==4.3.2 # via ipython, traitlets
19 | docutils==0.14 # via readme-renderer, sphinx
20 | idna==2.8 # via requests
21 | imagesize==1.1.0 # via sphinx
22 | ipython-genutils==0.2.0 # via traitlets
23 | ipython==7.3.0
24 | jedi==0.13.3 # via ipython
25 | jinja2==2.10 # via sphinx
26 | markupsafe==1.1.1 # via jinja2
27 | more-itertools==6.0.0 # via pytest
28 | mypy-extensions==0.4.1 # via mypy
29 | mypy==0.670
30 | packaging==19.0 # via sphinx
31 | parso==0.3.4 # via jedi
32 | pexpect==4.6.0 # via ipython
33 | pickleshare==0.7.5 # via ipython
34 | pip-tools==3.4.0
35 | pkginfo==1.5.0.1 # via twine
36 | pluggy==0.9.0 # via pytest
37 | prompt-toolkit==2.0.9 # via ipython
38 | psutil==5.6.1
39 | ptyprocess==0.6.0 # via pexpect
40 | py==1.8.0 # via pytest
41 | pygments==2.3.1 # via ipython, readme-renderer, sphinx
42 | pyparsing==2.3.1 # via packaging
43 | pytest-repeat==0.8.0
44 | pytest==4.3.0
45 | pytz==2018.9 # via babel
46 | pyzmq==18.0.1
47 | readme-renderer==24.0 # via twine
48 | requests-toolbelt==0.9.1 # via twine
49 | requests==2.21.0 # via requests-toolbelt, sphinx, twine
50 | six==1.12.0 # via bleach, packaging, pip-tools, prompt-toolkit, pytest, readme-renderer, sphinx, traitlets
51 | snowballstemmer==1.2.1 # via sphinx
52 | sphinx-rtd-theme==0.4.3
53 | sphinx==1.8.5
54 | sphinxcontrib-websupport==1.1.0 # via sphinx
55 | tblib==1.3.2
56 | tqdm==4.31.1 # via twine
57 | traitlets==4.3.2 # via ipython
58 | twine==1.13.0
59 | typed-ast==1.3.1 # via mypy
60 | urllib3==1.24.2 # via requests
61 | wcwidth==0.1.7 # via prompt-toolkit
62 | webencodings==0.5.1 # via bleach
63 | yappi==1.0
64 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 |
4 | # Note: To use the 'upload' functionality of this file, you must:
5 | # $ pip install twine
6 |
7 | import io
8 | import os
9 | import sys
10 | from shutil import rmtree
11 |
12 | from setuptools import find_packages, Command, setup
13 |
14 | # Package meta-data.
15 | NAME = "zproc"
16 | DESCRIPTION = "ZProc - Multi Processing on steroids"
17 | URL = "https://github.com/devxpy/zproc"
18 | EMAIL = "devxpy@gmail.com"
19 | AUTHOR = "Dev Aggarwal"
20 | REQUIRES_PYTHON = ">=3.6"
21 | VERSION = None
22 |
23 | # What packages are required for this module to be executed?
24 | REQUIRED = ["pyzmq", "tblib", "psutil", "cloudpickle"]
25 |
26 | # What packages are optional?
27 | EXTRA = {
28 | # "docs" dependencies are used by readthedocs (see `readthedocs.yml` file)
29 | "docs": ["sphinx", "twine", "sphinx_rtd_theme"]
30 | }
31 |
32 | # The rest you shouldn't have to touch too much :)
33 | # ------------------------------------------------
34 | # Except, perhaps the License and Trove Classifiers!
35 | # If you do change the License, remember to change the Trove Classifier for that!
36 |
37 | here = os.path.abspath(os.path.dirname(__file__))
38 |
39 | # Import the README and use it as the long-description.
40 | # Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
41 | with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f:
42 | long_description = "\n" + f.read()
43 |
44 | # Load the package's __version__.py module as a dictionary.
45 | about = {}
46 | if not VERSION:
47 | with open(os.path.join(here, NAME, "__version__.py")) as f:
48 | exec(f.read(), about)
49 | else:
50 | about["__version__"] = VERSION
51 |
52 |
53 | class UploadCommand(Command):
54 |     """Support setup.py upload."""
55 |
56 |     description = "Build and publish the package."
57 |     user_options = []
58 |
59 |     @staticmethod
60 |     def status(s):
61 |         """Prints things in bold."""
62 |         print("\033[1m{0}\033[0m".format(s))
63 |
64 |     def initialize_options(self):
65 |         pass  # required distutils Command hook; no options to initialize
66 |
67 |     def finalize_options(self):
68 |         pass  # required distutils Command hook; nothing to validate
69 |
70 |     def run(self):
71 |         try:
72 |             self.status("Removing previous builds…")
73 |             rmtree(os.path.join(here, "dist"))  # drop stale artifacts so old builds aren't re-uploaded
74 |         except OSError:
75 |             pass  # dist/ didn't exist yet — nothing to clean
76 |
77 |         self.status("Building Source and Wheel (universal) distribution…")
78 |         os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))
79 |
80 |         self.status("Uploading the package to PyPi via Twine…")
81 |         os.system("twine upload dist/*")
82 |
83 |         self.status("Pushing git tags…")
84 |         os.system("git tag v{0}".format(about["__version__"]))
85 |         os.system("git push --tags")
86 |
87 |         sys.exit()  # upload is terminal — don't fall through to the normal setup() flow
88 |
89 |
90 | # Where the magic happens:
91 | setup(
92 | name=NAME,
93 | version=about["__version__"],
94 | description=DESCRIPTION,
95 | long_description=long_description,
96 | long_description_content_type="text/markdown",
97 | author=AUTHOR,
98 | author_email=EMAIL,
99 | python_requires=REQUIRES_PYTHON,
100 | url=URL,
101 | packages=find_packages(exclude=("tests", "docs", "examples", "benchmarks")),
102 | install_requires=REQUIRED,
103 | extra_require=EXTRA,
104 | include_package_data=True,
105 | license="MIT",
106 | classifiers=[
107 | # Trove classifiers
108 | # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
109 | "License :: OSI Approved :: MIT License",
110 | "Programming Language :: Python",
111 | "Programming Language :: Python :: 3",
112 | "Programming Language :: Python :: 3.6",
113 | "Programming Language :: Python :: 3.7",
114 | "Programming Language :: Python :: Implementation :: CPython",
115 | ],
116 | # $ setup.py publish support.
117 | cmdclass={"upload": UploadCommand},
118 | )
119 |
--------------------------------------------------------------------------------
/tests/resillience_tests/README.md:
--------------------------------------------------------------------------------
1 | These tests are meant to test resilience & how well ZProc behaves under stress conditions.
2 |
3 | These were created to test non-deterministic behavior,
4 | wherever automated testing falls short.
--------------------------------------------------------------------------------
/tests/resillience_tests/nested_process.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | import zproc
4 |
5 | ctx = zproc.Context()
6 |
7 | for i in range(250):
8 |
9 | @ctx.spawn
10 | def p1(ctx):
11 | @ctx.spawn
12 | def p2(ctx):
13 | @ctx.spawn
14 | def p3(ctx):
15 | @ctx.spawn
16 | def p4(ctx):
17 | @ctx.spawn(pass_context=False)
18 | def pn():
19 | time.sleep(1)
20 |
21 | print(i)
22 | return i
23 |
24 | assert p1.wait() == i
25 |
--------------------------------------------------------------------------------
/tests/resillience_tests/process_wait.py:
--------------------------------------------------------------------------------
1 | import zproc
2 |
3 | ctx = zproc.Context()
4 |
5 | for i in range(250):
6 |
7 | @ctx.spawn
8 | def my_process(ctx):
9 | assert isinstance(ctx, zproc.Context)
10 | state = ctx.create_state()
11 | assert isinstance(state, zproc.State)
12 | print(i)
13 | return i
14 |
15 | assert my_process.wait() == i
16 |
--------------------------------------------------------------------------------
/tests/resillience_tests/start_server.py:
--------------------------------------------------------------------------------
1 | import zproc
2 |
3 | for i in range(100):
4 | print(zproc.start_server())
5 |
--------------------------------------------------------------------------------
/tests/resillience_tests/state_watchers.py:
--------------------------------------------------------------------------------
import random
import time

import zproc

# Number of state updates each watcher consumes before exiting.
MAX_ITER = 100
# When True, insert random sleeps to simulate slow producers/consumers.
SLOW = False

ctx = zproc.Context()
state = ctx.create_state({"foobar": 0})


@zproc.atomic
def inc(snap):
    """Atomically increment the shared counter."""
    snap["foobar"] += 1


@ctx.spawn
def generator(ctx):
    # Produces a never-ending stream of state updates.
    state = ctx.create_state()
    while True:
        inc(state)
        if SLOW:
            time.sleep(random.random())


print("LIVE:")


@ctx.spawn
def test_process(ctx):
    # live=True watcher: observes only updates made while it is watching.
    state = ctx.create_state()

    for snap in state.when_change("foobar", live=True, count=MAX_ITER):
        print(snap, end=",", flush=True)

        if SLOW:
            time.sleep(random.random())
    print()


test_process.wait()
print("BUFFERED:")


@ctx.spawn
def test_process(ctx):
    # live=False watcher: the buffered variant of the same loop.
    state = ctx.create_state()

    for snap in state.when_change("foobar", live=False, count=MAX_ITER):
        print(snap, end=",", flush=True)

        if SLOW:
            time.sleep(random.random())

    print()


test_process.wait()
60 |
--------------------------------------------------------------------------------
/tests/resillience_tests/swarm.py:
--------------------------------------------------------------------------------
import zproc

swarm = zproc.Context().create_swarm()

# Identity function; workers should return each input unchanged.
fn = lambda x: x

# Resilience test: loop forever, comparing swarm.map() against the builtin map().
i = 0
while True:
    assert swarm.map(fn, range(10 ** 5)) == list(map(fn, range(10 ** 5)))
    print(i)
    i += 1
12 |
--------------------------------------------------------------------------------
/tests/test_atomic_contract.py:
--------------------------------------------------------------------------------
import os
import signal
import time

import pytest

import zproc


@pytest.fixture
def ctx():
    # Fresh Context (and hence a fresh server) for every test.
    return zproc.Context()


@pytest.fixture
def state(ctx):
    return ctx.create_state()


def test_exception_contract(ctx, state):
    """If an atomic function raises, none of its mutations may be applied."""

    @zproc.atomic
    def mutator(snap):
        snap["x"] = 5
        raise ValueError

    with pytest.raises(ValueError):
        mutator(state)

    # The "x" = 5 mutation must have been rolled back.
    assert state == {}


def test_signal_contract(ctx, state):
    """A signal raised as an exception mid-call must not undo mutations
    already applied by the atomic function (see final assert)."""

    @zproc.atomic
    def atomic_fn(snap):
        snap["x"] = 5
        time.sleep(0.1)

    curpid = os.getpid()

    @ctx.spawn(pass_context=False)
    def p():
        # Interrupt the parent while it is inside atomic_fn().
        time.sleep(0.05)
        zproc.send_signal(signal.SIGINT, curpid)

    zproc.signal_to_exception(signal.SIGINT)

    with pytest.raises(zproc.SignalException):
        atomic_fn(state)

    print(state.copy())
    assert state == {"x": 5}
52 |
--------------------------------------------------------------------------------
/tests/test_dict_api.py:
--------------------------------------------------------------------------------
1 | """
2 | Test the dict API, offered by State
3 | """
4 | import pytest
5 |
6 | import zproc
7 |
8 |
9 | @pytest.fixture
10 | def ctx():
11 | return zproc.Context()
12 |
13 |
14 | @pytest.fixture
15 | def pydict() -> dict:
16 | return {"foo": "foo", "bar": "bar"}
17 |
18 |
19 | @pytest.fixture
20 | def state(pydict, ctx) -> zproc.State:
21 | return ctx.create_state(pydict)
22 |
23 |
24 | def test_update(state, pydict):
25 | state.update({"zoo": 1, "dog": 2})
26 | pydict.update({"zoo": 1, "dog": 2})
27 |
28 | assert state == pydict
29 |
30 |
31 | def test__contains__(state, pydict):
32 | assert ("foo" in state) == ("foo" in pydict)
33 | assert ("foo" not in state) == ("foo" not in pydict)
34 |
35 |
36 | def test__delitem__(state, pydict):
37 | del state["foo"]
38 | del pydict["foo"]
39 |
40 | assert state == pydict
41 |
42 |
43 | def test__eq__(state, pydict):
44 | assert (state == {"bar": "bar"}) == (pydict == {"bar": "bar"})
45 |
46 |
47 | def test__getitem__(state, pydict):
48 | assert state["bar"] == pydict["bar"]
49 |
50 |
51 | def test__iter__(state, pydict):
52 | for k1, k2 in zip(state, pydict):
53 | assert k1 == k2
54 |
55 |
56 | def test__len__(state, pydict):
57 | assert len(state) == len(pydict)
58 |
59 |
60 | def test__ne__(state, pydict):
61 | assert (state != {"bar": "bar"}) == (pydict != {"bar": "bar"})
62 |
63 |
64 | def test__setitem__(state, pydict):
65 | state["foo"] = 2
66 | pydict["foo"] = 2
67 | assert state == pydict
68 |
69 |
70 | def test_clear(state, pydict):
71 | state.clear()
72 | pydict.clear()
73 | assert state == pydict
74 |
75 |
76 | def test_dict_inbuilt(state, pydict):
77 | assert dict(state) == dict(pydict)
78 |
79 |
80 | def test_copy(state, pydict):
81 | assert state.copy() == pydict.copy()
82 |
83 |
84 | def test_get(state, pydict):
85 | assert state.get("xxx", []) == pydict.get("xxx", [])
86 | assert state.get("foo") == pydict.get("foo")
87 |
88 |
89 | def test_items(state, pydict):
90 | for i, j in zip(state.items(), pydict.items()):
91 | assert i[0] == j[0] and i[1] == j[1]
92 |
93 |
94 | def test_values(state, pydict):
95 | for i, j in zip(state.values(), pydict.values()):
96 | assert i == j
97 |
98 |
99 | def test_keys(state, pydict):
100 | for i, j in zip(state.keys(), pydict.keys()):
101 | assert i == j
102 |
103 |
104 | def test_setdefault(state, pydict):
105 | state.setdefault("zzz", None)
106 | pydict.setdefault("zzz", None)
107 |
108 | assert state == pydict
109 |
110 |
111 | def test_pop(state, pydict):
112 | assert state.pop("foo") == pydict.pop("foo")
113 | assert state == pydict
114 |
115 |
116 | def test_popitem(state, pydict):
117 | assert state.popitem() == pydict.popitem()
118 | assert state == pydict
119 |
--------------------------------------------------------------------------------
/tests/test_liveness.py:
--------------------------------------------------------------------------------
from time import sleep

import pytest

import zproc


@pytest.fixture
def state() -> zproc.State:
    ctx = zproc.Context()

    @ctx.spawn
    def mutator(ctx: zproc.Context):
        # Writes "counter" every ~0.1s, for ~1s total.
        state = ctx.create_state()

        for n in range(10):
            sleep(0.1)
            state["counter"] = n

    return ctx.create_state()


def test_not_live(state: zproc.State):
    # Buffered watcher: still sees the first update despite subscribing late.
    it = state.when_change("counter")
    sleep(0.25)
    assert next(it)["counter"] == 0


def test_live(state: zproc.State):
    # Live watcher: updates made during the sleep are skipped.
    it = state.when_change("counter", live=True)
    sleep(0.25)
    assert next(it)["counter"] > 0
33 |
--------------------------------------------------------------------------------
/tests/test_maps.py:
--------------------------------------------------------------------------------
import pytest

import zproc


@pytest.fixture
def ctx():
    return zproc.Context()


@pytest.fixture
def swarm(ctx):
    return ctx.create_swarm()


def test_regular(swarm):
    # swarm.map must agree with the builtin map for the same computation.
    r1 = swarm.map(pow, range(10 ** 5), args=[10])
    r2 = map(lambda x: pow(x, 10), range(10 ** 5))

    assert r1 == list(r2)


def test_lazy(swarm):
    # Both iterating a lazy result and reading .as_list must give the same answer.
    r1 = swarm.map_lazy(pow, range(10 ** 5), args=[10])
    r2 = swarm.map_lazy(pow, range(10 ** 5), args=[10])
    r3 = map(lambda x: pow(x, 10), range(10 ** 5))

    assert list(r1) == r2.as_list == list(r3)


def test_nested_map(ctx):
    # A spawned process can itself create a swarm and map over it.
    @ctx.spawn
    def p1(ctx: zproc.Context):
        swarm = ctx.create_swarm()
        return swarm.map(pow, range(100), args=[2])

    assert p1.wait() == list(map(lambda x: pow(x, 2), range(100)))


def test_remote_result(ctx):
    # A task_id returned from another process can be used to fetch its result.
    @ctx.spawn
    def p2(ctx: zproc.Context):
        swarm = ctx.create_swarm()
        result = swarm.map_lazy(pow, range(100), args=[2])
        return result.task_id

    result = zproc.SequenceTaskResult(ctx.server_address, p2.wait()).as_list
    assert result == list(map(lambda x: pow(x, 2), range(100)))
49 |
--------------------------------------------------------------------------------
/tests/test_namespaces.py:
--------------------------------------------------------------------------------
import zproc


def test_namespaces():
    """Each namespace must present an isolated view of the server's state."""
    state = zproc.Context().create_state()

    state.namespace = "test1"
    state["foo"] = 10

    assert state == {"foo": 10}

    state.namespace = "test2"
    state["bar"] = 10

    # "foo" from test1 must not leak into test2.
    assert state == {"bar": 10}

    state.namespace = "test3"
    assert state == {}
19 |
--------------------------------------------------------------------------------
/tests/test_ping.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests the "ping()" API
3 | """
4 | import pytest
5 |
6 | import zproc
7 |
8 |
9 | @pytest.fixture
10 | def ctx():
11 | return zproc.Context()
12 |
13 |
14 | @pytest.fixture
15 | def state(ctx):
16 | return ctx.create_state()
17 |
18 |
19 | def test_ping(ctx, state):
20 | pid = ctx.server_process.pid
21 | assert zproc.ping(ctx.server_address) == pid
22 | assert state.ping() == pid
23 | assert ctx.ping() == pid
24 |
25 |
26 | def test_timeout(ctx, state):
27 | pid = ctx.server_process.pid
28 | assert zproc.ping(ctx.server_address, timeout=0.1) == pid
29 | assert state.ping(timeout=0.1) == pid
30 | assert ctx.ping(timeout=0.1) == pid
31 |
32 |
33 | def test_timeout_error(ctx, state):
34 | with pytest.raises(TimeoutError):
35 | zproc.ping(ctx.server_address, timeout=0)
36 |
37 | with pytest.raises(TimeoutError):
38 | ctx.ping(timeout=0)
39 |
40 | with pytest.raises(TimeoutError):
41 | state.ping(timeout=0)
42 |
43 |
44 | def test_ping_after_close(ctx, state):
45 | ctx.server_process.terminate()
46 |
47 | with pytest.raises(TimeoutError):
48 | zproc.ping(ctx.server_address, timeout=0.1)
49 |
50 | with pytest.raises(TimeoutError):
51 | ctx.ping(timeout=0.1)
52 |
53 | with pytest.raises(TimeoutError):
54 | state.ping(timeout=0.1)
55 |
--------------------------------------------------------------------------------
/tests/test_process_kwargs.py:
--------------------------------------------------------------------------------
import pytest

import zproc


@pytest.fixture
def ctx():
    return zproc.Context()


def test_not_pass_ctx(ctx):
    # pass_context=False: the target takes no arguments.
    @ctx.spawn(pass_context=False)
    def my_process():
        return 0

    assert my_process.wait() == 0


def test_pass_ctx(ctx):
    # pass_context=True: the target receives a Context object.
    @ctx.spawn(pass_context=True)
    def my_process(ctx):
        assert isinstance(ctx, zproc.Context)
        return 1

    assert my_process.wait() == 1
26 |
--------------------------------------------------------------------------------
/tests/test_process_wait.py:
--------------------------------------------------------------------------------
import time

import pytest

import zproc

# Relative tolerance for the timing assertions (pytest.approx).
TOLERANCE = 0.1


@pytest.fixture
def ctx():
    # pass_context=False: the spawned targets in this file take no arguments.
    return zproc.Context(pass_context=False)


def test_timeout_accuracy(ctx):
    @ctx.spawn
    def test():
        time.sleep(0.5)

    # The wait must time out close to the requested 0.05s.
    start = time.time()
    try:
        ctx.wait(0.05)
    except TimeoutError:
        end = time.time()
    else:
        raise ValueError("This should have raised TimeoutError!")
    diff = end - start

    assert diff == pytest.approx(0.05, TOLERANCE)


def test_timeout_accuracy_parallel(ctx):
    @ctx.spawn
    def test1():
        time.sleep(0.5)

    @ctx.spawn
    def test2():
        time.sleep(1)

    # The timeout applies to all Processes combined, not to each wait() call.
    start = time.time()
    try:
        ctx.wait(0.6)
    except TimeoutError:
        end = time.time()
    else:
        raise ValueError("This should have raised TimeoutError!")
    diff = end - start

    assert diff == pytest.approx(0.6, TOLERANCE)


def test_timeout1(ctx):
    @ctx.spawn
    def test():
        time.sleep(0.5)

    with pytest.raises(TimeoutError):
        ctx.wait(0.1)


def test_timeout2(ctx):
    @ctx.spawn
    def test():
        time.sleep(0.5)

    with pytest.raises(TimeoutError):
        test.wait(0.1)


def test_wait_timeout(ctx):
    @ctx.spawn
    def test1():
        time.sleep(0.5)

    @ctx.spawn
    def test2():
        time.sleep(1)

    # will raise an exc
    with pytest.raises(TimeoutError):
        ctx.wait(0.6)


def test_wait_timeout_dumb(ctx):
    @ctx.spawn
    def test1():
        time.sleep(0.5)

    @ctx.spawn
    def test2():
        time.sleep(1)

    # won't raise exc (each Process gets its own 0.6s budget)
    for i in ctx.process_list:
        i.wait(0.6)


def test_wait_error(ctx):
    @ctx.spawn
    def test():
        raise ValueError

    # A crashed Process surfaces as ProcessWaitError from both wait() flavors.
    with pytest.raises(zproc.ProcessWaitError):
        test.wait()

    with pytest.raises(zproc.ProcessWaitError):
        ctx.wait()


def test_exit(ctx):
    @ctx.spawn
    def test():
        raise zproc.ProcessExit(1)

    with pytest.raises(zproc.ProcessWaitError):
        test.wait()

    # ProcessExit(1) must be reflected in the exitcode.
    assert test.exitcode == 1

    with pytest.raises(zproc.ProcessWaitError):
        ctx.wait()
123 |
--------------------------------------------------------------------------------
/tests/test_retries.py:
--------------------------------------------------------------------------------
import pytest

import zproc


@pytest.fixture
def ctx():
    return zproc.Context()


@pytest.fixture
def state(ctx):
    return ctx.create_state({"times": 0})


def test_retry(ctx, state):
    @ctx.spawn(retry_for=[ValueError], max_retries=5, retry_delay=0)
    def p(ctx):
        state = ctx.create_state()
        try:
            raise ValueError
        finally:
            # Count every attempt: 1 initial run + 5 retries = 6.
            state["times"] += 1

    with pytest.raises(zproc.ProcessWaitError):
        p.wait()
    assert state["times"] == 6


def test_infinite_retry(ctx, state):
    @ctx.spawn(retry_for=[ValueError], max_retries=None, retry_delay=0.005)
    def p(ctx):
        state = ctx.create_state()
        try:
            raise ValueError
        finally:
            # it's okay to do this, since no other parallel processes are running
            state["times"] += 1

    # max_retries=None never gives up, so wait() must time out instead.
    with pytest.raises(TimeoutError):
        p.wait(timeout=0.1)
    p.stop()
    assert 10 <= state["times"] <= 20
44 |
--------------------------------------------------------------------------------
/tests/test_server_address.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests the re-creation of object, using the UUID feature
3 | """
4 | import random
5 |
6 | import pytest
7 |
8 | import zproc
9 |
10 | TEST_VALUE = {"foo": 42}
11 |
12 |
13 | def test_random_addr():
14 | ctx = zproc.Context()
15 | state = ctx.create_state(TEST_VALUE)
16 |
17 | ctx = zproc.Context(ctx.server_address, start_server=False)
18 | assert state == TEST_VALUE
19 |
20 | state = zproc.State(ctx.server_address)
21 | assert state == TEST_VALUE
22 |
23 |
24 | def test_static_addr():
25 | addr = "tcp://127.0.0.1:%d" % random.randint(20000, 50000)
26 |
27 | ctx = zproc.Context(addr)
28 | state = ctx.create_state(TEST_VALUE)
29 |
30 | assert state == TEST_VALUE
31 |
32 | state = zproc.State(addr)
33 | assert state == TEST_VALUE
34 |
35 |
36 | def test_start_server():
37 | _, addr = zproc.start_server()
38 |
39 | ctx = zproc.Context(addr, start_server=False)
40 | state = ctx.create_state(TEST_VALUE)
41 |
42 | ctx = zproc.Context(addr, start_server=False)
43 | assert state == TEST_VALUE
44 |
45 | state = zproc.State(ctx.server_address)
46 | assert state == TEST_VALUE
47 |
48 |
49 | def test_not_start_server():
50 | with pytest.raises(AssertionError):
51 | zproc.Context(start_server=False)
52 |
--------------------------------------------------------------------------------
/tests/test_state_watchers.py:
--------------------------------------------------------------------------------
import time

import pytest

import zproc


@pytest.fixture
def state():
    ctx = zproc.Context()

    # Background process that mutates the state shortly after startup;
    # the tests below watch for these specific transitions.
    @ctx.spawn()
    def updater(ctx):
        state = ctx.create_state()

        state["none"] = None
        state["flag"] = False
        time.sleep(0.1)
        state["avail"] = True
        state["flag"] = True
        state["none"] = True

    return ctx.create_state()


###


def test_when_change(state):
    it = state.when_change()
    assert isinstance(next(it), dict)


###


def test_when(state):
    it = state.when(lambda s: s.get("flag") is True)
    assert next(it)["flag"] is True


###


def test_when_equal(state):
    it = state.when_equal("flag", True)
    assert next(it)["flag"]


###


def test_when_not_equal(state):
    it = state.when_not_equal("flag", False)
    assert next(it).get("flag") is not False


###


def test_when_none(state):
    it = state.when_none("none")
    assert next(it).get("none") is None


###


def test_when_not_none(state):
    it = state.when_not_none("none")
    assert next(it)["none"] is not None


###


def test_when_avail(state):
    it = state.when_available("avail")
    assert "avail" in next(it)
80 |
--------------------------------------------------------------------------------
/zproc/__init__.py:
--------------------------------------------------------------------------------
from .__version__ import __version__
from .context import Context
from .exceptions import (
    ProcessWaitError,
    RemoteException,
    SignalException,
    ProcessExit,
    signal_to_exception,
    exception_to_signal,
    send_signal,
)
from .process import Process
from .server.tools import start_server, ping
from .state.state import State, atomic
from .task.result import SequenceTaskResult, SimpleTaskResult
from .task.swarm import Swarm
from .util import clean_process_tree, consume, create_ipc_address


@atomic
def increase(state, key, step=1):
    """Atomically increase ``state[key]`` by ``step`` (default 1)."""
    state[key] += step


@atomic
def decrease(state, key, step=1):
    """Atomically decrease ``state[key]`` by ``step`` (default 1)."""
    state[key] -= step


@atomic
def append(state, key, item):
    """Atomically append ``item`` to the sequence at ``state[key]``."""
    state[key].append(item)
--------------------------------------------------------------------------------
/zproc/__version__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.9.5"
2 |
--------------------------------------------------------------------------------
/zproc/child.py:
--------------------------------------------------------------------------------
import os
import textwrap
import time
import traceback
from functools import wraps

import zmq

from zproc import exceptions, util, serializer


class ChildProcess:
    """Bootstraps and runs a Process's target inside the child process.

    Handles the retry loop, crash reporting, and shipping the target's
    return value back to the parent over a ZMQ PAIR socket.
    """

    # Exit code passed to clean_process_tree(); overridden by ProcessExit.
    exitcode = 0
    # Number of times the target has been (re)started.
    retries = 0

    def __init__(self, **kwargs):
        self.kwargs = kwargs

        self.pass_context = self.kwargs["pass_context"]
        self.max_retries = self.kwargs["max_retries"]
        self.retry_delay = self.kwargs["retry_delay"]
        self.retry_args = self.kwargs["retry_args"]
        self.retry_kwargs = self.kwargs["retry_kwargs"]
        self.to_catch = tuple(util.to_catchable_exc(self.kwargs["retry_for"]))
        self.target = self.kwargs["target"]

        self.target_args = self.kwargs["target_args"]
        self.target_kwargs = self.kwargs["target_kwargs"]

        # Header shown at the top of every crash report.
        self.basic_info = "target: %s\npid: %r\nppid: %r" % (
            util.callable_repr(self.target),
            os.getpid(),
            os.getppid(),
        )

        # Constructing a ChildProcess immediately runs the target.
        self.main()

    def _handle_exc(self, e: Exception, *, handle_retry: bool = False):
        """Print a crash report for ``e``, optionally doing retry bookkeeping.

        When ``handle_retry`` is True and ``max_retries`` (if not None) is
        exhausted, ``e`` is re-raised instead of being reported.
        """
        retry_info = ""
        if handle_retry:
            # retries has already been incremented for the failed attempt.
            if self.max_retries is not None and self.retries > self.max_retries:
                raise e

            retry_info = "Tried - %r time(s)\n" % self.retries
            if self.max_retries is not None and self.retries >= self.max_retries:
                retry_info += "*Max retries reached*\n"
            if isinstance(e, exceptions.SignalException):
                # NOTE(review): converts the exception back into a real signal —
                # presumably so the signal's normal behavior applies; confirm
                # against exceptions.exception_to_signal.
                exceptions.exception_to_signal(e)
            else:
                retry_info += "Next retry in - %r sec\n" % self.retry_delay

        report = "\n".join((self.basic_info, retry_info, traceback.format_exc()))
        report = textwrap.indent(report, " " * 2)
        print("\n[ZProc] Crash report:\n" + report)

        if handle_retry:
            time.sleep(self.retry_delay)

    def main(self):
        """Run the target (with retries), then send its return value to the parent."""

        @wraps(self.target)
        def target_wrapper(*args, **kwargs):
            # Retry loop: re-runs the target for exceptions listed in `retry_for`.
            while True:
                self.retries += 1
                try:
                    return self.target(*args, **kwargs)
                except exceptions.ProcessExit as e:
                    # Graceful exit requested by the target itself.
                    self.exitcode = e.exitcode
                    return None
                except self.to_catch as e:
                    self._handle_exc(e, handle_retry=True)

                    # Subsequent attempts may use the alternate retry arguments.
                    if self.retry_args is not None:
                        self.target_args = self.retry_args
                    if self.retry_kwargs is not None:
                        self.target_kwargs = self.retry_kwargs

        try:
            if self.pass_context:
                from .context import Context  # this helps avoid a circular import

                # Attach a Context to the existing server and pass it as the
                # target's first argument.
                return_value = target_wrapper(
                    Context(
                        self.kwargs["server_address"],
                        namespace=self.kwargs["namespace"],
                        start_server=False,
                    ),
                    *self.target_args,
                    **self.target_kwargs
                )
            else:
                return_value = target_wrapper(*self.target_args, **self.target_kwargs)
            # Deliver the return value to the parent's result socket.
            with util.create_zmq_ctx(linger=True) as zmq_ctx:
                with zmq_ctx.socket(zmq.PAIR) as result_sock:
                    result_sock.connect(self.kwargs["result_address"])
                    result_sock.send(serializer.dumps(return_value))
        except Exception as e:
            self._handle_exc(e)
        finally:
            util.clean_process_tree(self.exitcode)
101 |
--------------------------------------------------------------------------------
/zproc/consts.py:
--------------------------------------------------------------------------------
import struct
from typing import NamedTuple

# Length (in bytes) of the random nonce shared by task IDs and ZMQ identities.
TASK_NONCE_LENGTH = ZMQ_IDENTITY_LENGTH = 5

# struct format of the fixed-size info appended to the nonce to form a task ID.
TASK_INFO_FMT = ">III"
TASK_ID_LENGTH = TASK_NONCE_LENGTH + struct.calcsize(TASK_INFO_FMT)

# struct format of the chunk info appended to a task ID to form a chunk ID.
CHUNK_INFO_FMT = ">i"
CHUNK_ID_LENGTH = TASK_ID_LENGTH + struct.calcsize(CHUNK_INFO_FMT)

# -1 means "block forever" for ZMQ's RECVTIMEO socket option.
DEFAULT_ZMQ_RECVTIMEO = -1
DEFAULT_NAMESPACE = "default"


EMPTY_MULTIPART = [b""]


class Msgs:
    # Keys of the fields inside a request message
    # (presumably indices into the request — confirm against server code).
    cmd = 0
    info = 1
    namespace = 2
    args = 3
    kwargs = 4


class Cmds:
    # Command codes understood by the zproc server.
    ping = 0
    get_server_meta = 1

    get_state = 2
    set_state = 3

    run_fn_atomically = 4
    run_dict_method = 5

    time = 6


class ServerMeta(NamedTuple):
    # Server version plus the addresses of its various sockets.
    version: str

    state_router: str
    watcher_router: str

    task_router: str
    task_result_pull: str

    task_proxy_in: str
    task_proxy_out: str


class StateUpdate(NamedTuple):
    # A single state transition, as delivered to watchers.
    before: dict
    after: dict
    timestamp: float
    is_identical: bool
58 |
--------------------------------------------------------------------------------
/zproc/context.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import multiprocessing
3 | import pprint
4 | import signal
5 | import time
6 | from contextlib import suppress
7 | from typing import Callable, Union, Any, List, Mapping, Sequence, Tuple, cast
8 |
9 | from . import util
10 | from .consts import DEFAULT_NAMESPACE
11 | from .process import Process
12 | from .server import tools
13 | from .state.state import State
14 | from .task.map_plus import map_plus
15 | from .task.swarm import Swarm
16 |
17 |
class ProcessList(list):
    """A ``list`` of :py:class:`Process` objects, with convenience methods
    that operate on all of them at once."""

    def __str__(self):
        return ProcessList.__qualname__ + ": " + pprint.pformat(list(self))

    def __repr__(self):
        return "<" + self.__str__() + ">"

    @staticmethod
    def _wait_or_catch_exc(
        process: Process, timeout: Union[int, float] = None
    ) -> Union[Exception, Any]:
        """Like :py:meth:`Process.wait`, but returns the Exception instead of raising."""
        try:
            return process.wait(timeout)
        except Exception as e:
            return e

    def wait(
        self, timeout: Union[int, float] = None, safe: bool = False
    ) -> List[Union[Any, Exception]]:
        """
        Call :py:meth:`~Process.wait()` on all the Processes in this list.

        :param timeout:
            Same as :py:meth:`~Process.wait()`.

            This parameter controls the timeout for all the Processes combined,
            not a single :py:meth:`~Process.wait()` call.
        :param safe:
            Suppress any errors that occur while waiting for a Process.

            The return value of failed :py:meth:`~Process.wait()` calls are substituted with the ``Exception`` that occurred.
        :return:
            A ``list`` containing the values returned by child Processes of this Context.
        """
        if safe:
            _wait = self._wait_or_catch_exc
        else:
            _wait = Process.wait

        if timeout is None:
            return [_wait(process) for process in self]
        # The deadline is shared: each successive wait() gets the time remaining.
        final = time.time() + timeout
        return [_wait(process, final - time.time()) for process in self]

    def start(self):
        """
        Call :py:meth:`~Process.start()` on all the child processes of this Context

        Ignores if a Process is already started, unlike :py:meth:`~Process.start()`,
        which throws an ``AssertionError``.
        """
        # BUGFIX: suppress() must wrap each start() individually. Previously it
        # wrapped the whole loop, so the first already-started Process aborted
        # the iteration and silently skipped starting the remaining Processes.
        for process in self:
            with suppress(AssertionError):
                process.start()

    def stop(self):
        """
        Call :py:meth:`~Process.stop()` on all the Processes in this list.

        Retains the same order as ``Context.process_list``.

        :return:
            A ``list`` containing the exitcodes of the child Processes of this Context.
        """
        return [proc.stop() for proc in self]
84 |
85 |
86 | class Context:
87 | #: The :py:class:`multiprocessing.Process` object for the server.
88 | server_process: multiprocessing.Process
89 |
90 | def __init__(
91 | self,
92 | server_address: str = None,
93 | *,
94 | start_server: bool = True,
95 | backend: Callable = multiprocessing.Process,
96 | wait: bool = False,
97 | cleanup: bool = True,
98 | namespace: str = DEFAULT_NAMESPACE,
99 | **process_kwargs
100 | ) -> None:
101 | r"""
102 | Provides a high level interface to :py:class:`State` and :py:class:`Process`.
103 |
104 | Primarily used to manage and launch processes.
105 |
106 | All processes launched using a Context, share the same state.
107 |
108 | Don't share a Context object between Processes / Threads.
109 | A Context object is not thread-safe.
110 |
111 | :param server_address:
112 | The address of the server.
113 |
114 | If this is set to ``None``, a random address will be generated.
115 | :param start_server:
116 | Whether to start the ZProc server.
117 | It is started automatically by default.
118 |
119 | If this is set to ``None``, then you must either -
120 |
121 | - Start a server using a different Context object.
122 | - Start one manually, using :py:func:`start_server`.
123 |
124 | In both cases,
125 | it the user's responsibility to make sure that the ``server_address`` argument
126 | is satisfied.
127 |
128 | .. note::
129 |
130 | If the server is not started before-hand,
131 | the Context object will block infinitely, waiting for the server to respond.
132 |
133 | In case you want to play around,
134 | the :py:func:`ping` function is handy,
135 | since it let's you *detect* the presence of a server at a given address.
136 | :param backend:
137 | .. include:: /api/snippets/backend.rst
138 | :param wait:
139 | Wait for all running process to finish their work before exiting.
140 |
141 | Alternative to manually calling :py:meth:`~Context.wait` at exit.
142 | :param cleanup:
143 | Whether to cleanup the process tree before exiting.
144 |
145 | Registers a signal handler for ``SIGTERM``, and an ``atexit`` handler.
146 | :param \*\*process_kwargs:
147 | Keyword arguments that :py:class:`~Process` takes,
148 | except ``server_address`` and ``target``.
149 |
150 | If provided,
151 | these will be used while creating processes using this Context.
152 | """
153 | #: A :py:class:`ProcessList` object containing all Processes created under this Context.
154 | self.process_list = ProcessList()
155 | #: Passed on from the constructor. This is read-only.
156 | self.backend = backend
157 | #: Passed on from the constructor. This is read-only.
158 | self.namespace = namespace
159 | #: Passed on from the constructor.
160 | self.process_kwargs = process_kwargs
161 |
162 | self.process_kwargs.setdefault("namespace", self.namespace)
163 | self.process_kwargs.setdefault("backend", self.backend)
164 |
165 | self.server_address = cast(str, server_address)
166 | """The server's address.
167 |
168 | This holds the address this Context is connected to,
169 | not necessarily the value provided in the constructor.
170 |
171 | This is read-only."""
172 |
173 | if start_server:
174 | self.start_server()
175 |
176 | assert self.server_address is not None, (
177 | "Couldn't determine the server address. "
178 | "Hint: Either provide the `server_address` parameter, "
179 | "or pass `start_server=True`."
180 | )
181 |
182 | # register cleanup before wait, so that wait runs before cleanup.
183 | # (order of execution is reversed)
184 | if cleanup:
185 | atexit.register(util.clean_process_tree)
186 | if util.is_main_thread():
187 | signal.signal(signal.SIGTERM, util.clean_process_tree)
188 | if wait:
189 | atexit.register(self.wait)
190 |
191 | def __str__(self):
192 | return "%s - server: %r at %#x" % (
193 | self.__class__.__qualname__,
194 | self.server_address,
195 | id(self),
196 | )
197 |
198 | def __repr__(self):
199 | return util.enclose_in_brackets(self.__str__())
200 |
201 | def create_state(self, value: dict = None, *, namespace: str = None):
202 | """
203 | Creates a new :py:class:`State` object, sharing the same zproc server as this Context.
204 |
205 | :param value:
206 | If provided, call ``state.update(value)``.
207 | :param namespace:
208 | Use this as the namespace for the :py:class:`State` object,
209 | instead of this :py:class:`Context`\ 's namespace.
210 | :return:
211 | A :py:class:`State` object.
212 | """
213 | if namespace is None:
214 | namespace = self.namespace
215 | state = State(self.server_address, namespace=namespace)
216 | if value is not None:
217 | state.update(value)
218 | return state
219 |
220 | def create_swarm(self, count: int = None):
221 | swarm = Swarm(self.server_address, namespace=self.namespace)
222 | swarm.start(count)
223 | return swarm
224 |
225 | def start_server(self) -> Tuple[multiprocessing.Process, str]:
226 | out = tools.start_server(self.server_address, backend=self.backend)
227 | self.server_process, self.server_address = out
228 | return out
229 |
230 | def _process(
231 | self, target: Callable = None, **process_kwargs
232 | ) -> Union[Process, Callable]:
233 | r"""
234 | Produce a child process bound to this context.
235 |
236 | Can be used both as a function and decorator:
237 |
238 | .. code-block:: python
239 | :caption: Usage
240 |
241 | @zproc.process(pass_context=True) # you may pass some arguments here
242 | def p1(ctx):
243 | print('hello', ctx)
244 |
245 |
246 | @zproc.process # or not...
247 | def p2(state):
248 | print('hello', state)
249 |
250 |
251 | def p3(state):
252 | print('hello', state)
253 |
254 | zproc.process(p3) # or just use as a good ol' function
255 |
256 | :param target:
257 | Passed on to the :py:class:`Process` constructor.
258 |
259 | *Must be omitted when using this as a decorator.*
260 |
261 | :param \*\*process_kwargs:
262 | .. include:: /api/context/params/process_kwargs.rst
263 |
264 | :return: The :py:class:`Process` instance produced.
265 | """
266 | process = Process(
267 | self.server_address, target, **{**self.process_kwargs, **process_kwargs}
268 | )
269 | self.process_list.append(process)
270 | return process
271 |
272 | def spawn(self, *targets: Callable, count: int = 1, **process_kwargs):
273 | r"""
274 | Produce one or many child process(s) bound to this context.
275 |
276 | :param \*targets:
277 | Passed on to the :py:class:`Process` constructor, one at a time.
278 |
279 | :param count:
280 | The number of processes to spawn for each item in ``targets``.
281 |
282 | :param \*\*process_kwargs:
283 | .. include:: /api/context/params/process_kwargs.rst
284 |
285 | :return:
286 | A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
287 | """
288 |
289 | if not targets:
290 |
291 | def wrapper(target: Callable):
292 | return self.spawn(target, count=count, **process_kwargs)
293 |
294 | return wrapper
295 |
296 | if len(targets) * count == 1:
297 | return self._process(targets[0], **process_kwargs)
298 |
299 | return ProcessList(
300 | self._process(target, **process_kwargs)
301 | for _ in range(count)
302 | for target in targets
303 | )
304 |
def spawn_map(
    self,
    target: Callable,
    map_iter: Sequence[Any] = None,
    *,
    map_args: Sequence[Sequence[Any]] = None,
    args: Sequence = None,
    map_kwargs: Sequence[Mapping[str, Any]] = None,
    kwargs: Mapping = None,
    **process_kwargs
):
    r"""
    Spawn one child process per item of the ``map_*`` sequences.

    The per-item values and the common ``args``/``kwargs`` are combined
    by ``map_plus`` (see ``map_plus`` for the exact combination rules),
    and each resulting argument set produces one child process.

    :param target: The Callable to be invoked inside each new process.
    :param map_iter: Sequence of values, one passed to each process.
    :param map_args: Sequence of positional-argument tuples, one per process.
    :param args: Positional arguments common to every process.
    :param map_kwargs: Sequence of keyword-argument mappings, one per process.
    :param kwargs: Keyword arguments common to every process.
    :param \*\*process_kwargs:
        .. include:: /api/context/params/process_kwargs.rst
    :return: A ``ProcessList`` of the :py:class:`Process` instance(s) produced.
    """
    return ProcessList(
        map_plus(
            lambda *args, **kwargs: self._process(
                target, args=args, kwargs=kwargs, **process_kwargs
            ),
            map_iter,
            map_args,
            args,
            map_kwargs,
            kwargs,
        )
    )
328 |
def wait(
    self, timeout: Union[int, float] = None, safe: bool = False
) -> List[Union[Any, Exception]]:
    """
    alias for :py:meth:`ProcessList.wait()`

    Waits on every Process spawned under this Context.
    """
    return self.process_list.wait(timeout, safe)
336 |
def start_all(self):
    """
    alias for :py:meth:`ProcessList.start_all()`
    """
    # Delegate to ProcessList.start_all(), as advertised by the docstring
    # (previously called the nonexistent/wrong ``start()``).
    return self.process_list.start_all()
342 |
def stop_all(self):
    """
    alias for :py:meth:`ProcessList.stop_all()`
    """
    # Delegate to ProcessList.stop_all(), as advertised by the docstring
    # (previously called the nonexistent/wrong ``stop()``).
    return self.process_list.stop_all()
348 |
def ping(self, **kwargs):
    r"""
    Ping the zproc server.

    Uses this Context's ``server_address``.

    :param \*\*kwargs: Keyword arguments that :py:func:`ping` takes, except ``server_address``.
    :return: Same as :py:func:`ping`
    """
    return tools.ping(self.server_address, **kwargs)
357 |
--------------------------------------------------------------------------------
/zproc/exceptions.py:
--------------------------------------------------------------------------------
1 | import os
2 | import signal
3 | import sys
4 | from typing import Union
5 |
6 | from tblib.pickling_support import unpickle_traceback, pickle_traceback
7 |
8 | from zproc import util
9 |
10 |
class ProcessWaitError(Exception):
    """
    Raised when waiting on a child Process fails -- e.g. it exits with a
    non-zero exitcode, or dies before sending its return value.
    """

    def __init__(self, message, exitcode, process=None) -> None:
        self.message = message
        self.exitcode = exitcode
        self.process = process

    def __str__(self):
        return self.message
19 |
20 |
class RemoteException(Exception):
    """
    Wraps a ``sys.exc_info()`` triple so an exception raised in another
    process can be pickled, sent over the wire, and re-raised locally
    with its original traceback (tracebacks are made picklable via ``tblib``).
    """

    def __init__(self, exc_info=None):
        # Default to the exception currently being handled.
        self.exc_info = exc_info
        if self.exc_info is None:
            self.exc_info = sys.exc_info()

    @classmethod
    def _from_pickled_state(cls, exc_type, value, pickled_tb):
        # Inverse of __reduce__(): rebuild the traceback from its pickled form.
        return cls([exc_type, value, unpickle_traceback(*pickled_tb)])

    def __reduce__(self):
        # pickle_traceback() returns a (unpickle_traceback, args) pair;
        # only the args are shipped, unpickle_traceback is applied above.
        return (
            RemoteException._from_pickled_state,
            (self.exc_info[0], self.exc_info[1], pickle_traceback(self.exc_info[2])[1]),
        )

    def reraise(self):
        # Unbound-method call: ExcType.with_traceback(value, tb) is
        # equivalent to value.with_traceback(tb).
        raise self.exc_info[0].with_traceback(self.exc_info[1], self.exc_info[2])

    def __str__(self):
        return self.__class__.__qualname__ + ": " + str(self.exc_info)

    def __repr__(self):
        return util.enclose_in_brackets(self.__str__())
45 |
46 |
class SignalException(Exception):
    """
    Represents an OS signal delivered to this process, along with the
    stack frame that was active when it arrived.
    """

    def __init__(self, signum: int, frame=None):
        self.signum = signum
        self.frame = frame

    def __str__(self):
        return f"SignalException: Received signal - {self.signum!r}."

    def __repr__(self):
        return util.enclose_in_brackets(str(self))
57 |
58 |
def _sig_exc_handler(sig, frame):
    # Signal handler installed by signal_to_exception():
    # turns a delivered signal into a catchable SignalException.
    raise SignalException(sig, frame)
61 |
62 |
def signal_to_exception(sig: signal.Signals) -> SignalException:
    """
    Convert a ``signal.Signals`` to a ``SignalException``.

    This allows for natural, pythonic signal handling with the use of try-except blocks.

    .. code-block:: python

        import signal
        import zproc

        zproc.signal_to_exception(signal.SIGTERM)
        try:
            ...
        except zproc.SignalException as e:
            print("encountered:", e)
        finally:
            zproc.exception_to_signal(signal.SIGTERM)
    """
    signal.signal(sig, _sig_exc_handler)
    return SignalException(sig)
84 |
85 |
def exception_to_signal(sig: Union[SignalException, signal.Signals]):
    """
    Rollback any changes done by :py:func:`signal_to_exception`.

    Restores the default handler (``signal.SIG_DFL``) for the signal.
    """
    signum = sig.signum if isinstance(sig, SignalException) else sig.value
    signal.signal(signum, signal.SIG_DFL)
95 |
96 |
def send_signal(sig: Union[SignalException, signal.Signals], pid: int = None):
    """
    Send a signal to a process.

    :param sig: The signal to deliver (a ``SignalException`` or ``signal.Signals``).
    :param pid: Target process id; defaults to the current process.
    """
    target_pid = os.getpid() if pid is None else pid
    signum = sig.signum if isinstance(sig, SignalException) else sig.value
    return os.kill(target_pid, signum)
105 |
106 |
class ProcessExit(Exception):
    def __init__(self, exitcode=0):
        """
        Signals that a Process should exit.

        Raising this inside a :py:class:`Process` shuts the Process down
        and makes it exit with ``exitcode``.

        Prefer this over ``os._exit()`` -- it lets ZProc clean up its
        resources properly.

        It is also the right way to stop the ``Context.call_when_*``
        decorators from within the Process, since those run in a
        never-ending loop.
        """
        super().__init__()
        self.exitcode = exitcode
123 |
--------------------------------------------------------------------------------
/zproc/process.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | import os
3 | import signal
4 | import time
5 | from typing import Callable, Union, Sequence, Mapping, Optional, Iterable, Type
6 |
7 | import zmq
8 |
9 | from zproc import util, exceptions, serializer
10 | from zproc.child import ChildProcess
11 | from zproc.consts import DEFAULT_NAMESPACE
12 |
13 |
class Process:
    # Cached return value of ``target`` (populated by ``wait()``).
    _result = None  # type: object
    # Whether the return value has been received from the child yet.
    _has_returned = False

    def __init__(
        self,
        server_address: str,
        target: Callable,
        *,
        args: Sequence = None,
        kwargs: Mapping = None,
        start: bool = True,
        pass_context: bool = True,
        retry_for: Iterable[Union[signal.Signals, Type[BaseException]]] = (),
        retry_delay: Union[int, float] = 5,
        max_retries: Optional[int] = 0,
        retry_args: tuple = None,
        retry_kwargs: dict = None,
        backend: Callable = multiprocessing.Process,
        namespace: str = DEFAULT_NAMESPACE,
    ) -> None:
        """
        Provides a higher level interface to :py:class:`multiprocessing.Process`.

        Please don't share a Process object between Processes / Threads.
        A Process object is not thread-safe.

        :param server_address:
            .. include:: /api/snippets/server_address.rst

            If you are using a :py:class:`Context`, then this is automatically provided.
        :param target:
            The Callable to be invoked inside a new process.

            *The* ``target`` *is invoked with the following signature:*

            .. code-block:: python

                target(state, *args, **kwargs)

            *Where:*

            - ``state`` is a :py:class:`State` instance.
            - ``args`` and ``kwargs`` are passed from the constructor.
        :param args:
            The argument tuple for ``target``.

            By default, it is an empty ``tuple``.
        :param kwargs:
            A dictionary of keyword arguments for ``target``.

            By default, it is an empty ``dict``.
        :param pass_context:
            Whether to pass a :py:class:`Context` to this process.

            If this is set to ``True``,
            then the first argument to ``target`` will be a new :py:class:`Context` object

            This will take the place of the default - :py:class:`State`.

            *If this is enabled, the* ``target`` *is invoked with the following signature*:

            .. code-block:: python

                target(ctx, *args, **kwargs)

            *Where:*

            - ``ctx`` is a :py:class:`Context` object.
            - ``args`` and ``kwargs`` are passed from the constructor.

            .. note::
                The :py:class:`Context` object provided here,
                will be a new object, NOT the one used to create this process.

                Such that,
                this new :py:class:`Context` can be used to spawn new processes,
                that share the same state.

                **This is the recommended way to create nested processes
                that share the same state.**


        :param start:
            Automatically call :py:meth:`.start()` on the process.
        :param retry_for:
            Retry only when one of these ``Exception``/``signal.Signals`` is raised.

            .. code-block:: python
                :caption: Example

                import signal

                # retry if a ConnectionError, ValueError or signal.SIGTERM is received.
                ctx.spawn(
                    my_process,
                    retry_for=(ConnectionError, ValueError, signal.SIGTERM)
                )

            To retry for *any* Exception - ``retry_for=(Exception, )``

            The items of this sequence MUST be a subclass of ``BaseException`` or of type ``signal.Signals``.
        :param retry_delay:
            The delay in seconds, before retrying.
        :param max_retries:
            Maximum number of retries before giving up.
            If set to ``None``, the Process will never stop retrying.

            After "max_retries", any Exception / Signal will exhibit default behavior.
        :param retry_args:
            Used in place of ``args`` when retrying.

            If set to ``None``, then it has no effect.
        :param retry_kwargs:
            Used in place of ``kwargs`` when retrying.

            If set to ``None``, then it has no effect.
        :param backend:
            .. include:: /api/snippets/backend.rst
        """
        #: Passed on from the constructor.
        self.server_address = server_address
        #: Passed on from the constructor.
        self.namespace = namespace
        #: Passed on from the constructor.
        self.target = target

        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}

        self._zmq_ctx = util.create_zmq_ctx()

        self._result_sock = self._zmq_ctx.socket(zmq.PAIR)
        # The result socket is meant to be used only after the process completes (after `join()`).
        # That implies -- we shouldn't need to wait for the result message.
        self._result_sock.setsockopt(zmq.RCVTIMEO, 0)
        result_address = util.bind_to_random_address(self._result_sock)
        #: The :py:class:`multiprocessing.Process` instance for the child process.
        self.child = backend(
            target=ChildProcess,
            kwargs=dict(
                target=self.target,
                server_address=self.server_address,
                namespace=self.namespace,
                pass_context=pass_context,
                target_args=args,
                target_kwargs=kwargs,
                retry_for=retry_for,
                retry_delay=retry_delay,
                max_retries=max_retries,
                retry_args=retry_args,
                retry_kwargs=retry_kwargs,
                result_address=result_address,
            ),
        )
        if start:
            self.child.start()

    def __str__(self):
        # Attribute access on a partially constructed Process can raise
        # AttributeError; degrade gracefully so repr() never crashes.
        try:
            pid = self.pid
        except AttributeError:
            pid = None
        try:
            exitcode = self.exitcode
        except AttributeError:
            exitcode = None
        try:
            is_alive = self.is_alive
        except AttributeError:
            is_alive = False

        return "%s - pid: %r target: %s ppid: %r is_alive: %r exitcode: %r" % (
            Process.__qualname__,
            pid,
            util.callable_repr(self.target),
            os.getpid(),
            is_alive,
            exitcode,
        )

    def __repr__(self):
        return util.enclose_in_brackets(self.__str__())

    def start(self):
        """
        Start this Process

        If the child has already been started once, it will return with an :py:exc:`AssertionError`.

        :return: the process PID
        """
        self.child.start()
        return self.pid

    def _cleanup(self):
        # Release the zmq resources held for the result transfer.
        self._result_sock.close()
        util.close_zmq_ctx(self._zmq_ctx)

    def stop(self):
        """
        Stop this process.

        Once closed, it should not, and cannot be used again.

        :return: :py:attr:`~exitcode`.
        """
        self.child.terminate()
        self._cleanup()
        return self.child.exitcode

    def wait(self, timeout: Union[int, float] = None):
        """
        Wait until this process finishes execution,
        then return the value returned by the ``target``.

        This method raises a :py:exc:`.ProcessWaitError`,
        if the child Process exits with a non-zero exitcode,
        or if something goes wrong while communicating with the child.

        :param timeout:
            The timeout in seconds.

            If the value is ``None``, it will block until the zproc server replies.

            For all other values, it will wait for a reply,
            for that amount of time before returning with a :py:class:`TimeoutError`.

        :return:
            The value returned by the ``target`` function.
        """
        # try to fetch the cached result.
        if self._has_returned:
            return self._result

        if timeout is not None:
            target = time.time() + timeout
            while time.time() < target:
                self.child.join(timeout)
                # join() returns immediately once the child is dead; break
                # instead of busy-spinning until the deadline expires.
                if not self.child.is_alive():
                    break

            if self.is_alive:
                raise TimeoutError(
                    f"Timed-out while waiting for Process to return. -- {self!r}"
                )
        else:
            self.child.join()
        if self.is_alive:
            return None
        exitcode = self.exitcode
        if exitcode != 0:
            raise exceptions.ProcessWaitError(
                f"Process finished with a non-zero exitcode ({exitcode}). -- {self!r}",
                exitcode,
                self,
            )
        try:
            self._result = serializer.loads(self._result_sock.recv())
        except zmq.error.Again:
            raise exceptions.ProcessWaitError(
                "The Process died before sending its return value. "
                "It probably crashed, got killed, or exited without warning.",
                exitcode,
            )

        self._has_returned = True
        self._cleanup()
        return self._result

    @property
    def is_alive(self):
        """
        Whether the child process is alive.

        Roughly, a process object is alive;
        from the moment the :py:meth:`start` method returns,
        until the child process is stopped manually (using :py:meth:`stop`) or naturally exits
        """
        return self.child.is_alive()

    @property
    def pid(self):
        """
        The process ID.

        Before the process is started, this will be None.
        """
        return self.child.pid

    @property
    def exitcode(self):
        """
        The child’s exit code.

        This will be None if the process has not yet terminated.
        A negative value ``-N`` indicates that the child was terminated by signal ``N``.
        """
        return self.child.exitcode
330 |
--------------------------------------------------------------------------------
/zproc/serializer.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | from typing import Callable, Any, Dict
3 |
4 | from cloudpickle import cloudpickle
5 |
6 | from zproc import exceptions
7 |
8 |
def dumps(obj: Any) -> bytes:
    """Serialize *obj* with ``pickle``, using the highest available protocol."""
    return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
11 |
12 |
def loads(bytes_obj: bytes) -> Any:
    """
    Deserialize bytes produced by :py:func:`dumps`.

    A payload that is a ``RemoteException`` is re-raised
    (with its original traceback) instead of being returned.
    """
    obj = pickle.loads(bytes_obj)
    if isinstance(obj, exceptions.RemoteException):
        obj.reraise()
    return obj
18 |
19 |
20 | def _get_fn_hash(fn: Callable):
21 | try:
22 | return hash(fn.__code__)
23 | except AttributeError:
24 | return hash(fn)
25 |
26 |
# Cache of cloudpickle payloads, keyed by the function's code hash.
_fn_dump_cache: Dict[int, bytes] = {}


def dumps_fn(fn: Callable) -> bytes:
    """Serialize a callable with cloudpickle, memoizing by code hash."""
    key = _get_fn_hash(fn)
    cached = _fn_dump_cache.get(key)
    if cached is None:
        cached = cloudpickle.dumps(fn)
        _fn_dump_cache[key] = cached
    return cached
38 |
39 |
# Cache of deserialized callables, keyed by the hash of their payload bytes.
_fn_load_cache: Dict[int, Callable] = {}


def loads_fn(fn_bytes: bytes) -> Callable:
    """Deserialize a cloudpickle'd callable, memoizing by payload hash."""
    key = hash(fn_bytes)
    if key not in _fn_load_cache:
        _fn_load_cache[key] = cloudpickle.loads(fn_bytes)
    return _fn_load_cache[key]
51 |
--------------------------------------------------------------------------------
/zproc/server/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scientifichackers/zproc/cd385852c32afb76d3e3a558ba0c809466de0a60/zproc/server/__init__.py
--------------------------------------------------------------------------------
/zproc/server/main.py:
--------------------------------------------------------------------------------
1 | import atexit
2 |
3 | import zmq
4 |
5 | from zproc import exceptions, serializer, util
6 | from zproc.__version__ import __version__
7 | from zproc.consts import ServerMeta
8 | from zproc.exceptions import RemoteException
9 | from zproc.state.server import StateServer
10 | from zproc.task.server import start_task_server, start_task_proxy
11 |
12 |
def main(server_address: str, send_conn):
    """
    Entry point of the zproc server process.

    Binds the state/watch ROUTER sockets (plus the task server/proxy),
    reports the resulting :py:class:`ServerMeta` -- or a startup failure,
    as a ``RemoteException`` -- back to the parent over ``send_conn``,
    then serves requests until interrupted.
    """
    with util.socket_factory(zmq.ROUTER, zmq.ROUTER) as (
        zmq_ctx,
        state_router,
        watch_router,
    ):
        # Ensure child processes are cleaned up when this process exits.
        atexit.register(util.clean_process_tree)

        try:
            if server_address:
                state_router.bind(server_address)
                # Bind the auxiliary sockets on the same transport
                # as the caller-supplied address.
                if "ipc" in server_address:
                    _bind = util.bind_to_random_ipc
                else:
                    _bind = util.bind_to_random_tcp
            else:
                _bind = util.bind_to_random_address
                server_address = _bind(state_router)

            server_meta = ServerMeta(
                __version__,
                server_address,
                _bind(watch_router),
                *start_task_server(_bind),
                *start_task_proxy(_bind)
            )

            state_server = StateServer(state_router, watch_router, server_meta)
        except Exception:
            # Ship the startup failure to the parent, which re-raises it
            # when it deserializes the RemoteException.
            with send_conn:
                send_conn.send_bytes(serializer.dumps(exceptions.RemoteException()))
            return
        else:
            with send_conn:
                send_conn.send_bytes(serializer.dumps(server_meta))

        while True:
            try:
                state_server.tick()
            except KeyboardInterrupt:
                # Treat Ctrl-C as shutdown: log and leave the serve loop.
                util.log_internal_crash("State Server")
                return
            except Exception:
                # If we know which client was being served, report the error
                # back to it; otherwise just log it locally.
                if state_server.identity is None:
                    util.log_internal_crash("State server")
                else:
                    state_server.reply(RemoteException())
            finally:
                # Clear per-request fields so a crash can't leak state
                # into the next request.
                state_server.reset_internal_state()
62 |
--------------------------------------------------------------------------------
/zproc/server/tools.py:
--------------------------------------------------------------------------------
import multiprocessing
import os
# NOTE: `Callable` must be imported from `collections.abc` -- importing it
# from `collections` was deprecated since Python 3.3 and removed in 3.10.
from collections.abc import Callable
from typing import Union, Tuple

import zmq

from zproc import util, serializer
from zproc.consts import Msgs, Cmds
from zproc.consts import ServerMeta
from zproc.server.main import main
12 |
13 |
def start_server(
    server_address: str = None, *, backend: Callable = multiprocessing.Process
) -> Tuple[multiprocessing.Process, str]:
    """
    Start a new zproc server.

    :param server_address:
        .. include:: /api/snippets/server_address.rst
    :param backend:
        .. include:: /api/snippets/backend.rst

    :return:
        A ``tuple``,
        containing a :py:class:`multiprocessing.Process` object for the server, and the server address.
    """
    recv_conn, send_conn = multiprocessing.Pipe()

    server_process = backend(target=main, args=[server_address, send_conn])
    server_process.start()

    try:
        with recv_conn:
            # serializer.loads() re-raises any startup error the server
            # reported (as a RemoteException) -- typically a zmq bind error.
            server_meta: ServerMeta = serializer.loads(recv_conn.recv_bytes())
    except zmq.ZMQError as e:
        if e.errno == 98:  # EADDRINUSE
            raise ConnectionError(
                "Encountered - %s. Perhaps the server is already running?" % repr(e)
            )
        if e.errno == 22:  # EINVAL -- malformed endpoint string
            raise ValueError(
                "Encountered - %s. `server_address` must be a string containing a valid endpoint."
                % repr(e)
            )
        raise

    return server_process, server_meta.state_router
50 |
51 |
def ping(
    server_address: str,
    *,
    timeout: Union[float, None] = None,
    payload: Union[bytes, None] = None,
) -> int:
    """
    Ping the zproc server.

    This can be used to easily detect if a server is alive and running, with the aid of a suitable ``timeout``.

    :param server_address:
        .. include:: /api/snippets/server_address.rst
    :param timeout:
        The timeout in seconds.

        If this is set to ``None``, then it will block forever, until the zproc server replies.

        For all other values, it will wait for a reply,
        for that amount of time before returning with a :py:class:`TimeoutError`.

        By default it is set to ``None``.
    :param payload:
        payload that will be sent to the server.

        If it is set to None, then ``os.urandom(56)`` (56 random bytes) will be used.

        (No real reason for the ``56`` magic number.)

    :return:
        The zproc server's **pid**.
    """
    if payload is None:
        payload = os.urandom(56)

    with util.create_zmq_ctx() as zmq_ctx:
        with zmq_ctx.socket(zmq.DEALER) as dealer_sock:
            dealer_sock.connect(server_address)
            if timeout is not None:
                # zmq expects the receive timeout in milliseconds.
                dealer_sock.setsockopt(zmq.RCVTIMEO, int(timeout * 1000))

            dealer_sock.send(
                serializer.dumps(
                    {Msgs.cmd: Cmds.ping, Msgs.info: payload}
                )
            )

            try:
                recv_payload, pid = serializer.loads(dealer_sock.recv())
            except zmq.error.Again:
                # Fixed word order of the error message
                # (was: "Timed-out waiting while for ...").
                raise TimeoutError(
                    "Timed-out while waiting for the ZProc server to respond."
                )

            assert (
                recv_payload == payload
            ), "Payload doesn't match! The server connection may be compromised, or unstable."

            return pid
108 |
--------------------------------------------------------------------------------
/zproc/state/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scientifichackers/zproc/cd385852c32afb76d3e3a558ba0c809466de0a60/zproc/state/__init__.py
--------------------------------------------------------------------------------
/zproc/state/_type.py:
--------------------------------------------------------------------------------
1 | from zproc.consts import Msgs, Cmds
2 |
# Names of the dict methods that State proxies to the server-side dict
# (injected onto the class by StateType / _create_remote_dict_method).
STATE_DICT_METHODS = {
    "__contains__",
    "__delitem__",
    "__eq__",
    "__getitem__",
    "__iter__",
    "__len__",
    "__ne__",
    "__setitem__",
    "clear",
    "get",
    "pop",
    "popitem",
    "setdefault",
    "update",
}
19 |
20 |
21 | def _create_remote_dict_method(dict_method_name: str):
22 | """
23 | Generates a method for the State class,
24 | that will call the "method_name" on the state (a ``dict``) stored on the server,
25 | and return the result.
26 |
27 | Glorified RPC.
28 | """
29 |
30 | def remote_method(self, *args, **kwargs):
31 | return self._s_request_reply(
32 | {
33 | Msgs.cmd: Cmds.run_dict_method,
34 | Msgs.info: dict_method_name,
35 | Msgs.args: args,
36 | Msgs.kwargs: kwargs,
37 | }
38 | )
39 |
40 | remote_method.__name__ = dict_method_name
41 | return remote_method
42 |
43 |
class StateType(type):
    """Metaclass that installs the remote ``dict`` methods on State."""

    def __new__(mcs, *args, **kwargs):
        cls = super().__new__(mcs, *args, **kwargs)
        for method_name in STATE_DICT_METHODS:
            setattr(cls, method_name, _create_remote_dict_method(method_name))
        return cls
52 |
53 |
# See "_type.pyi" for more
class StateDictMethodStub:
    # Empty at runtime; "_type.pyi" attaches the dict-method signatures
    # for static type checkers.
    pass
57 |
--------------------------------------------------------------------------------
/zproc/state/_type.pyi:
--------------------------------------------------------------------------------
1 | from typing import TypeVar, Iterator, overload, Optional, Mapping, Union, Tuple
2 |
# Type variables used by the overloaded dict-method stubs below.
T = TypeVar("T")
KT = TypeVar("KT")
VT = TypeVar("VT")
6 |
class StateType(type):
    # Stub only; the runtime metaclass injects the dict methods.
    pass
9 |
class StateDictMethodStub:
    def __contains__(self, o: object) -> bool: ...
    def __delitem__(self, v: KT) -> None: ...
    def __eq__(self, o: object) -> bool: ...
    def __getitem__(self, key: KT) -> VT: ...
    def __iter__(self) -> Iterator[KT]: ...
    def __len__(self) -> int: ...
    def __ne__(self, o: object) -> bool: ...
    def __setitem__(self, k: KT, v: VT) -> None: ...
    def clear(self) -> None: ...
    @overload
    def get(self, k: KT) -> Optional[VT]: ...
    @overload
    # NOTE: the default (and return) are value-typed -- was Union[KT, T].
    def get(self, k: KT, default: Union[VT, T]) -> Union[VT, T]: ...
    # NOTE: removed a stray non-@overload `pop` definition that preceded
    # (and invalidated) the overloaded pair below.
    @overload
    def pop(self, k: KT) -> VT: ...
    @overload
    def pop(self, k: KT, default: Union[VT, T] = ...) -> Union[VT, T]: ...
    def popitem(self) -> Tuple[KT, VT]: ...
    def setdefault(self, k: KT, default: Optional[VT] = ...) -> VT: ...
    def update(self, a: Mapping[KT, VT]) -> None: ...
32 |
--------------------------------------------------------------------------------
/zproc/state/server.py:
--------------------------------------------------------------------------------
1 | import os
2 | import struct
3 | import time
4 | from bisect import bisect
5 | from collections import defaultdict
6 | from contextlib import contextmanager
7 | from copy import deepcopy
8 | from typing import Any, Dict, List, Tuple
9 |
10 | import zmq
11 |
12 | from zproc import serializer
13 | from zproc.consts import Cmds, ServerMeta
14 | from zproc.consts import Msgs
15 |
# Shape of a deserialized client request: maps Msgs keys to payload values.
RequestType = Dict[Msgs, Any]
17 |
18 |
class StateServer:
    """
    Serves the per-namespace state ``dict``s over two ROUTER sockets:
    ``state_router`` for request/reply state operations, and
    ``watch_router`` for state-watcher subscriptions, answered from a
    per-namespace history of state mutations.
    """

    # Identity frame of the client currently being served (set per request).
    identity: bytes
    # Namespace of the request currently being served.
    namespace: bytes

    # Maps namespace -> that namespace's state dict.
    state_map: Dict[bytes, dict]
    # The state dict for the current request's namespace.
    state: dict

    # Maps namespace -> (timestamps, updates): two parallel lists
    # recording every mutation, used to answer watchers.
    history: Dict[bytes, Tuple[List[float], List[List[bytes]]]]
    # Maps watcher identity -> its parameters, awaiting a matching update.
    pending: Dict[bytes, Tuple[bytes, bytes, bool, float]]

    def __init__(
        self,
        state_router: zmq.Socket,
        watch_router: zmq.Socket,
        server_meta: ServerMeta,
    ) -> None:
        self.state_router = state_router
        self.watch_router = watch_router
        self.server_meta = server_meta

        # Maps each command to its handler method.
        self.dispatch_dict = {
            Cmds.run_fn_atomically: self.run_fn_atomically,
            Cmds.run_dict_method: self.run_dict_method,
            Cmds.get_state: self.send_state,
            Cmds.set_state: self.set_state,
            Cmds.get_server_meta: self.get_server_meta,
            Cmds.ping: self.ping,
            Cmds.time: self.time,
        }
        self.state_map = defaultdict(dict)

        self.history = defaultdict(lambda: ([], []))
        self.pending = {}

    def send_state(self, _):
        """reply with state to the current client"""
        self.reply(self.state)

    def get_server_meta(self, _):
        """Reply with this server's ServerMeta record."""
        self.reply(self.server_meta)

    def ping(self, request):
        """Echo the request payload back, along with this server's PID."""
        self.reply((request[Msgs.info], os.getpid()))

    def time(self, _):
        """Reply with the server's wall-clock time."""
        self.reply(time.time())

    def set_state(self, request):
        """Replace the current namespace's entire state dict."""
        new = request[Msgs.info]
        with self.mutate_safely():
            self.state_map[self.namespace] = new
        self.reply(True)

    def run_dict_method(self, request):
        """Execute a method on the state ``dict`` and reply with the result."""
        state_method_name, args, kwargs = (
            request[Msgs.info],
            request[Msgs.args],
            request[Msgs.kwargs],
        )
        # print(method_name, args, kwargs)
        with self.mutate_safely():
            self.reply(getattr(self.state, state_method_name)(*args, **kwargs))

    def run_fn_atomically(self, request):
        """Execute a function, atomically and reply with the result."""
        fn = serializer.loads_fn(request[Msgs.info])
        args, kwargs = request[Msgs.args], request[Msgs.kwargs]
        with self.mutate_safely():
            self.reply(fn(self.state, *args, **kwargs))

    def recv_request(self):
        """Receive one request from the state socket and dispatch it."""
        self.identity, request = self.state_router.recv_multipart()
        request = serializer.loads(request)
        try:
            self.namespace = request[Msgs.namespace]
        except KeyError:
            # Some commands (e.g. ping) are namespace-agnostic.
            pass
        else:
            self.state = self.state_map[self.namespace]
        self.dispatch_dict[request[Msgs.cmd]](request)

    def reply(self, response):
        """Send ``response`` to the client currently being served."""
        # print("server rep:", self.identity, response, time.time())
        self.state_router.send_multipart([self.identity, serializer.dumps(response)])

    @contextmanager
    def mutate_safely(self):
        """
        Run a state mutation transactionally.

        On failure, the namespace's state is rolled back to the deep copy
        taken before the mutation, and the error propagates.  On success,
        the (old, new, timestamp) triple is appended to this namespace's
        history and any pending watchers are resolved.
        """
        old = deepcopy(self.state)
        stamp = time.time()

        try:
            yield
        except Exception:
            self.state = self.state_map[self.namespace] = old
            raise

        slot = self.history[self.namespace]
        slot[0].append(stamp)
        slot[1].append(
            [
                self.identity,
                serializer.dumps((old, self.state, stamp)),
                self.state == old,  # "identical" flag: the update was a no-op
            ]
        )
        self.resolve_pending()

    def resolve_watcher(
        self,
        w_ident: bytes,
        s_ident: bytes,
        namespace: bytes,
        identical_not_okay: bool,
        only_after: float,
    ) -> bool:
        """
        Try to answer one watcher from the mutation history.

        :return: ``True`` if an update was sent, ``False`` if no
            qualifying update exists yet.
        """
        timestamps, history = self.history[namespace]
        # Start just before the first entry newer than `only_after`;
        # the loop pre-increments.
        index = bisect(timestamps, only_after) - 1

        while True:
            index += 1
            try:
                ident, update, identical = history[index]
            except IndexError:
                break
            if ident == s_ident:
                # Don't echo a client's own updates back to it.
                continue
            if identical_not_okay and identical:
                continue
            # bytes(bool) encodes the flag as b"" (False, falsy frame)
            # or b"\x00" (True, non-empty hence truthy frame).
            self.watch_router.send_multipart([w_ident, update, bytes(identical)])
            return True

        return False

    def resolve_pending(self):
        """Attempt to resolve every watcher that is still waiting."""
        pending = self.pending
        if not pending:
            return
        for w_ident in list(pending):
            if self.resolve_watcher(w_ident, *pending[w_ident]):
                del pending[w_ident]

    def recv_watcher(self):
        """Receive one watcher subscription from the watch socket."""
        w_ident, s_ident, namespace, identical_okay, only_after = (
            self.watch_router.recv_multipart()
        )
        self.pending[w_ident] = (
            s_ident,
            namespace,
            not identical_okay,  # empty frame means "identical not okay"
            *struct.unpack("d", only_after),  # 8-byte float timestamp
        )

    def reset_internal_state(self):
        """Clear per-request fields, so a crash can't leak into the next tick."""
        self.identity = None
        self.namespace = None
        self.state = None

    def tick(self):
        """Serve one round of socket activity (blocking in zmq.select)."""
        self.resolve_pending()

        for sock in zmq.select([self.watch_router, self.state_router], [], [])[0]:
            if sock is self.state_router:
                self.recv_request()
            elif sock is self.watch_router:
                self.recv_watcher()
185 |
--------------------------------------------------------------------------------
/zproc/state/state.py:
--------------------------------------------------------------------------------
1 | import math
2 | import os
3 | import struct
4 | import time
5 | from collections import deque
6 | from functools import wraps
7 | from pprint import pformat
8 | from textwrap import indent
9 | from typing import Hashable, Any, Callable, Dict, Mapping, Sequence
10 |
11 | import zmq
12 |
13 | from zproc import util, serializer
14 | from zproc.consts import (
15 | Msgs,
16 | Cmds,
17 | DEFAULT_NAMESPACE,
18 | DEFAULT_ZMQ_RECVTIMEO,
19 | StateUpdate,
20 | ZMQ_IDENTITY_LENGTH,
21 | ServerMeta,
22 | )
23 | from zproc.server import tools
24 | from zproc.state import _type
25 |
26 |
class _SkipStateUpdate(Exception):
    # Raised by a watcher callback to skip the current state update
    # (caught in StateWatcher.__next__).
    pass
29 |
30 |
def _dummy_callback(_):
    # Identity function; the default callback for StateWatcher.
    return _
33 |
34 |
class StateWatcher:
    """
    Iterator that yields ``callback(state_update)`` for successive state
    updates received from the zproc server, up to ``count`` times.
    """

    # Deadline (epoch seconds) for the current __next__() call.
    _time_limit: float
    # Number of updates yielded so far.
    _iters: int = 0

    def __init__(
        self,
        state: "State",
        live: bool,
        timeout: float,
        identical_okay: bool,
        start_time: float,
        count: int,
        callback: Callable[[StateUpdate], Any] = _dummy_callback,
    ):
        self.state = state
        self.callback = callback
        self.live = live
        self.timeout = timeout
        self.identical_okay = identical_okay
        self.start_time = start_time
        self.count = count

        # count=None means iterate forever.
        if count is None:
            self._iter_limit = math.inf
        else:
            self._iter_limit = count

        # Only updates newer than this timestamp are requested.
        self._only_after = self.start_time
        if self._only_after is None:
            self._only_after = time.time()

    def _settimeout(self):
        # Budget the remaining time against the deadline set in __next__().
        if time.time() > self._time_limit:
            raise TimeoutError("Timed-out while waiting for a state update.")

        self.state._w_dealer.setsockopt(
            zmq.RCVTIMEO, int((self._time_limit - time.time()) * 1000)
        )

    def _request_reply(self) -> StateUpdate:
        """Ask the server for the next qualifying update and decode it."""
        response = util.strict_request_reply(
            [
                self.state._identity,
                self.state._namespace_bytes,
                bytes(self.identical_okay),
                struct.pack("d", self._only_after),
            ],
            self.state._w_dealer.send_multipart,
            self.state._w_dealer.recv_multipart,
        )
        return StateUpdate(
            *serializer.loads(response[0]), is_identical=bool(response[1])
        )

    def go_live(self):
        # Skip any updates that happened before "now".
        self._only_after = time.time()

    def __next__(self):
        if self.timeout is None:
            self.state._w_dealer.setsockopt(zmq.RCVTIMEO, DEFAULT_ZMQ_RECVTIMEO)
        else:
            self._time_limit = time.time() + self.timeout

        while self._iters < self._iter_limit:
            if self.timeout is not None:
                self._settimeout()
            if self.live:
                # Live mode: always ask for updates after "now".
                self._only_after = time.time()
            try:
                state_update = self._request_reply()
            except zmq.error.Again:
                raise TimeoutError("Timed-out while waiting for a state update.")
            if not self.live:
                # Non-live mode: advance the cursor past the update just seen,
                # so no intermediate update is missed.
                self._only_after = state_update.timestamp
            try:
                value = self.callback(state_update)
            except _SkipStateUpdate:
                # The callback chose to ignore this update.
                continue
            else:
                self._iters += 1
                return value

        raise StopIteration

    def __iter__(self):
        return self

    def consume(self):
        # consumes iterator at C speed
        deque(iter(self), maxlen=0)
125 |
126 |
class State(_type.StateDictMethodStub, metaclass=_type.StateType):
    # Server metadata (socket addresses, etc.), fetched once at connect time.
    _server_meta: ServerMeta

    def __init__(
        self, server_address: str, *, namespace: str = DEFAULT_NAMESPACE
    ) -> None:
        """
        Allows accessing the state stored on the zproc server, through a dict-like API.

        Also allows changing the namespace.

        Serves the following ``dict``-like members, for accessing the state:

        - Magic methods:
            ``__contains__()``, ``__delitem__()``, ``__eq__()``,
            ``__getitem__()``, ``__iter__()``,
            ``__len__()``, ``__ne__()``, ``__setitem__()``

        - Methods:
            ``clear()``, ``copy()``, ``get()``,
            ``items()``, ``keys()``, ``pop()``, ``popitem()``,
            ``setdefault()``, ``update()``, ``values()``

        Please don't share a State object between Processes/Threads.
        A State object is not thread-safe.

        :param server_address:
            .. include:: /api/snippets/server_address.rst

            If you are using a :py:class:`Context`, then this is automatically provided.
        """
        #: Passed on from constructor. This is read-only
        self.server_address = server_address
        self.namespace = namespace

        self._zmq_ctx = util.create_zmq_ctx()
        # DEALER socket for state requests (get / set / atomic operations).
        self._s_dealer = self._create_s_dealer()
        # DEALER socket dedicated to state watching.
        self._w_dealer = self._create_w_dealer()

    def __str__(self):
        return "\n".join(
            (
                "%s - namespace: %r server: %r at %#x"
                % (
                    self.__class__.__qualname__,
                    self.namespace,
                    self.server_address,
                    id(self),
                ),
                indent("↳ " + pformat(self.copy()), " " * 2),
            )
        )

    def __repr__(self):
        return util.enclose_in_brackets(self.__str__())

    def fork(self, server_address: str = None, *, namespace: str = None) -> "State":
        r"""
        "Forks" this State object.

        Takes the same args as the :py:class:`State` constructor,
        except that they automatically default to the values provided during the creation of this State object.

        If no args are provided to this function,
        then it shall create a new :py:class:`State` object
        that follows the exact same semantics as this one.

        This is preferred over ``copy()``\ -ing a :py:class:`State` object.

        Useful when one needs to access 2 or more namespaces from the same code.
        """
        if server_address is None:
            server_address = self.server_address
        if namespace is None:
            namespace = self.namespace

        return self.__class__(server_address, namespace=namespace)

    # The current namespace, stored in its wire format (UTF-8 encoded bytes).
    _namespace_bytes: bytes

    @property
    def namespace(self) -> str:
        """
        This State's current working namespace as a ``str``.

        This property is read/write,
        such that you can switch namespaces on the fly by just setting its value.

        .. code-block:: python

            state['food'] = 'available'
            print(state)

            state.namespace = "foobar"

            print(state)

        """
        return self._namespace_bytes.decode()

    @namespace.setter
    def namespace(self, namespace: str):
        # empty namespace is reserved for use by the framework itself
        assert len(namespace) > 0, "'namespace' cannot be empty!"

        self._namespace_bytes = namespace.encode()

    #
    # state access
    #

    def _create_s_dealer(self) -> zmq.Socket:
        # Create and connect the DEALER socket used for state requests.
        sock = self._zmq_ctx.socket(zmq.DEALER)
        # A random identity, so the server can tell clients apart.
        self._identity = os.urandom(ZMQ_IDENTITY_LENGTH)
        sock.setsockopt(zmq.IDENTITY, self._identity)
        sock.connect(self.server_address)
        self._server_meta = util.req_server_meta(sock)
        return sock

    def _s_request_reply(self, request: Dict[int, Any]):
        # Send `request` to the server, and return its de-serialized reply.
        # The namespace is stamped onto every outgoing request.
        request[Msgs.namespace] = self._namespace_bytes
        msg = serializer.dumps(request)
        return serializer.loads(
            util.strict_request_reply(msg, self._s_dealer.send, self._s_dealer.recv)
        )

    def set(self, value: dict):
        """
        Set the state, completely over-writing the previous value.

        .. caution::

            This kind of operation usually leads to a data race.

            Please take good care while using this.

            Use the :py:func:`atomic` decorator if you're feeling anxious.
        """
        self._s_request_reply({Msgs.cmd: Cmds.set_state, Msgs.info: value})

    def copy(self) -> dict:
        """
        Return a deep-copy of the state as a ``dict``.

        (Unlike the shallow-copy returned by the inbuilt :py:meth:`dict.copy`).
        """
        return self._s_request_reply({Msgs.cmd: Cmds.get_state})

    def keys(self):
        # Snapshot-based: operates on a fresh copy fetched from the server.
        return self.copy().keys()

    def values(self):
        # Snapshot-based: operates on a fresh copy fetched from the server.
        return self.copy().values()

    def items(self):
        # Snapshot-based: operates on a fresh copy fetched from the server.
        return self.copy().items()

    def ping(self, **kwargs):
        """
        Ping the zproc server corresponding to this State's Context

        :param kwargs: Keyword arguments that :py:func:`ping` takes, except ``server_address``.
        :return: Same as :py:func:`ping`
        """
        return tools.ping(self.server_address, **kwargs)

    #
    # state watcher
    #

    def time(self) -> float:
        # The server's notion of the current time.
        return self._s_request_reply({Msgs.cmd: Cmds.time})

    def _create_w_dealer(self) -> zmq.Socket:
        # Create and connect the DEALER socket used for state watching.
        sock = self._zmq_ctx.socket(zmq.DEALER)
        sock.connect(self._server_meta.watcher_router)
        return sock

    def when_change_raw(
        self,
        *,
        live: bool = False,
        timeout: float = None,
        identical_okay: bool = False,
        start_time: float = None,
        count: int = None,
    ) -> StateWatcher:
        """
        A low-level hook that emits each and every state update.
        All other state watchers are built upon this only.

        .. include:: /api/state/get_raw_update.rst
        """
        return StateWatcher(
            state=self,
            live=live,
            timeout=timeout,
            identical_okay=identical_okay,
            start_time=start_time,
            count=count,
        )

    def when_change(
        self,
        *keys: Hashable,
        exclude: bool = False,
        live: bool = False,
        timeout: float = None,
        identical_okay: bool = False,
        start_time: float = None,
        count: int = None,
    ) -> StateWatcher:
        """
        Block until a change is observed, and then return a copy of the state.

        .. include:: /api/state/get_when_change.rst
        """
        if not keys:

            def callback(update: StateUpdate) -> dict:
                # No keys given: any update qualifies.
                return update.after

        else:
            if identical_okay:
                raise ValueError(
                    "Passing both `identical_okay` and `keys` is not possible. "
                    "(Hint: Omit `keys`)"
                )

            key_set = set(keys)

            def select(before, after):
                # Keys present in either snapshot, filtered by `keys`/`exclude`.
                selected = {*before.keys(), *after.keys()}
                if exclude:
                    return selected - key_set
                else:
                    return selected & key_set

            def callback(update: StateUpdate) -> dict:
                before, after = update.before, update.after
                try:
                    if not any(before[k] != after[k] for k in select(before, after)):
                        raise _SkipStateUpdate
                except KeyError:  # this indirectly implies that something has changed
                    pass
                return update.after

        return StateWatcher(
            state=self,
            live=live,
            timeout=timeout,
            identical_okay=identical_okay,
            start_time=start_time,
            count=count,
            callback=callback,
        )

    def when(
        self,
        test_fn: Callable,
        *,
        args: Sequence = None,
        kwargs: Mapping = None,
        live: bool = False,
        timeout: float = None,
        identical_okay: bool = False,
        start_time: float = None,
        count: int = None,
    ) -> StateWatcher:
        """
        Block until ``test_fn(snapshot)`` returns a "truthy" value,
        and then return a copy of the state.

        *Where-*

        ``snapshot`` is a ``dict``, containing a version of the state after this update was applied.

        .. include:: /api/state/get_when.rst
        """
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}

        def callback(update: StateUpdate) -> dict:
            snapshot = update.after
            if test_fn(snapshot, *args, **kwargs):
                return snapshot
            raise _SkipStateUpdate

        return StateWatcher(
            state=self,
            live=live,
            timeout=timeout,
            identical_okay=identical_okay,
            start_time=start_time,
            count=count,
            callback=callback,
        )

    def when_truthy(self, key: Hashable, **when_kwargs) -> StateWatcher:
        """Block until ``state[key]`` is truthy, then return a copy of the state."""

        def _(snapshot):
            try:
                return snapshot[key]
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_falsy(self, key: Hashable, **when_kwargs) -> StateWatcher:
        """Block until ``state[key]`` is falsy, then return a copy of the state."""

        def _(snapshot):
            try:
                return not snapshot[key]
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_equal(self, key: Hashable, value: Any, **when_kwargs) -> StateWatcher:
        """
        Block until ``state[key] == value``, and then return a copy of the state.

        .. include:: /api/state/get_when_equality.rst
        """

        def _(snapshot):
            try:
                return snapshot[key] == value
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_not_equal(self, key: Hashable, value: Any, **when_kwargs) -> StateWatcher:
        """
        Block until ``state[key] != value``, and then return a copy of the state.

        .. include:: /api/state/get_when_equality.rst
        """

        def _(snapshot):
            try:
                return snapshot[key] != value
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_none(self, key: Hashable, **when_kwargs) -> StateWatcher:
        """
        Block until ``state[key] is None``, and then return a copy of the state.

        .. include:: /api/state/get_when_equality.rst
        """

        def _(snapshot):
            try:
                return snapshot[key] is None
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_not_none(self, key: Hashable, **when_kwargs) -> StateWatcher:
        """
        Block until ``state[key] is not None``, and then return a copy of the state.

        .. include:: /api/state/get_when_equality.rst
        """

        def _(snapshot):
            try:
                return snapshot[key] is not None
            except KeyError:
                return False

        return self.when(_, **when_kwargs)

    def when_available(self, key: Hashable, **when_kwargs) -> StateWatcher:
        """
        Block until ``key in state``, and then return a copy of the state.

        .. include:: /api/state/get_when_equality.rst
        """
        return self.when(lambda snapshot: key in snapshot, **when_kwargs)

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self._s_dealer.close()
            self._w_dealer.close()
            util.close_zmq_ctx(self._zmq_ctx)
        except Exception:
            pass
520 |
521 |
def atomic(fn: Callable) -> Callable:
    """
    Wraps a function, to create an atomic operation out of it.

    This contract guarantees, that while an atomic ``fn`` is running -

    - No one, except the "callee" may access the state.
    - If an ``Exception`` occurs while the ``fn`` is running, the state remains unaffected.
    - | If a signal is sent to the "callee", the ``fn`` remains unaffected.
      | (The state is not left in an incoherent state.)

    .. note::
        - The first argument to the wrapped function *must* be a :py:class:`State` object.
        - The wrapped ``fn`` receives a frozen version (snapshot) of state,
          which is a ``dict`` object, not a :py:class:`State` object.
        - It is not possible to call one atomic function from other.

    Please read :ref:`atomicity` for a detailed explanation.

    :param fn:
        The function to be wrapped, as an atomic function.

    :returns:
        A wrapper function.

        The wrapper function returns the value returned by the wrapped ``fn``.

    >>> import zproc
    >>>
    >>> @zproc.atomic
    ... def increment(snapshot):
    ...     return snapshot['count'] + 1
    ...
    >>>
    >>> ctx = zproc.Context()
    >>> state = ctx.create_state({'count': 0})
    >>>
    >>> increment(state)
    1
    """
    # Serialize the function once, at decoration time -- it never changes.
    fn_bytes = serializer.dumps_fn(fn)

    @wraps(fn)
    def wrapper(state: State, *args, **kwargs):
        # Build a fresh message per call.
        # (Sharing a single mutable dict across calls would leak the previous
        # call's args/kwargs, and is unsafe if the wrapper is ever re-entered.)
        return state._s_request_reply(
            {
                Msgs.cmd: Cmds.run_fn_atomically,
                Msgs.info: fn_bytes,
                Msgs.args: args,
                Msgs.kwargs: kwargs,
            }
        )

    return wrapper
576 |
--------------------------------------------------------------------------------
/zproc/task/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/scientifichackers/zproc/cd385852c32afb76d3e3a558ba0c809466de0a60/zproc/task/__init__.py
--------------------------------------------------------------------------------
/zproc/task/map_plus.py:
--------------------------------------------------------------------------------
# NOTE: `Callable` must come from `collections.abc` --
# the `collections` alias was deprecated in 3.3 and removed in Python 3.10.
from collections.abc import Callable


def map_plus(target: Callable, mi, ma, a, mk, k):
    """
    The builtin `map()`, but with superpowers.

    :param target: The callable to map over the given sequences.
    :param mi: Sequence whose items become the *first* positional arg, or ``None``.
    :param ma: Sequence of positional-arg tuples (``*args`` per item), or ``None``.
    :param a: Common positional args, appended to every call. ``None`` -> empty.
    :param mk: Sequence of keyword-arg dicts (``**kwargs`` per item), or ``None``.
    :param k: Common keyword args, applied to every call. ``None`` -> empty.
    :return: List of return values; truncated to the shortest provided sequence.
    """
    if a is None:
        a = []
    if k is None:
        k = {}

    # Dispatch on which of the per-item sequences were provided.
    if mi is None and ma is None and mk is None:
        return []
    elif mi is None and ma is None:
        return [target(*a, **mki, **k) for mki in mk]
    elif ma is None and mk is None:
        return [target(mii, *a, **k) for mii in mi]
    elif mk is None and mi is None:
        return [target(*mai, *a, **k) for mai in ma]
    elif mi is None:
        return [target(*mai, *a, **mki, **k) for mai, mki in zip(ma, mk)]
    elif ma is None:
        return [target(mii, *a, **mki, **k) for mii, mki in zip(mi, mk)]
    elif mk is None:
        return [target(mii, *mai, *a, **k) for mii, mai in zip(mi, ma)]
    else:
        return [target(mii, *mai, *a, **mki, **k) for mii, mai, mki in zip(mi, ma, mk)]
27 |
--------------------------------------------------------------------------------
/zproc/task/result.py:
--------------------------------------------------------------------------------
1 | import zmq
2 |
3 | from zproc import util, serializer
4 |
5 |
class _TaskResultBase:
    """Common plumbing for fetching task results from the task result server."""

    def __init__(self, server_address: str, task_id: bytes):
        #: Passed on from the constructor
        self.task_id = task_id

        self._zmq_ctx = util.create_zmq_ctx()
        self._server_meta = util.get_server_meta(self._zmq_ctx, server_address)
        self._dealer = self._create_dealer()

    def _create_dealer(self) -> zmq.Socket:
        # DEALER socket, connected to the task result server.
        dealer = self._zmq_ctx.socket(zmq.DEALER)
        dealer.connect(self._server_meta.task_router)
        return dealer

    def _get_chunk(self, index: int):
        # Request a single chunk of this task's result, blocking until it arrives.
        chunk_id = util.encode_chunk_id(self.task_id, index)
        raw = util.strict_request_reply(chunk_id, self._dealer.send, self._dealer.recv)
        return serializer.loads(raw)

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self._dealer.close()
            util.close_zmq_ctx(self._zmq_ctx)
        except Exception:
            pass
32 |
33 |
class SimpleTaskResult(_TaskResultBase):
    """Result of a single (non-sequence) task, stored as one chunk."""

    def __init__(self, server_address: str, task_id: bytes):
        super().__init__(server_address, task_id)

        # Simple task ids carry no sequence detail;
        # a non-None detail means this id belongs to a sequence task.
        if util.deconstruct_task_id(self.task_id) is not None:
            raise ValueError(
                "Invalid `task_id` for a %r. Did you mean to use %r?"
                % (self.__class__.__qualname__, SequenceTaskResult.__qualname__)
            )

    @property
    def value(self):
        # The whole result lives in a single chunk, at index -1.
        return self._get_chunk(-1)
48 |
49 |
class SequenceTaskResult(_TaskResultBase):
    """
    Lazily fetches the chunked result of a sequence task (a ``map``),
    stitching chunks into a pre-sized list in their original order.
    """

    # Index of the last chunk fetched from the server.
    _chunk_index = -1
    # Index of the last element served through `__next__`.
    _iter_index = -1
    # Highest index of `_as_list` filled-in so far.
    _max_ready_index = -1

    def __init__(self, server_address: str, task_id: bytes):
        super().__init__(server_address, task_id)

        # Sequence task ids carry (chunk_length, length, num_chunks);
        # a None detail means this id belongs to a simple task.
        task_detail = util.deconstruct_task_id(self.task_id)
        if task_detail is None:
            raise ValueError(
                "Invalid `task_id` for a %r. Did you mean to use %r?"
                % (self.__class__.__qualname__, SimpleTaskResult.__qualname__)
            )

        self._chunk_length, self._length, self._num_chunks = task_detail
        self._max_index = self._num_chunks - 1
        # Pre-sized, since chunks are spliced in by index.
        self._as_list = [None] * self._length

    def _get_next_chunk(self):
        # Fetch the next chunk (blocking) and splice it into its slot.
        # Raises StopIteration once every chunk has been fetched.
        if self._chunk_index >= self._max_index:
            raise StopIteration

        self._chunk_index += 1
        self._max_ready_index += self._chunk_length

        chunk = self._get_chunk(self._chunk_index)
        i, j = self._chunk_index, self._chunk_length
        self._as_list[i * j : (i + 1) * j] = chunk

    @property
    def as_list(self):
        """Block until every chunk has arrived, then return the full result list."""
        try:
            while True:
                self._get_next_chunk()
        except StopIteration:
            return self._as_list

    def __len__(self):
        return self._length

    def __iter__(self):
        return self

    def __next__(self):
        # Serve elements in order, fetching further chunks only when needed.
        self._iter_index += 1
        if self._iter_index > self._max_ready_index:
            self._get_next_chunk()
        return self._as_list[self._iter_index]
99 |
--------------------------------------------------------------------------------
/zproc/task/server.py:
--------------------------------------------------------------------------------
import multiprocessing
from collections import defaultdict, deque
from collections.abc import Callable
from typing import Dict, List

import zmq

from zproc import util, serializer
from zproc.exceptions import RemoteException
9 |
10 |
class TaskResultServer:
    # task_id -> {chunk index -> serialized chunk result}
    result_store: Dict[bytes, Dict[int, bytes]]
    # chunk_id -> queue of client identities waiting on that chunk
    pending: Dict[bytes, deque]

    def __init__(self, router: zmq.Socket, result_pull: zmq.Socket):
        """
        The task server serves the results acquired from the workers.

        Such that,
        a result is never lost,
        and can be acquired again, by any of the clients.

        It also lets everyone know when a task's result has arrived.
        """
        self.router = router
        self.result_pull = result_pull

        self.result_store = defaultdict(dict)
        self.pending = defaultdict(deque)

    def recv_request(self):
        # Serve a chunk if it has already arrived; otherwise queue the client,
        # to be answered later by `resolve_pending()`.
        ident, chunk_id = self.router.recv_multipart()
        try:
            task_id, index = util.decode_chunk_id(chunk_id)
            # print("request->", task_id, index)
            task_store = self.result_store[task_id]
            try:
                chunk_result = task_store[index]
            except KeyError:
                self.pending[chunk_id].appendleft(ident)
            else:
                self.router.send_multipart([ident, chunk_result])
        except KeyboardInterrupt:
            raise
        except Exception:
            # Ship the failure back to the requester instead of crashing the server.
            self.router.send_multipart([ident, serializer.dumps(RemoteException())])

    def resolve_pending(self, chunk_id: bytes, chunk_result: bytes):
        # Reply to every client that was waiting on this chunk.
        pending = self.pending[chunk_id]
        send = self.router.send_multipart
        # The identity frame (msg[0]) is filled-in per waiting client.
        msg = [None, chunk_result]

        while pending:
            msg[0] = pending.pop()
            send(msg)

    def recv_chunk_result(self):
        # Store a chunk result pushed by a worker, then wake-up any waiters.
        chunk_id, chunk_result = self.result_pull.recv_multipart()
        task_id, index = util.decode_chunk_id(chunk_id)
        self.result_store[task_id][index] = chunk_result
        self.resolve_pending(chunk_id, chunk_result)
        # print("stored->", task_id, index)

    def tick(self):
        # One blocking iteration of the server's event loop.
        for sock in zmq.select([self.result_pull, self.router], [], [])[0]:
            if sock is self.router:
                self.recv_request()
            elif sock is self.result_pull:
                self.recv_chunk_result()
70 |
71 |
def _task_server(send_conn, _bind: Callable):
    """
    Entry point of the task result server process.

    Binds a ROUTER (client requests) and a PULL (worker results) socket,
    reports their addresses back over ``send_conn``
    (or a serialized RemoteException on failure), then serves forever.
    """
    with util.socket_factory(zmq.ROUTER, zmq.PULL) as (zmq_ctx, router, result_pull):
        with send_conn:
            try:
                send_conn.send_bytes(
                    serializer.dumps([_bind(router), _bind(result_pull)])
                )
                server = TaskResultServer(router, result_pull)
            except Exception:
                send_conn.send_bytes(serializer.dumps(RemoteException()))
                return
        while True:
            try:
                server.tick()
            except KeyboardInterrupt:
                util.log_internal_crash("Task server")
                return
            except Exception:
                # BUG FIX: this handler previously logged "Task proxy" --
                # a copy-paste slip; this is the task *server*.
                util.log_internal_crash("Task server")
91 |
92 |
# This proxy server is used to forward task requests to the workers.
#
# This way,
# any client who wishes to get some task done on the workers,
# only needs to have knowledge about the server.
# Clients never need to talk to a worker directly.
99 |
100 |
def _task_proxy(send_conn, _bind: Callable):
    """
    Entry point of the task proxy process.

    Binds a PULL (incoming tasks) and a PUSH (outgoing to workers) socket,
    reports their addresses back over ``send_conn``
    (or a serialized RemoteException on failure), then proxies forever.
    """
    with util.socket_factory(zmq.PULL, zmq.PUSH) as (zmq_ctx, proxy_in, proxy_out):
        with send_conn:
            try:
                send_conn.send_bytes(
                    serializer.dumps([_bind(proxy_in), _bind(proxy_out)])
                )
            except Exception:
                send_conn.send_bytes(serializer.dumps(RemoteException()))
                # BUG FIX: bail out after reporting the failure --
                # running the proxy on sockets that failed to bind is pointless.
                # (Mirrors `_task_server`'s behavior.)
                return
        try:
            zmq.proxy(proxy_in, proxy_out)
        except Exception:
            util.log_internal_crash("Task proxy")
114 |
115 |
116 | #
117 | # Helper functions to start servers, and get return values.
118 | #
119 |
120 |
def _start_server(fn, _bind: Callable):
    """Spawn ``fn`` in a child process, and return its startup handshake payload."""
    recv_conn, send_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=fn, args=[send_conn, _bind])
    proc.start()
    with recv_conn:
        return serializer.loads(recv_conn.recv_bytes())
126 |
127 |
def start_task_server(_bind: Callable) -> List[str]:
    """Start the task result server process, and return its bound addresses."""
    return _start_server(_task_server, _bind)
130 |
131 |
def start_task_proxy(_bind: Callable) -> List[str]:
    """Start the task proxy process, and return its bound addresses."""
    return _start_server(_task_proxy, _bind)
134 |
--------------------------------------------------------------------------------
/zproc/task/swarm.py:
--------------------------------------------------------------------------------
1 | import multiprocessing
2 | from typing import List, Mapping, Sequence, Any, Callable
3 |
4 | import zmq
5 |
6 | from zproc import util, serializer
7 | from zproc.consts import DEFAULT_NAMESPACE, EMPTY_MULTIPART
8 | from zproc.server.tools import ping
9 | from .result import SequenceTaskResult, SimpleTaskResult
10 | from .worker import worker_process
11 |
12 |
class Swarm:
    def __init__(self, server_address: str, *, namespace: str = DEFAULT_NAMESPACE):
        """
        Spawns and manages a fleet of worker processes,
        to which tasks can be distributed via :py:meth:`run` and :py:meth:`map_lazy`.

        :param server_address: The address of the zproc server.
        :param namespace: The namespace used by workers when accessing the state.
        """
        #: Passed on from the constructor.
        self.server_address = server_address
        #: Passed on from the constructor.
        self.namespace = namespace
        #: A ``list`` of :py:class:`multiprocessing.Process` objects for the workers spawned.
        self.worker_list = [] # type: List[multiprocessing.Process]

        self._zmq_ctx = util.create_zmq_ctx()
        self._server_meta = util.get_server_meta(self._zmq_ctx, server_address)
        # Tasks are pushed to the task proxy, which fans them out to the workers.
        self._task_push = self._zmq_ctx.socket(zmq.PUSH)
        self._task_push.connect(self._server_meta.task_proxy_in)

    def ping(self, **kwargs):
        """Ping the zproc server. (Same as :py:func:`ping`, minus ``server_address``.)"""
        return ping(self.server_address, **kwargs)

    @property
    def count(self) -> int:
        """
        Returns the number of workers currently alive.

        This property can be set manually,
        in order to change the number of workers that *should* be alive.
        """
        return sum(1 for w in self.worker_list if w.is_alive())

    @count.setter
    def count(self, value: int):
        # Work with the delta between desired and current worker count.
        value -= self.count
        if value > 0:
            for _ in range(value):
                recv_conn, send_conn = multiprocessing.Pipe()

                process = multiprocessing.Process(
                    target=worker_process, args=[self.server_address, send_conn]
                )
                process.start()

                # Wait for the worker's startup handshake;
                # a non-empty reply carries a serialized exception,
                # which `serializer.loads` is expected to surface.
                with recv_conn:
                    rep = recv_conn.recv_bytes()
                    if rep:
                        serializer.loads(rep)

                self.worker_list.append(process)
        elif value < 0:
            # Notify remaining workers to finish up, and close shop.
            for _ in range(-value):
                self._task_push.send_multipart(EMPTY_MULTIPART)

    def start(self, count: int = None):
        """Ensure ``count`` workers are alive. Defaults to the number of CPU cores."""
        if count is None:
            self.count = multiprocessing.cpu_count()
        else:
            self.count = count

    def stop(self, force: bool = False):
        """Stop the workers; ``force=True`` terminates instead of letting them finish."""
        if force:
            for p in self.worker_list:
                p.terminate()
        else:
            self.count = 0

    def run(
        self,
        target: Callable = None,
        args: Sequence = None,
        kwargs: Mapping = None,
        *,
        pass_state: bool = False,
        lazy: bool = False,
    ):
        """
        Run a single ``target(*args, **kwargs)`` call on one of the workers.

        Returns the result, or a lazy :py:class:`SimpleTaskResult` if ``lazy``.
        """
        if target is None:

            def wrapper(*a, **k):
                # NOTE(review): `target` is still None here, so this call recurses
                # straight back into `run()` and returns yet another wrapper,
                # never executing anything — looks broken; confirm intended usage.
                return self.run(target, a, k, pass_state=pass_state, lazy=lazy)

            return wrapper

        task_id = util.generate_task_id()
        if args is None:
            args = ()
        if kwargs is None:
            kwargs = {}
        # (mi, ma, a, mk, k) as consumed by `map_plus` -- plain args/kwargs only.
        params = (None, None, args, None, kwargs)
        # NOTE(review): `worker_process` unpacks 3 message frames and a 3-tuple
        # task, while this sends 2 frames with `target` inside a 4-tuple —
        # verify this against the worker's unpacking.
        task = (target, params, pass_state, self.namespace)

        self._task_push.send_multipart(
            [util.encode_chunk_id(task_id, -1), serializer.dumps(task)]
        )

        res = SimpleTaskResult(self.server_address, task_id)
        if lazy:
            return res
        return res.value

    def map_lazy(
        self,
        target: Callable,
        map_iter: Sequence[Any] = None,
        *,
        map_args: Sequence[Sequence[Any]] = None,
        args: Sequence = None,
        map_kwargs: Sequence[Mapping[str, Any]] = None,
        kwargs: Mapping = None,
        pass_state: bool = False,
        num_chunks: int = None,
    ) -> SequenceTaskResult:
        r"""
        Functional equivalent of ``map()`` in-built function,
        but executed in a parallel fashion.

        Distributes the iterables,
        provided in the ``map_*`` arguments to ``num_chunks`` no of worker nodes.

        The idea is to:
            1. Split the the iterables provided in the ``map_*`` arguments into ``num_chunks`` no of equally sized chunks.
            2. Send these chunks to ``num_chunks`` number of worker nodes.
            3. Wait for all these worker nodes to finish their task(s).
            4. Combine the acquired results in the same sequence as provided in the ``map_*`` arguments.
            5. Return the combined results.

            *Steps 3-5 can be done lazily, on the fly with the help of an iterator*

        :param target:
            The ``Callable`` to be invoked inside a :py:class:`Process`.

            *It is invoked with the following signature:*

                ``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``

            *Where:*

            - ``i`` is the index of n\ :sup:`th` element of the Iterable(s) provided in the ``map_*`` arguments.

            - ``args`` and ``kwargs`` are passed from the ``**process_kwargs``.

            The ``pass_state`` Keyword Argument allows you to include the ``state`` arg.
        :param map_iter:
            A sequence whose elements are supplied as the *first* positional argument to the ``target``.
        :param map_args:
            A sequence whose elements are supplied as positional arguments (``*args``) to the ``target``.
        :param map_kwargs:
            A sequence whose elements are supplied as keyword arguments (``**kwargs``) to the ``target``.
        :param args:
            The argument tuple for ``target``, supplied after ``map_iter`` and ``map_args``.

            By default, it is an empty ``tuple``.
        :param kwargs:
            A dictionary of keyword arguments for ``target``.

            By default, it is an empty ``dict``.
        :param pass_state:
            Whether this process needs to access the state.

            If this is set to ``False``,
            then the ``state`` argument won't be provided to the ``target``.

            If this is set to ``True``,
            then a :py:class:`State` object is provided as the first Argument to the ``target``.

            Unlike :py:class:`Process` it is set to ``False`` by default.
            (To retain a similar API to in-built ``map()``)
        :param num_chunks:
            The number of worker nodes to use.

            By default, it is set to ``multiprocessing.cpu_count()``
            (The number of CPU cores on your system)
        :return:
            The result is quite similar to ``map()`` in-built function.

            It returns a :py:class:`Iterable` which contains,
            the return values of the ``target`` function,
            when applied to every item of the Iterables provided in the ``map_*`` arguments.

            The actual "processing" starts as soon as you call this function.

            The returned :py:class:`Iterable` only fetches the results from the worker processes.

        .. note::
            - If ``len(map_iter) != len(map_args) != len(map_kwargs)``,
              then the results will be cut-off at the shortest Sequence.

        See :ref:`worker_map` for Examples.
        """
        if num_chunks is None:
            num_chunks = multiprocessing.cpu_count()

        lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
        assert (
            lengths
        ), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."

        # Results are cut-off at the shortest of the provided sequences.
        length = min(lengths)

        assert (
            length > num_chunks
        ), "`length`(%d) cannot be less than `num_chunks`(%d)" % (length, num_chunks)

        chunk_length, extra = divmod(length, num_chunks)
        if extra:
            chunk_length += 1
        # The task id encodes the chunking detail, for `SequenceTaskResult` to decode.
        task_id = util.generate_task_id((chunk_length, length, num_chunks))

        iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
        args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
        kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)

        # Serialize the target once; it is shared by every chunk.
        target_bytes = serializer.dumps_fn(target)

        for index in range(num_chunks):
            params = (
                iter_chunks[index],
                args_chunks[index],
                args,
                kwargs_chunks[index],
                kwargs,
            )
            task = (params, pass_state, self.namespace)

            self._task_push.send_multipart(
                [
                    util.encode_chunk_id(task_id, index),
                    target_bytes,
                    serializer.dumps(task),
                ]
            )

        return SequenceTaskResult(self.server_address, task_id)

    def map(self, *args, **kwargs) -> list:
        """Same as :py:meth:`map_lazy`, but blocks until the full result list is ready."""
        return self.map_lazy(*args, **kwargs).as_list

    def __del__(self):
        # Best-effort cleanup; never raise from a destructor.
        try:
            self._task_push.close()
            util.close_zmq_ctx(self._zmq_ctx)
        except Exception:
            pass
254 |
--------------------------------------------------------------------------------
/zproc/task/worker.py:
--------------------------------------------------------------------------------
# `Iterable` must come from `collections.abc`: the `collections` alias was
# deprecated since Python 3.3 and removed in Python 3.10.
from collections.abc import Iterable
from typing import Union, Callable

import zmq

from zproc import util, serializer
from zproc.consts import EMPTY_MULTIPART
from zproc.exceptions import RemoteException
from zproc.state.state import State
from .map_plus import map_plus
11 |
12 |
def run_task(
    target: Callable, task: Iterable, state: State
) -> Union[list, RemoteException]:
    """Execute one map-task chunk and return the list of per-item results.

    *task* is a ``(params, pass_state, namespace)`` triple; when
    ``pass_state`` is true, the worker's ``State`` — switched to the task's
    namespace — is passed to *target* as its first argument.
    """
    params, pass_state, namespace = task
    if pass_state:
        state.namespace = namespace
        plain_target = target  # keep a handle on the user's original function

        # BUG FIX: the wrapper must close over the *original* callable.
        # The previous code referenced the name `target` from inside the
        # closure and then rebound `target` to the wrapper itself, so the
        # wrapper called itself — infinite recursion on the first item.
        def target_with_state(*args, **kwargs):
            return plain_target(state, *args, **kwargs)

        target = target_with_state

    return map_plus(target, *params)
26 |
27 |
def worker_process(server_address: str, send_conn):
    """Entry point of a task worker process.

    Connects PULL/PUSH sockets to the server's task proxy, reports startup
    success or failure to the parent over *send_conn* (a multiprocessing
    connection), then loops: pull a task, run it, push the result back.
    """
    with util.socket_factory(zmq.PULL, zmq.PUSH) as (zmq_ctx, task_pull, result_push):
        server_meta = util.get_server_meta(zmq_ctx, server_address)

        try:
            task_pull.connect(server_meta.task_proxy_out)
            result_push.connect(server_meta.task_result_pull)
            state = State(server_address)
        except Exception:
            # Startup failed: ship a serialized traceback to the parent.
            with send_conn:
                send_conn.send_bytes(serializer.dumps(RemoteException()))
        else:
            # An empty payload signals a successful startup to the parent.
            with send_conn:
                send_conn.send_bytes(b"")

            try:
                while True:
                    msg = task_pull.recv_multipart()
                    # An empty multipart message is the shutdown sentinel.
                    if msg == EMPTY_MULTIPART:
                        return
                    chunk_id, target_bytes, task_bytes = msg

                    try:
                        task = serializer.loads(task_bytes)
                        target = serializer.loads_fn(target_bytes)

                        result = run_task(target, task, state)
                    except KeyboardInterrupt:
                        # Let Ctrl-C propagate; it must not be reported as a task failure.
                        raise
                    except Exception:
                        # Task failures are sent back as results, keeping the worker alive.
                        result = RemoteException()
                    result_push.send_multipart([chunk_id, serializer.dumps(result)])
            except Exception:
                util.log_internal_crash("Worker process")
62 |
--------------------------------------------------------------------------------
/zproc/util.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pathlib
3 | import signal
4 | import struct
5 | import threading
6 | import time
7 | import uuid
8 | from collections import deque
9 | from contextlib import suppress, contextmanager, ExitStack
10 | from itertools import islice
11 | from textwrap import indent
12 | from traceback import format_exc
13 | from typing import Union, Iterable, Generator, Callable, Tuple, Sequence, Optional, Type
14 |
15 | import psutil
16 | import zmq
17 |
18 | from zproc import exceptions
19 | from zproc import serializer
20 | from zproc.__version__ import __version__
21 | from zproc.consts import (
22 | Msgs,
23 | Cmds,
24 | ServerMeta,
25 | DEFAULT_NAMESPACE,
26 | TASK_NONCE_LENGTH,
27 | TASK_INFO_FMT,
28 | CHUNK_INFO_FMT,
29 | TASK_ID_LENGTH,
30 | )
31 |
# Directory under which all zproc IPC socket files live.
IPC_BASE_DIR = pathlib.Path.home() / ".tmp" / "zproc"
# `exist_ok=True` avoids the check-then-create race of the previous
# `if not exists(): mkdir()` pattern, which could raise FileExistsError
# when several zproc processes import this module concurrently.
IPC_BASE_DIR.mkdir(parents=True, exist_ok=True)
35 |
36 |
def create_ipc_address(name: str) -> str:
    """Build an ``ipc://`` address for *name* inside the zproc IPC directory."""
    return f"ipc://{IPC_BASE_DIR / name}"
39 |
40 |
def get_server_meta(zmq_ctx: zmq.Context, server_address: str) -> ServerMeta:
    """Query the server at *server_address* for its metadata, using a
    throwaway DEALER socket that is always closed afterwards."""
    dealer = zmq_ctx.socket(zmq.DEALER)
    try:
        dealer.connect(server_address)
        return req_server_meta(dealer)
    finally:
        dealer.close()
45 |
46 |
# Pre-serialized request payload for `req_server_meta()`.
# Computed once at import time, since the request body never changes.
_server_meta_req_cache = serializer.dumps(
    {Msgs.cmd: Cmds.get_server_meta, Msgs.namespace: DEFAULT_NAMESPACE}
)
50 |
51 |
def req_server_meta(dealer: zmq.Socket) -> ServerMeta:
    """Request the server's metadata over *dealer*.

    Raises ``RuntimeError`` when the server reports a different zproc
    version than this client.
    """
    dealer.send(_server_meta_req_cache)
    meta = serializer.loads(dealer.recv())
    if meta.version == __version__:
        return meta
    raise RuntimeError(
        "The server version didn't match. "
        "Please make sure the server (%r) is using the same version of ZProc as this client (%r)."
        % (meta.version, __version__)
    )
62 |
63 |
def to_catchable_exc(
    retry_for: Optional[Iterable[Union[signal.Signals, Type[BaseException]]]]
) -> Generator[Type[BaseException], None, None]:
    """Yield the exception types that a retry loop should catch.

    Yields nothing when *retry_for* is None. Signals in *retry_for* are not
    yielded directly: a handler is installed that converts them to
    ``SignalException``, which is yielded once up front.
    """
    if retry_for is None:
        return

    # catches all signals converted using `signal_to_exception()`
    yield exceptions.SignalException

    for e in retry_for:
        if isinstance(e, signal.Signals):
            # Side effect: installs a handler raising SignalException for `e`;
            # the blanket SignalException yielded above then covers it.
            exceptions.signal_to_exception(e)
        elif issubclass(e, BaseException):
            yield e
        else:
            raise ValueError(
                "The items of `retry_for` must either be a sub-class of `BaseException`, "
                f"or an instance of `signal.Signals`. Not `{e!r}`."
            )
83 |
84 |
def bind_to_random_ipc(sock: zmq.Socket) -> str:
    """Bind *sock* to a fresh, unique IPC endpoint; return the bound address."""
    endpoint = f"ipc://{IPC_BASE_DIR / str(uuid.uuid1())}"
    sock.bind(endpoint)
    return endpoint
89 |
90 |
def bind_to_random_tcp(sock: zmq.Socket) -> str:
    """Bind *sock* to a random free TCP port on all interfaces; return the address."""
    return "tcp://0.0.0.0:%d" % sock.bind_to_random_port("tcp://*")
95 |
96 |
def bind_to_random_address(sock: zmq.Socket) -> str:
    """Bind *sock* to a random address — IPC preferred, TCP as a fallback —
    and return the address bound to."""
    try:
        return bind_to_random_ipc(sock)
    except zmq.error.ZMQError:
        # IPC transport isn't available everywhere (e.g. Windows) — fall back.
        return bind_to_random_tcp(sock)
102 |
103 |
def close_zmq_ctx(ctx: zmq.Context):
    """Forcefully tear down *ctx*, closing every socket that belongs to it."""
    ctx.destroy()
    # NOTE(review): destroy() already terminates the context; this extra
    # term() looks redundant — confirm before removing.
    ctx.term()
107 |
108 |
def clean_process_tree(*signal_handler_args):
    """Stop all Processes in the current Process tree, recursively.

    Usable both as a plain function and as a signal handler; when invoked
    by the signal machinery, the received signal number is used as the
    process's exit code.
    """
    parent = psutil.Process()
    procs = parent.children(recursive=True)
    if procs:
        print(f"[ZProc] Cleaning up {parent.name()!r} ({os.getpid()})...")

    # Ask politely first (SIGTERM)...
    for p in procs:
        with suppress(psutil.NoSuchProcess):
            p.terminate()
    _, alive = psutil.wait_procs(procs, timeout=0.5)  # 0.5 seems to work
    # ...then SIGKILL whatever survived the grace period.
    for p in alive:
        with suppress(psutil.NoSuchProcess):
            p.kill()

    try:
        signum = signal_handler_args[0]
    except IndexError:
        # Called directly, not as a signal handler — nothing more to do.
        pass
    else:
        # Hard-exit with the signal number; os._exit skips normal teardown.
        os._exit(signum)
130 |
131 |
def make_chunks(seq: Optional[Sequence], length: int, num_chunks: int):
    """Slice *seq* into *num_chunks* contiguous chunks of *length* items.

    A ``None`` sequence yields a list of ``None`` placeholders; slices past
    the end of *seq* simply come out shorter (or empty).
    """
    if seq is None:
        return [None for _ in range(num_chunks)]
    chunks = []
    for chunk_idx in range(num_chunks):
        start = chunk_idx * length
        chunks.append(seq[start : start + length])
    return chunks
137 |
138 |
def is_main_thread() -> bool:
    """Return True when the caller is running on the interpreter's main thread."""
    return threading.main_thread() == threading.current_thread()
141 |
142 |
def create_zmq_ctx(*, linger=False) -> zmq.Context:
    """Create a fresh ZMQ context.

    Unless *linger* is set, sockets from this context drop unsent messages
    immediately on close (LINGER 0) instead of blocking.
    """
    ctx = zmq.Context()
    if linger:
        return ctx
    ctx.setsockopt(zmq.LINGER, 0)
    return ctx
148 |
149 |
def enclose_in_brackets(s: str) -> str:
    """Wrap *s* in angle brackets."""
    return "<" + s + ">"
152 |
153 |
154 | def callable_repr(c: Callable) -> str:
155 | return repr(c.__module__ + "." + c.__qualname__)
156 |
157 |
def generate_task_id(task_info: Optional[Tuple[int, int, int]] = None) -> bytes:
    """Generate a unique task id.

    The id is a random nonce; when *task_info* — a
    ``(chunk_length, length, num_chunks)`` triple — is given, it is packed
    and appended so `deconstruct_task_id()` can recover it later.
    (Annotation fixed: the parameter defaults to None, so it must be
    Optional per PEP 484.)
    """
    nonce = os.urandom(TASK_NONCE_LENGTH)
    if task_info is None:
        return nonce
    return nonce + struct.pack(TASK_INFO_FMT, *task_info)
163 |
164 |
def deconstruct_task_id(task_id: bytes) -> Optional[tuple]:
    """Recover the packed task info from *task_id*, or None when the id is a
    bare nonce with no info attached."""
    if len(task_id) != TASK_NONCE_LENGTH:
        return struct.unpack(TASK_INFO_FMT, task_id[TASK_NONCE_LENGTH:])
    return None
170 |
171 |
def encode_chunk_id(task_id: bytes, index: int) -> bytes:
    """Append the packed chunk *index* to *task_id*, forming a chunk id."""
    suffix = struct.pack(CHUNK_INFO_FMT, index)
    return task_id + suffix
174 |
175 |
def decode_chunk_id(chunk: bytes) -> Tuple[bytes, int]:
    """Split a chunk id back into its ``(task_id, chunk_index)`` parts."""
    task_id = chunk[:TASK_ID_LENGTH]
    (index,) = struct.unpack(CHUNK_INFO_FMT, chunk[TASK_ID_LENGTH:])
    return task_id, index
181 |
182 |
def log_internal_crash(subsystem: str):
    """Print an indented crash report — subsystem name, pid, and the current
    traceback — for *subsystem*. Intended to be called from an `except` block."""
    header = f"subsystem: {subsystem!r}\npid: {os.getpid()}"
    body = indent("\n\n".join((header, format_exc())), "  ")
    print(f"\n[ZProc] Internal crash report:\n{body}")
188 |
189 |
@contextmanager
def socket_factory(*sock_types):
    """Yield ``(ctx, sock, ...)`` — a fresh ZMQ context plus one socket per
    requested type — closing everything automatically on exit."""
    with ExitStack() as stack:
        ctx = stack.enter_context(zmq.Context())
        socks = [stack.enter_context(ctx.socket(sock_type)) for sock_type in sock_types]
        yield (ctx, *socks)
196 |
197 |
@contextmanager
def perf_counter():
    """Time the managed block; print the elapsed seconds and the ratio of
    elapsed time to the timer's own call overhead."""
    t0 = time.perf_counter()
    t1 = time.perf_counter()
    overhead = t1 - t0  # cost of two back-to-back perf_counter() calls
    start = time.perf_counter()
    yield
    elapsed = time.perf_counter() - start
    print(elapsed, elapsed / overhead)
207 |
208 |
def consume(iterator, n=None):
    """Advance *iterator* by *n* steps; exhaust it entirely when *n* is None."""
    # Both branches consume the iterator at C speed.
    if n is not None:
        # slice up to position n, then pull once (None if already exhausted)
        next(islice(iterator, n, n), None)
    else:
        # a zero-length deque drains the whole iterator without storing items
        deque(iterator, maxlen=0)
218 |
219 |
def strict_request_reply(msg, send: Callable, recv: Callable):
    """
    Ensure a strict request-reply cycle: send *msg*, then return the reply.

    If receiving fails, one extra receive is attempted (ignoring a
    non-blocking ``zmq.error.Again``) so the socket doesn't hand a stale,
    out-of-order reply to the next caller, then the error is re-raised.
    """
    # The old `try: send(msg) except Exception: raise` wrapper was a no-op
    # (re-raising immediately is what an unhandled exception does anyway).
    send(msg)
    try:
        return recv()
    except Exception:
        # Drain the pending reply (if any) so the next request starts clean.
        with suppress(zmq.error.Again):
            recv()
        raise
236 |
--------------------------------------------------------------------------------