├── .flake8
├── .gitignore
├── .pre-commit-config.yaml
├── CONTRIBUTING.md
├── LICENSE.md
├── MANIFEST.in
├── Makefile
├── README.md
├── bindep.txt
├── docs
│   ├── Makefile
│   ├── _static
│   │   ├── basic_receptor.png
│   │   ├── buffer_manager.png
│   │   ├── filebackedbuffer.png
│   │   ├── framedmessage.png
│   │   └── frames.png
│   ├── conf.py
│   ├── controller.rst
│   ├── index.rst
│   ├── install.rst
│   ├── intro.rst
│   ├── make.bat
│   ├── managing.rst
│   ├── messages.rst
│   ├── plugins.rst
│   ├── security.rst
│   └── source
│       ├── modules.rst
│       ├── receptor.buffers.rst
│       ├── receptor.connection.rst
│       ├── receptor.logstash_formatter.rst
│       ├── receptor.messages.rst
│       ├── receptor.rst
│       └── receptor.worker.rst
├── installer
│   ├── install.yml
│   └── roles
│       └── receptor_install
│           ├── files
│           │   └── receptor.service
│           ├── tasks
│           │   └── main.yml
│           ├── templates
│           │   └── receptor_node.conf.j2
│           └── vars
│               └── main.yml
├── packaging
│   ├── docker
│   │   ├── Dockerfile
│   │   ├── entrypoint.sh
│   │   └── receptor.conf
│   └── rpm
│       └── receptor.spec.j2
├── poetry.lock
├── pyproject.toml
├── receptor
│   ├── __init__.py
│   ├── __main__.py
│   ├── bridgequeue.py
│   ├── buffers
│   │   ├── __init__.py
│   │   └── file.py
│   ├── config.py
│   ├── connection
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── manager.py
│   │   ├── sock.py
│   │   └── ws.py
│   ├── controller.py
│   ├── diagnostics.py
│   ├── entrypoints.py
│   ├── exceptions.py
│   ├── fileio.py
│   ├── logstash_formatter
│   │   ├── LICENSE
│   │   ├── __init__.py
│   │   └── logstash.py
│   ├── messages
│   │   ├── __init__.py
│   │   ├── directive.py
│   │   └── framed.py
│   ├── plugin_utils.py
│   ├── receptor.py
│   ├── router.py
│   ├── serde.py
│   ├── stats.py
│   ├── work.py
│   └── worker
│       ├── __init__.py
│       └── demo.py
├── setup.py
├── test.ini
├── test
│   ├── __init__.py
│   ├── integration
│   │   └── test_main.py
│   ├── perf
│   │   ├── __init__.py
│   │   ├── flat-mesh.yaml
│   │   ├── random-mesh.yaml
│   │   ├── test_ping.py
│   │   ├── test_ports.py
│   │   ├── test_route.py
│   │   ├── test_websockets.py
│   │   └── tree-mesh.yaml
│   └── unit
│       ├── __init__.py
│       ├── test_bridge_queue.py
│       ├── test_durable_buffer.py
│       ├── test_framedbuffer.py
│       ├── test_router.py
│       └── test_serde.py
└── tox.ini
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E203, W503
3 | # line length is intentionally set to 100 here because black uses Bugbear
4 | # See https://github.com/psf/black/blob/master/README.md#line-length for more details
5 | max-line-length = 100
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Editor Stuff
2 | *~
3 | [._]*.s[a-v][a-z]
4 | [._]*.sw[a-p]
5 | [._]s[a-rt-v][a-z]
6 | [._]ss[a-gi-z]
7 | [._]sw[a-p]
8 | .vscode/*
9 | .idea/*
10 |
11 | # Byte-compiled / optimized / DLL files
12 | __pycache__/
13 | *.py[cod]
14 | *$py.class
15 |
16 | # C extensions
17 | *.so
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | rpm-build/
23 | develop-eggs/
24 | dist/
25 | downloads/
26 | eggs/
27 | .eggs/
28 | lib/
29 | lib64/
30 | parts/
31 | sdist/
32 | var/
33 | wheels/
34 | *.egg-info/
35 | .installed.cfg
36 | *.egg
37 | MANIFEST
38 |
39 | # Installer logs
40 | pip-log.txt
41 | pip-delete-this-directory.txt
42 |
43 | # Unit test / coverage reports
44 | htmlcov/
45 | .tox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | .hypothesis/
53 | .pytest_cache/
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # pyenv
81 | .python-version
82 |
83 | # celery beat schedule file
84 | celerybeat-schedule
85 |
86 | # SageMath parsed files
87 | *.sage.py
88 |
89 | # Environments
90 | .env
91 | .venv
92 | env/
93 | venv/
94 | ENV/
95 | env.bak/
96 | venv.bak/
97 |
98 | # Spyder project settings
99 | .spyderproject
100 | .spyproject
101 |
102 | # Rope project settings
103 | .ropeproject
104 |
105 | # mkdocs documentation
106 | /site
107 |
108 | # mypy
109 | .mypy_cache/
110 | graph_*.dot
111 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | repos:
3 | - repo: https://github.com/pre-commit/pre-commit-hooks
4 | rev: v2.0.0
5 | hooks:
6 | - id: flake8
7 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Receptor Contributing Guidelines
2 |
3 | Hi there! We're excited to have you as a contributor.
4 |
5 | Have questions about this document or anything not covered here? Come chat with us in `#receptor` on irc.freenode.net.
6 |
7 | ## Things to know prior to submitting code
8 |
9 | - All code and doc submissions are done through pull requests against the `devel` branch.
10 | - Bugfixes for a release should be submitted as a pull request against the
11 | release branch. Bugfixes in releases will selectively be merged back into
12 | devel.
13 | - Take care to make sure no merge commits are in the submission; use `git rebase` rather than `git merge` for this reason.
14 |
15 | ## Setting up your development environment
16 |
17 | We use [poetry](https://python-poetry.org) to develop **Receptor**.
18 |
19 | ```bash
20 | (host)$ poetry install
21 | ```
22 |
23 | ## Linting and Unit Tests
24 |
25 | * Use `flake8` for linting.
26 | * Use `pytest` for unit tests.
27 |
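For example (typical invocations; CI may run these through `tox` instead):

```bash
(host)$ poetry run flake8 receptor
(host)$ poetry run pytest test/unit
```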
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Apache License
2 | ==============
3 |
4 | _Version 2.0, January 2004_
5 | _<https://www.apache.org/licenses/>_
6 |
7 | ### Terms and Conditions for use, reproduction, and distribution
8 |
9 | #### 1. Definitions
10 |
11 | “License” shall mean the terms and conditions for use, reproduction, and
12 | distribution as defined by Sections 1 through 9 of this document.
13 |
14 | “Licensor” shall mean the copyright owner or entity authorized by the copyright
15 | owner that is granting the License.
16 |
17 | “Legal Entity” shall mean the union of the acting entity and all other entities
18 | that control, are controlled by, or are under common control with that entity.
19 | For the purposes of this definition, “control” means **(i)** the power, direct or
20 | indirect, to cause the direction or management of such entity, whether by
21 | contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
22 | outstanding shares, or **(iii)** beneficial ownership of such entity.
23 |
24 | “You” (or “Your”) shall mean an individual or Legal Entity exercising
25 | permissions granted by this License.
26 |
27 | “Source” form shall mean the preferred form for making modifications, including
28 | but not limited to software source code, documentation source, and configuration
29 | files.
30 |
31 | “Object” form shall mean any form resulting from mechanical transformation or
32 | translation of a Source form, including but not limited to compiled object code,
33 | generated documentation, and conversions to other media types.
34 |
35 | “Work” shall mean the work of authorship, whether in Source or Object form, made
36 | available under the License, as indicated by a copyright notice that is included
37 | in or attached to the work (an example is provided in the Appendix below).
38 |
39 | “Derivative Works” shall mean any work, whether in Source or Object form, that
40 | is based on (or derived from) the Work and for which the editorial revisions,
41 | annotations, elaborations, or other modifications represent, as a whole, an
42 | original work of authorship. For the purposes of this License, Derivative Works
43 | shall not include works that remain separable from, or merely link (or bind by
44 | name) to the interfaces of, the Work and Derivative Works thereof.
45 |
46 | “Contribution” shall mean any work of authorship, including the original version
47 | of the Work and any modifications or additions to that Work or Derivative Works
48 | thereof, that is intentionally submitted to Licensor for inclusion in the Work
49 | by the copyright owner or by an individual or Legal Entity authorized to submit
50 | on behalf of the copyright owner. For the purposes of this definition,
51 | “submitted” means any form of electronic, verbal, or written communication sent
52 | to the Licensor or its representatives, including but not limited to
53 | communication on electronic mailing lists, source code control systems, and
54 | issue tracking systems that are managed by, or on behalf of, the Licensor for
55 | the purpose of discussing and improving the Work, but excluding communication
56 | that is conspicuously marked or otherwise designated in writing by the copyright
57 | owner as “Not a Contribution.”
58 |
59 | “Contributor” shall mean Licensor and any individual or Legal Entity on behalf
60 | of whom a Contribution has been received by Licensor and subsequently
61 | incorporated within the Work.
62 |
63 | #### 2. Grant of Copyright License
64 |
65 | Subject to the terms and conditions of this License, each Contributor hereby
66 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
67 | irrevocable copyright license to reproduce, prepare Derivative Works of,
68 | publicly display, publicly perform, sublicense, and distribute the Work and such
69 | Derivative Works in Source or Object form.
70 |
71 | #### 3. Grant of Patent License
72 |
73 | Subject to the terms and conditions of this License, each Contributor hereby
74 | grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
75 | irrevocable (except as stated in this section) patent license to make, have
76 | made, use, offer to sell, sell, import, and otherwise transfer the Work, where
77 | such license applies only to those patent claims licensable by such Contributor
78 | that are necessarily infringed by their Contribution(s) alone or by combination
79 | of their Contribution(s) with the Work to which such Contribution(s) was
80 | submitted. If You institute patent litigation against any entity (including a
81 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a
82 | Contribution incorporated within the Work constitutes direct or contributory
83 | patent infringement, then any patent licenses granted to You under this License
84 | for that Work shall terminate as of the date such litigation is filed.
85 |
86 | #### 4. Redistribution
87 |
88 | You may reproduce and distribute copies of the Work or Derivative Works thereof
89 | in any medium, with or without modifications, and in Source or Object form,
90 | provided that You meet the following conditions:
91 |
92 | * **(a)** You must give any other recipients of the Work or Derivative Works a copy of
93 | this License; and
94 | * **(b)** You must cause any modified files to carry prominent notices stating that You
95 | changed the files; and
96 | * **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
97 | all copyright, patent, trademark, and attribution notices from the Source form
98 | of the Work, excluding those notices that do not pertain to any part of the
99 | Derivative Works; and
100 | * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
101 | Derivative Works that You distribute must include a readable copy of the
102 | attribution notices contained within such NOTICE file, excluding those notices
103 | that do not pertain to any part of the Derivative Works, in at least one of the
104 | following places: within a NOTICE text file distributed as part of the
105 | Derivative Works; within the Source form or documentation, if provided along
106 | with the Derivative Works; or, within a display generated by the Derivative
107 | Works, if and wherever such third-party notices normally appear. The contents of
108 | the NOTICE file are for informational purposes only and do not modify the
109 | License. You may add Your own attribution notices within Derivative Works that
110 | You distribute, alongside or as an addendum to the NOTICE text from the Work,
111 | provided that such additional attribution notices cannot be construed as
112 | modifying the License.
113 |
114 | You may add Your own copyright statement to Your modifications and may provide
115 | additional or different license terms and conditions for use, reproduction, or
116 | distribution of Your modifications, or for any such Derivative Works as a whole,
117 | provided Your use, reproduction, and distribution of the Work otherwise complies
118 | with the conditions stated in this License.
119 |
120 | #### 5. Submission of Contributions
121 |
122 | Unless You explicitly state otherwise, any Contribution intentionally submitted
123 | for inclusion in the Work by You to the Licensor shall be under the terms and
124 | conditions of this License, without any additional terms or conditions.
125 | Notwithstanding the above, nothing herein shall supersede or modify the terms of
126 | any separate license agreement you may have executed with Licensor regarding
127 | such Contributions.
128 |
129 | #### 6. Trademarks
130 |
131 | This License does not grant permission to use the trade names, trademarks,
132 | service marks, or product names of the Licensor, except as required for
133 | reasonable and customary use in describing the origin of the Work and
134 | reproducing the content of the NOTICE file.
135 |
136 | #### 7. Disclaimer of Warranty
137 |
138 | Unless required by applicable law or agreed to in writing, Licensor provides the
139 | Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
140 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
141 | including, without limitation, any warranties or conditions of TITLE,
142 | NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
143 | solely responsible for determining the appropriateness of using or
144 | redistributing the Work and assume any risks associated with Your exercise of
145 | permissions under this License.
146 |
147 | #### 8. Limitation of Liability
148 |
149 | In no event and under no legal theory, whether in tort (including negligence),
150 | contract, or otherwise, unless required by applicable law (such as deliberate
151 | and grossly negligent acts) or agreed to in writing, shall any Contributor be
152 | liable to You for damages, including any direct, indirect, special, incidental,
153 | or consequential damages of any character arising as a result of this License or
154 | out of the use or inability to use the Work (including but not limited to
155 | damages for loss of goodwill, work stoppage, computer failure or malfunction, or
156 | any and all other commercial damages or losses), even if such Contributor has
157 | been advised of the possibility of such damages.
158 |
159 | #### 9. Accepting Warranty or Additional Liability
160 |
161 | While redistributing the Work or Derivative Works thereof, You may choose to
162 | offer, and charge a fee for, acceptance of support, warranty, indemnity, or
163 | other liability obligations and/or rights consistent with this License. However,
164 | in accepting such obligations, You may act only on Your own behalf and on Your
165 | sole responsibility, not on behalf of any other Contributor, and only if You
166 | agree to indemnify, defend, and hold each Contributor harmless for any liability
167 | incurred by, or claims asserted against, such Contributor by reason of your
168 | accepting any such warranty or additional liability.
169 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.md
2 | include LICENSE.md
3 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | PYTHON ?= python
2 | ifeq ($(origin VIRTUAL_ENV), undefined)
3 | DIST_PYTHON ?= poetry run $(PYTHON)
4 | else
5 | DIST_PYTHON ?= $(PYTHON)
6 | endif
7 |
8 | NAME = receptor
9 | IMAGE_NAME ?= $(NAME)
10 | PIP_NAME = receptor
11 | VERSION := $(shell $(DIST_PYTHON) setup.py --version)
12 | ifeq ($(OFFICIAL),yes)
13 | RELEASE ?= 1
14 | else
15 | ifeq ($(origin RELEASE), undefined)
16 | RELEASE := 0.git_$(shell git rev-parse --short HEAD)
17 | endif
18 | endif
19 | DIST ?= el7
20 | ARCH ?= x86_64
21 |
22 | NVR_RELEASE = $(RELEASE).$(DIST)
23 | EPEL_DIST = $(subst el,epel-,$(DIST))
24 | NVR = $(NAME)-$(VERSION)-$(NVR_RELEASE)
25 | RESULTDIR = rpm-build/results-$(DIST)-$(ARCH)
26 |
27 | .PHONY: clean version release dist sdist image dev shell test \
28 | 	mock-rpm mock-srpm docs
29 |
30 |
31 | clean:
32 | rm -rf dist
33 | rm -rf receptor.egg-info
34 | rm -rf rpm-build
35 |
36 | version:
37 | @echo $(VERSION)
38 |
39 | release:
40 | @echo $(RELEASE)
41 |
42 | dist:
43 | $(DIST_PYTHON) setup.py bdist_wheel --universal
44 |
45 | sdist: dist/$(NAME)-$(VERSION).tar.gz
46 |
47 | dist/$(NAME)-$(VERSION).tar.gz: $(shell find receptor -type f -name '*.py')
48 | $(DIST_PYTHON) setup.py sdist
49 |
50 | image: dist
51 | docker build --rm=true -t $(IMAGE_NAME) -f ./packaging/docker/Dockerfile .
52 |
53 | dev:
54 | poetry install
55 |
56 | shell:
57 | poetry shell
58 |
59 | test:
60 | tox
61 |
62 | docs:
63 | cd docs && make html
64 |
65 | dist/$(VERSION).tar.gz: dist/$(NAME)-$(VERSION).tar.gz
66 | cp dist/$(NAME)-$(VERSION).tar.gz dist/$(VERSION).tar.gz
67 |
68 | rpm-build/$(NVR).spec: packaging/rpm/$(NAME).spec.j2
69 | mkdir -p rpm-build
70 | ansible -i localhost, -c local all -m template \
71 | -a "src=packaging/rpm/$(NAME).spec.j2 dest=rpm-build/$(NVR).spec" \
72 | -e version=$(VERSION) \
73 | -e release=$(NVR_RELEASE)
74 |
75 | $(RESULTDIR)/$(NVR).src.rpm: dist/$(VERSION).tar.gz rpm-build/$(NVR).spec
76 | mock --buildsrpm --no-clean -r $(EPEL_DIST)-$(ARCH) --spec rpm-build/$(NVR).spec --sources dist/$(VERSION).tar.gz --resultdir $(RESULTDIR)
77 |
78 | $(RESULTDIR)/$(NVR).rpm: $(RESULTDIR)/$(NVR).src.rpm
79 | mock --rebuild --no-clean -r $(EPEL_DIST)-$(ARCH) $(RESULTDIR)/$(NVR).src.rpm --resultdir $(RESULTDIR)
80 | @touch $@
81 |
82 | mock-srpm: $(RESULTDIR)/$(NVR).src.rpm
83 |
84 | mock-rpm: $(RESULTDIR)/$(NVR).rpm
85 |
86 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Receptor
2 | ==============
3 |
4 | [](https://receptor.readthedocs.io/en/latest/)
5 | [](https://github.com/project-receptor/receptor/actions?query=workflow%3A%22Receptor+Testing%22)
6 |
7 |
8 | For the latest documentation see: [https://receptor.readthedocs.io](https://receptor.readthedocs.io/en/latest/)
9 |
10 | Get Involved
11 | ============
12 |
13 | * We use [GitHub issues](https://github.com/project-receptor/receptor/issues) to track bug reports and feature ideas
14 | * Want to contribute? Check out our [contributing guide](CONTRIBUTING.md)
15 | * Join us in the `#receptor` channel on Freenode IRC
16 |
--------------------------------------------------------------------------------
/bindep.txt:
--------------------------------------------------------------------------------
1 | # This is a cross-platform list tracking distribution packages needed by tests;
2 | # see http://docs.openstack.org/infra/bindep/ for additional information.
3 |
4 | gcc [test platform:rpm]
5 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SPHINXPROJ = Receptor
8 | SOURCEDIR = .
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
--------------------------------------------------------------------------------
/docs/_static/basic_receptor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/docs/_static/basic_receptor.png
--------------------------------------------------------------------------------
/docs/_static/buffer_manager.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/docs/_static/buffer_manager.png
--------------------------------------------------------------------------------
/docs/_static/filebackedbuffer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/docs/_static/filebackedbuffer.png
--------------------------------------------------------------------------------
/docs/_static/framedmessage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/docs/_static/framedmessage.png
--------------------------------------------------------------------------------
/docs/_static/frames.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/docs/_static/frames.png
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | import os
4 | import sys
5 |
6 | sys.path.insert(0, os.path.abspath("../"))
7 | sys.path.insert(0, os.path.abspath("."))
8 | sys.path.insert(0, os.path.abspath("./"))
9 |
10 | project = "Receptor"
11 | copyright = "2019, Red Hat"
12 | author = "Red Hat"
13 |
14 | version = ""
15 | release = "1.0.0"
16 |
17 | extensions = [
18 | "sphinx.ext.autodoc",
19 | "sphinx.ext.todo",
20 | "sphinx.ext.coverage",
21 | "sphinx.ext.doctest",
22 | "sphinx.ext.intersphinx",
23 | "sphinx.ext.viewcode",
24 | ]
25 |
26 | templates_path = ["_templates"]
27 |
28 | source_suffix = [".rst", ".md"]
29 |
30 | master_doc = "index"
31 |
32 | language = None
33 |
34 | exclude_patterns = []
35 |
36 | pygments_style = "sphinx"
37 |
38 | html_theme = "alabaster"
39 |
40 | html_static_path = ["_static"]
41 |
42 | htmlhelp_basename = "Receptordoc"
43 |
44 | latex_elements = {}
45 |
46 | latex_documents = [
47 | (master_doc, "Receptor.tex", "Receptor Documentation", "Red Hat", "manual",),
48 | ]
49 |
50 | man_pages = [(master_doc, "receptor", "Receptor Documentation", [author], 1)]
51 |
52 | texinfo_documents = [
53 | (
54 | master_doc,
55 | "Receptor",
56 | "Receptor Documentation",
57 | author,
58 | "Receptor",
59 | "One line description of project.",
60 | "Miscellaneous",
61 | ),
62 | ]
63 |
64 | todo_include_todos = True
65 |
--------------------------------------------------------------------------------
/docs/controller.rst:
--------------------------------------------------------------------------------
1 | .. _controller:
2 |
3 | Implementing the Controller
4 | ===========================
5 |
6 | As mentioned in :ref:`intro`, Controllers are special types of *Nodes* that link a larger
7 | application or product to a Receptor mesh; their job is to send messages containing work
8 | to plugins on other nodes. You can have one or many of these on the mesh.
9 |
10 | Module details for the controller are available in :mod:`receptor.controller`.
11 | The command line interface for Receptor also uses the controller; those entrypoints are
12 | great examples of how to start and run nodes: :mod:`receptor.entrypoints`
13 |
14 | Writing a basic Controller
15 | --------------------------
16 |
17 | Receptor is built on Python's asyncio framework, so any interface to it needs to
18 | be asyncio-aware. Let's look at the most basic usage of the Controller
19 |
20 | .. code-block:: python
21 | :linenos:
22 |
23 | import receptor
24 | config = receptor.ReceptorConfig() # Initialize the receptor configuration
25 | controller = receptor.Controller(config) # Initialize a Receptor Controller
26 | controller.run()
27 |
28 | The last line starts an asyncio event loop that will not return until the Controller is shut down.
29 |
30 | This isn't very interesting. We should start a listener that will accept connections from other
31 | receptor nodes
32 |
33 | .. code-block:: python
34 | :linenos:
35 | :lineno-start: 3
36 |
37 | controller = receptor.Controller(config)
38 | controller.enable_server(["rnp://0.0.0.0:8888"])
39 | controller.run()
40 |
41 | The server won't start listening until you call the ``run()`` method.
42 |
43 | Starting the service is useful for letting other Receptor nodes connect to the Controller but
44 | it's also possible to have the controller reach out to peers directly::
45 |
46 | controller.add_peer("rnp://10.0.0.1:8888")
47 |
48 | Once the event loop is started with ``run()`` the connection will be established.
49 |
50 | If the rest of your program uses asyncio you may already have an event loop; you can pass it
51 | in when you initialize the controller::
52 |
53 | controller = receptor.Controller(config, loop=my_event_loop)
54 |
55 | If you are managing the running of the event loop somewhere else in your code, you can pass the
56 | event loop to the Controller and omit calling ``.run()``
57 |
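For example, a minimal sketch of that pattern, assuming the rest of your application owns
and runs the loop::

    import asyncio
    import receptor

    my_event_loop = asyncio.get_event_loop()
    config = receptor.ReceptorConfig()
    controller = receptor.Controller(config, loop=my_event_loop)
    controller.enable_server(["rnp://0.0.0.0:8888"])
    # No call to controller.run() -- the application drives the loop itself
    my_event_loop.run_forever()
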
58 | Sending and Receiving Work
59 | --------------------------
60 |
61 | Now that we have the basics of initializing the Controller and starting a service lets look at
62 | sending and receiving work
63 |
64 | .. code-block:: python
65 |
66 | msgid = await controller.send(payload={"url": "https://github.com/status", "method": "GET"},
67 |                               recipient="othernode",
68 |                               directive="receptor_http:execute")
69 |
70 | When a message is constructed for sending via the Receptor mesh, an identifier is generated and
71 | returned. If you have sent several messages to the mesh you can use this identifier to distinguish
72 | responses from one request to another. You should have another task elsewhere that can receive
73 | responses, which we'll get to later. In the meantime
74 |
75 | .. code-block:: python
76 |
77 | message = await controller.recv()
78 | print(f"{message.header.in_response_to} : {message.payload.readall()}")
79 |
80 | Plugins on Receptor nodes can send multiple messages in response to a single request and it's
81 | useful to know when a plugin is done performing work
82 |
83 | .. code-block:: python
84 |
85 | message = await controller.recv()
86 | print(f"{message.header.in_response_to} : {message.payload.readall()}")
87 | if message.header.get("eof", False):
88 |     print("Work finished!")
89 |
90 | Using asyncio tasks for Sending and Receiving
91 | ---------------------------------------------
92 |
93 | It may be necessary to set up asyncio tasks that are responsible for monitoring another part of the
94 | system for work that needs to be sent to Receptor nodes as well as watching for replies from the
95 | mesh.
96 |
97 | Tasks that send data
98 | ^^^^^^^^^^^^^^^^^^^^
99 |
100 | One approach that you can take is to create an async task that looks for work and pass that to the
101 | Controller's *run()* method. This way, when you are finished checking for work, all you need
102 | to do is return, and the Controller shuts down
103 |
104 | .. code-block:: python
105 | :linenos:
106 |
107 | def my_awesome_controller():
108 |     async def relay_work():
109 |         while True:
110 |             work_thing = get_some_work()
111 |             if work_thing:
112 |                 await controller.send(
113 |                     payload=work_thing,
114 |                     recipient="my_other_receptor_node",
115 |                     directive="receptor_plugin:execute"
116 |                 )
117 |             else:
118 |                 if am_i_done:
119 |                     break
120 |             await asyncio.sleep(0.1)
121 |         print("All done, Controller shutting down!")
122 |
123 |     config = receptor.ReceptorConfig()
124 |     controller = receptor.Controller(config)
125 |     controller.run(relay_work)
126 |
127 | Passing this task to *run()* is optional; it's entirely possible to just create this task and
128 | have the run loop be persistent
129 |
130 | .. code-block:: python
131 | :linenos:
132 |
133 | def my_awesome_controller():
134 |     async def relay_work():
135 |         while True:
136 |             work_thing = get_some_work()
137 |             if work_thing:
138 |                 await controller.send(
139 |                     payload=work_thing,
140 |                     recipient="my_other_receptor_node",
141 |                     directive="receptor_plugin:execute"
142 |                 )
143 |             await asyncio.sleep(0.1)
144 |         print("All done, Controller shutting down!")
145 |
146 |     config = receptor.ReceptorConfig()
147 |     controller = receptor.Controller(config)
148 |     controller.loop.create_task(relay_work())
149 |     controller.run()
150 |
151 | Tasks that receive data
152 | ^^^^^^^^^^^^^^^^^^^^^^^
153 |
154 | Receiving data is very similar to sending data in that you can take a few different
155 | approaches to match your use case. The Controller internally relies on an
156 | `asyncio Queue <https://docs.python.org/3/library/asyncio-queue.html>`_. If you have your own
157 | way of fetching events from this queue, you can pass your queue to the Controller when you instantiate it
158 |
159 | .. code-block:: python
160 |
161 | controller = receptor.Controller(config, queue=my_asyncio_queue)
162 |
163 | Any responses will be received in the queue as they arrive at the Controller node. If you don't
164 | have an existing queue, one is automatically created for you and is available at *controller.queue*
165 |
166 | There is a helpful method on the controller that you can use to receive an event once it
167 | comes in: :meth:`receptor.controller.Controller.recv`. Let's take a look at how we can create a task
168 | to consume events one at a time from that queue
169 |
170 | .. code-block:: python
171 | :linenos:
172 |
173 | def my_awesome_controller():
174 |     async def read_responses():
175 |         while True:
176 |             message = await controller.recv()
177 |             sent_message_id = message.header.get("in_response_to", None)
178 |             if message.payload and sent_message_id:
179 |                 print(f"I got a response and it said: {message.payload.readall().decode()}")
180 |                 print(f"It was in response to {sent_message_id}")
181 |             if message.header.get("eof", False):
182 |                 print("The plugin was finished sending messages")
183 |
184 |     config = receptor.ReceptorConfig()
185 |     controller = receptor.Controller(config)
186 |     controller.loop.create_task(read_responses())
187 |     controller.run()
188 |
189 | Combine this response handling task with the message sending tasks from the section above and you
190 | have a complete Receptor controller system ready to be integrated.
191 |
192 | Getting information about the mesh
193 | ----------------------------------
194 |
195 | Each individual Node on a network has a view of the rest of the nodes and routes interconnecting
196 | the mesh, and a Controller is no different. It may be necessary for a Controller's application to
197 | track and manage that information internally, as well as perform health and latency checks on other
198 | nodes.
199 |
200 | Route table and capabilities
201 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
202 |
203 | Once you have called *.run()* or have an event loop running, you'll be able to introspect:
204 |
205 | * Nodes that exist on the mesh::
206 |
207 | controller.receptor.router.get_nodes()
208 |
209 | * Edges between nodes::
210 |
211 | controller.receptor.router.get_edges()
212 |
213 | * Node capabilities::
214 |
215 | controller.receptor.router.node_capabilities()
216 |
217 | You can find more details in :mod:`receptor.router`
218 |
219 | Pings and Health Checks
220 | ^^^^^^^^^^^^^^^^^^^^^^^
221 |
222 | *ping* is a command fundamental to both node keepalives and health checks of connected nodes
223 | anywhere in the mesh. Ping not only sends back timing information that can help you check
224 | mesh latency between the controller and a node, it also returns information about what work
225 | is currently being executed.
226 |
227 | Sending a ping works a lot like sending a normal message as in the examples above, except
228 | there is a special controller method for it: :meth:`receptor.controller.Controller.ping`::
229 |
230 | msg_id = await controller.ping("some_other_node")
231 |
232 | The response appears in the response queue if/when it's received. The *msg_id* will match
233 | the *in_response_to* key on the received message::
234 |
235 | message = await controller.recv()
236 | pprint(message.payload.readall().decode())
237 | {
238 | "initial_time":{
239 | "_type":"datetime.datetime",
240 | "value":1584663304.815156
241 | },
242 | "response_time":{
243 | "_type":"datetime.datetime",
244 | "value":1584663305.037581
245 | },
246 | "active_work":[
247 | {
248 | "id":291723580643927245576334265826187768140,
249 | "directive":"receptor_sleep:execute",
250 | "sender":"89abc47c-9d8f-41fe-be3b-23d655b0b73b"
251 | }
252 | ]
253 | }
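
If all you need is a quick latency number, note that the timestamps in the sample above
are epoch seconds. A minimal sketch, assuming the ping payload decodes as JSON exactly
as shown::

    import json

    message = await controller.recv()
    data = json.loads(message.payload.readall().decode())
    latency = data["response_time"]["value"] - data["initial_time"]["value"]
    print(f"mesh round trip took {latency:.3f} seconds")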
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 |
2 | Receptor
3 | ========
4 |
5 | Receptor is a mesh network that connects services that have work to do with
6 | nodes that can perform that work, typically because the work needs to be done
7 | closer to its target, which might not be directly accessible to the systems
8 | producing the work. It's intended to be used on any network topology across data
9 | centers and clouds, especially when passing between zones that might have
10 | different ingress patterns and security policies.
11 |
12 | .. toctree::
13 | :maxdepth: 3
14 | :caption: Contents:
15 |
16 | intro
17 | install
18 | controller
19 | plugins
20 | messages
21 | managing
22 | security
23 |
24 | Indices and tables
25 | ==================
26 |
27 | * :ref:`genindex`
28 | * :ref:`modindex`
29 | * :ref:`search`
30 |
--------------------------------------------------------------------------------
/docs/install.rst:
--------------------------------------------------------------------------------
1 | .. _install:
2 |
3 | Installing Receptor
4 | ===================
5 |
6 | Ansible
7 | -------
8 |
9 | In the ``/installer`` directory you'll find an Ansible Playbook and Role that will install
10 | Receptor nodes once you provide an ``inventory`` file that looks like this
11 |
12 | .. code-block:: ini
13 |
14 | hostA server_port=32888
15 | hostB peers=["hostA"]
16 | hostC peers=["hostB", "hostA:32888"] server_port=8889
17 |
18 | This will deploy a 3 node cluster. There are some other ways of tuning the deployment on each node
19 | which you can see by reading the install playbook. Run Ansible to start the install::
20 |
21 | ansible-playbook -i inventory installer/install.yml
22 |
23 | If you would rather deploy manually, the installer may still provide a good point of reference
24 | as it contains a template Receptor configuration file at
25 | ``installer/roles/receptor_install/templates`` and a parameterized systemd service unit
26 | file at ``installer/roles/receptor_install/files``
27 |
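Because the service unit is parameterized on the node id (systemd's ``%i`` specifier), it
is meant to be used as a template unit. Hypothetically, if it were installed as
``receptor@.service``, starting a node would look like::

    $ systemctl enable --now receptor@hostA
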
28 | Using pip
29 | ---------
30 |
31 | Python 3.6+ is required::
32 |
33 | $ pip install receptor
34 |
35 |
36 | From source
37 | -----------
38 |
39 | Check out the source code from `github <https://github.com/project-receptor/receptor>`_::
40 |
41 | $ git clone git://github.com/project-receptor/receptor
42 |
43 | Then install::
44 |
45 | $ python setup.py install
46 |
47 | OR::
48 |
49 | $ pip install .
50 |
51 | .. _builddist:
52 |
53 | Building the distribution
54 | -------------------------
55 |
56 | To produce an installable ``wheel`` file::
57 |
58 | make dist
59 |
60 | To produce a distribution tarball::
61 |
62 | make sdist
63 |
64 | .. _buildcontimg:
65 |
66 | Building the base container image
67 | ---------------------------------
68 |
69 | TODO
70 |
71 | Building the RPM
72 | ----------------
73 |
74 | TODO
75 |
--------------------------------------------------------------------------------
/docs/intro.rst:
--------------------------------------------------------------------------------
1 | .. _intro:
2 |
3 | Introduction to Receptor
4 | ========================
5 |
6 | Nodes connect to each other either by accepting connections from other nodes
7 | or establishing connections to nodes themselves. Once these connections are
8 | established they are treated exactly the same in that they are connection
9 | points for the Receptor mesh network and messages pass over these transport
10 | links to their ultimate destinations regardless of how the connection was
11 | established.
12 |
13 | If a connection is lost between two nodes and is considered **stale**, messages
14 | that would normally be routed over that connection will be directed to a more
15 | optimal route. If a more optimal route doesn't exist then messages will be
16 | stored until connectivity is restored to that node, or a new route is created
17 | elsewhere.
18 |
19 | Once a message reaches its destination it's handed off to one of the installed
20 | plugins (see :ref:`plugins`) to perform the work. A plugin can send one or more
21 | response messages back to the sender via the mesh until it is finished.
22 |
23 | .. image:: _static/basic_receptor.png
24 | :align: center
25 | :target: https://github.com/project-receptor/receptor
26 |
27 | .. _concepts:
28 |
29 | Terminology and Concepts
30 | ------------------------
31 |
32 | .. _term_nodes:
33 |
34 | Nodes
35 | ^^^^^
36 |
37 | A **node** is any instance of Receptor running in the mesh, regardless of the role
38 | or purpose it is serving. It is typically connected to other nodes either by
39 | establishing a connection to the other node or receiving a connection.
40 |
41 | Nodes broadcast their availability and capabilities to their neighbors on the
42 | mesh. These broadcasts are then relayed to other nodes such that any other node knows
43 | how to route traffic destined for a particular node or group of nodes.
44 |
45 | A Node is then capable of routing messages from one node that it knows about to
46 | other nodes that it knows about. If it is the target of the message, it can perform
47 | the work and send responses back to the sender.
48 |
49 | .. _term_controller:
50 |
51 | Controllers
52 | ^^^^^^^^^^^
53 |
54 | A **Controller** is a special type of **Node** in that it is responsible for sending
55 | work to other nodes. Typically a controller is some other system or program
56 | that imports the functionality from the **Receptor** codebase in order to link itself
57 | to the mesh of systems that could act on messages or work needed to be performed by
58 | the system.
59 |
60 | There can be one or many controllers present on the mesh. Since controllers are just
61 | another type of node, they can also be capable of performing work or routing messages.
62 |
63 | For more details on writing and integrating controllers see :ref:`controller`
64 |
65 | .. _term_work:
66 |
67 | Messages, Plugins, and Work
68 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
69 |
70 | The Receptor mesh is not opinionated about the messages that are
71 | sent across it. Controllers (that send messages and receive responses) and Plugins (that
72 | receive and act on messages) are expected to agree on a contract regarding the format.
73 |
74 | The Controller interface has multiple input options for taking data and formatting it into
75 | a message before sending it along to the mesh; see :ref:`controller`
76 |
77 | Likewise, the plugin interface has multiple options to allow the plugin to inform the mesh
78 | node of how it wants the data delivered to the worker; see :ref:`plugins`
79 |
80 | Plugins are just python modules that expose a particular entrypoint. When the Receptor node
81 | starts up it will look for those plugins, import them, and include their information
82 | when broadcasting the node's capabilities to its neighbors. All you should need to do to
83 | **install** a plugin is to use **pip** (or yum, dnf, apt, wherever you get your plugins from)
84 |
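For example, installing a plugin with pip (``receptor-http`` here is a stand-in for
whatever package name a plugin ships under)::

    $ pip install receptor-http

The next time the node starts, it will discover the plugin and advertise its
capabilities to the mesh.
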
85 | .. _term_flow:
86 |
87 | Connections and Message Flow
88 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
89 |
90 | By default each Node is configured to start a listening service to accept incoming
91 | connections from other nodes. Each node can **also** be configured to connect
92 | directly to other nodes (called **peers**). This means that each node is likely to have
93 | many connections to other nodes, and this is how the **mesh** is formed.
94 |
95 | Once these connections are established, how they were established makes no difference to how,
96 | or in which direction, messages are routed. The mesh is formed and messages can be routed in
97 | any direction and through any other node.
98 |
99 | .. _term_reliability:
100 |
101 | Stability, Reliability, and Delivery Consistency
102 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
103 |
104 | Receptor makes every effort to deliver messages to the appropriate node, although it makes
105 | no guarantee on the path the message will take through the mesh. If a node along the route
106 | is down or offline then the message will be rerouted through other nodes. If there is no
107 | route available then the message will be stored on the last node that it made it to before
108 | route calculation failed to find another route. The amount of time a message will spend waiting
109 | on the mesh is configurable. If it reaches its timeout the message will be silently dropped.
110 |
111 | A Receptor node itself maintains an internal accounting of messages awaiting delivery but the
112 | mesh itself makes no guarantee that a message **will** be delivered or even ever make it to its
113 | destination. The Controller system should take steps to handle the case where it might not ever
114 | hear back from the mesh about a sent message.
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=build
12 | set SPHINXPROJ=Receptor
13 |
14 | if "%1" == "" goto help
15 |
16 | %SPHINXBUILD% >NUL 2>NUL
17 | if errorlevel 9009 (
18 | echo.
19 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
20 | echo.installed, then set the SPHINXBUILD environment variable to point
21 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
22 | echo.may add the Sphinx directory to PATH.
23 | echo.
24 | echo.If you don't have Sphinx installed, grab it from
25 | echo.http://sphinx-doc.org/
26 | exit /b 1
27 | )
28 |
29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
30 | goto end
31 |
32 | :help
33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
34 |
35 | :end
36 | popd
37 |
--------------------------------------------------------------------------------
/docs/managing.rst:
--------------------------------------------------------------------------------
1 | .. _managing:
2 |
3 | Managing the Receptor Mesh
4 | ==========================
5 |
--------------------------------------------------------------------------------
/docs/messages.rst:
--------------------------------------------------------------------------------
1 | .. _messages:
2 |
3 | Messages and Message Format
4 | ===========================
5 |
6 | Headers and Payloads
7 | --------------------
8 |
9 | At the highest level, most messages (both Controller-sent directives and Plugin replies) are
10 | composed of two parts: a header and a payload.
11 |
12 | Headers contain metadata about the message, such as the destination and sender. The payload is the
13 | raw message containing the data that the node intended to send. Normally senders and consumers of
14 | the Receptor mesh don't have to worry about the details of the transmission format of Messages,
15 | but Controllers need an understanding of some elements of the header, and both Controllers and
16 | Plugins need to understand how payloads are accepted and transmitted.
17 |
18 | Headers on Response messages typically contain the following information
19 |
20 | in_response_to
21 | The message id of the original request that started the work.
22 |
23 | serial
24 | A number representing the numerical sequence of responses from a plugin.
25 |
26 | timestamp
27 | UTC timestamp representing when the reply was sent.
28 |
29 | code
30 | If this value is 1, then a Receptor error occurred and the payload contains the details. A value
31 | of 0 represents a normal response that did not record a Receptor error.
32 |
33 | eof
34 | If true, this response represents the last message sent; it is emitted once the plugin returns.
35 |
36 | Note that some messages will not have a payload and are represented only as headers. An EOF
37 | message response from a plugin is one such message; other messages used internally by Receptor
38 | also do not contain payloads.
39 |
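Because of that, a receive loop should tolerate header-only messages. A minimal sketch,
assuming (as in the Controller examples in :ref:`controller`) that ``payload`` is empty
for such messages::

    message = await controller.recv()
    if not message.payload:
        # Header-only message, e.g. a bare EOF marker
        if message.header.get("eof", False):
            print("work finished")
    else:
        print(message.payload.readall())
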
40 | Message Transmission Architecture
41 | ---------------------------------
42 |
43 | Messages can be sent to the Receptor mesh either by calling the CLI command **receptor send** or
44 | invoking :meth:`receptor.controller.Controller.send`. Internally Messages are represented as a
45 | distinct data structure that encapsulates header information and the payload called a
46 | :class:`receptor.messages.framed.FramedMessage`
47 |
48 | .. image:: _static/framedmessage.png
49 | :align: center
50 | .. rst-class:: clear-both
51 |
52 | Message payloads aren't held in memory. Depending on what kind of inputs are provided when sending
53 | a payload, they are stored on disk until they are ready to be sent over the network and are
54 | represented by an object called a :class:`receptor.messages.framed.FileBackedBuffer`
55 |
56 | .. image:: _static/filebackedbuffer.png
57 | :align: center
58 |
59 | Once a Message is ingested it is sent to the :mod:`receptor.router`. Each Node that we are
60 | connected to is allocated a buffer that outbound messages are put into while they wait their
61 | turn to be sent; this is represented by a :class:`receptor.buffers.file.DurableBuffer`. It is also
62 | likely that if we are relaying Messages from one node to another node that we'll shift the message
63 | into another **DurableBuffer** as soon as we receive it.
64 |
65 | .. image:: _static/buffer_manager.png
66 | :align: center
67 |
68 | When Messages are sent or received they are broken down into transmissible units involving a
69 | :class:`receptor.messages.framed.Frame` describing the actual data element that will follow it.
70 |
71 | The data element itself will be broken down into chunks in order to transmit smaller units.
72 | As a Receptor node receives data from the network these Frames and bytes are pushed into a
73 | :class:`receptor.messages.framed.FramedBuffer` where they are converted back into a
74 | **FramedMessage**. These are what are delivered as Response messages and it's what the work
75 | execution system itself uses to route work to plugins.
76 |
77 | .. image:: _static/frames.png
78 | :align: center
79 |
80 |
--------------------------------------------------------------------------------
/docs/plugins.rst:
--------------------------------------------------------------------------------
1 | .. _plugins:
2 |
3 | Writing a Receptor Plugin
4 | =========================
5 |
6 | The page :ref:`term_work` covers the basics of plugins and messages and how they relate to nodes.
7 |
8 | A plugin is a python module that exposes one or more functions to the Receptor service. These
9 | plugins are installed in the same environment and on the same system as a Receptor node. The
10 | modules themselves don't have to run standalone, instead Receptor discovers them when it starts
11 | and advertises that to other nodes. When work comes into a node it includes some information
12 | about which plugin it should run and Receptor will import and call the requested function.
13 |
14 | Plugins don't specifically have to be aware that Receptor is operating under asyncio; instead
15 | they are launched in a thread. Each plugin is given three parameters
16 |
17 | * The *message* data sent from the Controller
18 | * Any *configuration* specifically defined for it, which includes some details about the receptor
19 | node that sent the message
20 | * A *queue* to place responses into. These responses are delivered back to the Controller
21 | immediately when they are placed into the queue
22 |
23 | There are some great example plugins hosted in the
24 | `Receptor Github Organization <https://github.com/project-receptor>`_.
25 | The simplest ones are the `sleep plugin <https://github.com/project-receptor/receptor-sleep>`_ and
26 | the `http plugin <https://github.com/project-receptor/receptor-http>`_
27 |
28 | Module level documentation can be found in :mod:`receptor.plugin_utils`
29 |
30 | Defining the entrypoint
31 | -----------------------
32 |
33 | When writing a python module to be used as a Receptor plugin, there are two things you need to
34 | do. The first is defining the *entrypoint*. This is typically done in your *setup.py* and recorded
35 | by the python environment at install time
36 |
37 | .. code-block:: python
38 |
39 | entry_points={
40 |     'receptor.worker':
41 |         'your_package_name = your_package_name.your_module',
42 | }
43 |
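In context, a minimal *setup.py* might look like the following sketch
(``your_package_name`` is a placeholder, as above):

.. code-block:: python

    from setuptools import setup, find_packages

    setup(
        name="your_package_name",
        version="0.1.0",
        packages=find_packages(),
        entry_points={
            "receptor.worker": [
                "your_package_name = your_package_name.your_module",
            ],
        },
    )
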
44 | Writing the entrypoint
45 | ----------------------
46 |
47 | The last thing you'll need to do is write the actual function that Receptor will call. When doing
48 | this you'll need to make sure the function is decorated with a special decorator that's provided
49 | by receptor itself
50 |
51 | .. code-block:: python
52 | :linenos:
53 |
54 | import receptor
55 |
56 | @receptor.plugin_export(payload_type=receptor.BYTES_PAYLOAD)
57 | def execute(message, config, result_queue):
58 |     print(f"I just received {message.decode()}")
59 |     result_queue.put("My plugin ran!")
60 |
61 | That's it! When a controller wants to use this plugin they'll use the directive
62 | **your_module:execute**
63 |
64 | In the above example we used the payload type *receptor.BYTES_PAYLOAD* which told Receptor that
65 | we wanted the incoming message delivered as `bytes`. You have 3 options here, depending on how you
66 | expect to handle the data:
67 |
68 | BYTES_PAYLOAD
69 | A value of type `bytes`.
70 |
71 | BUFFER_PAYLOAD
72 | A value that contains a `read()` method so that you can treat the payload as a file handle.
73 |
74 | FILE_PAYLOAD
75 | A temp file path that you can `open()` or do what you want with. This file will be removed once
76 | your function returns.
77 |
78 | Caveats and Expected Behavior
79 | -----------------------------
80 |
81 | Plugins are expected to not run forever. There's no way for a controller to send multiple messages
82 | to a single instance of a plugin, but a plugin can send multiple responses back to a controller.
83 |
84 | Multiple instances of a plugin can be run at the same time corresponding to the number of times
85 | the plugin is invoked by one or more controllers. There is an upper limit to the number of
86 | simultaneous instances allowed to run; other invocations will wait until older ones are finished.
87 |
88 | The plugin itself is intended to be mostly stateless and to receive all of the information it
89 | needs to perform work from a controller. If additional configuration is needed it's recommended
90 | that the plugin fetch it at invocation time.
91 |
92 | Whenever a plugin finishes by *returning*, a final *EOF* message is sent back to the controller
93 | so that the caller knows when the work is done.
94 |
95 | Controllers can get details about work that is running on a remote Receptor node by sending a *ping*.
96 |
--------------------------------------------------------------------------------
/docs/security.rst:
--------------------------------------------------------------------------------
1 | .. _security:
2 |
3 | Mesh and Message Security
4 | =========================
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | receptor
2 | ========
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | receptor
8 |
--------------------------------------------------------------------------------
/docs/source/receptor.buffers.rst:
--------------------------------------------------------------------------------
1 | receptor.buffers package
2 | ========================
3 |
4 | Submodules
5 | ----------
6 |
7 | receptor.buffers.file module
8 | ----------------------------
9 |
10 | .. automodule:: receptor.buffers.file
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 |
16 | Module contents
17 | ---------------
18 |
19 | .. automodule:: receptor.buffers
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
--------------------------------------------------------------------------------
/docs/source/receptor.connection.rst:
--------------------------------------------------------------------------------
1 | receptor.connection package
2 | ===========================
3 |
4 | Submodules
5 | ----------
6 |
7 | receptor.connection.base module
8 | -------------------------------
9 |
10 | .. automodule:: receptor.connection.base
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | receptor.connection.manager module
16 | ----------------------------------
17 |
18 | .. automodule:: receptor.connection.manager
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | receptor.connection.sock module
24 | -------------------------------
25 |
26 | .. automodule:: receptor.connection.sock
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | receptor.connection.ws module
32 | -----------------------------
33 |
34 | .. automodule:: receptor.connection.ws
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 |
40 | Module contents
41 | ---------------
42 |
43 | .. automodule:: receptor.connection
44 | :members:
45 | :undoc-members:
46 | :show-inheritance:
47 |
--------------------------------------------------------------------------------
/docs/source/receptor.logstash_formatter.rst:
--------------------------------------------------------------------------------
1 | receptor.logstash\_formatter package
2 | ====================================
3 |
4 | Submodules
5 | ----------
6 |
7 | receptor.logstash\_formatter.logstash module
8 | --------------------------------------------
9 |
10 | .. automodule:: receptor.logstash_formatter.logstash
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 |
16 | Module contents
17 | ---------------
18 |
19 | .. automodule:: receptor.logstash_formatter
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
--------------------------------------------------------------------------------
/docs/source/receptor.messages.rst:
--------------------------------------------------------------------------------
1 | receptor.messages package
2 | =========================
3 |
4 | Submodules
5 | ----------
6 |
7 | receptor.messages.directive module
8 | ----------------------------------
9 |
10 | .. automodule:: receptor.messages.directive
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | receptor.messages.framed module
16 | -------------------------------
17 |
18 | .. automodule:: receptor.messages.framed
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 |
24 | Module contents
25 | ---------------
26 |
27 | .. automodule:: receptor.messages
28 | :members:
29 | :undoc-members:
30 | :show-inheritance:
31 |
--------------------------------------------------------------------------------
/docs/source/receptor.rst:
--------------------------------------------------------------------------------
1 | receptor package
2 | ================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 |
9 | receptor.buffers
10 | receptor.connection
11 | receptor.logstash_formatter
12 | receptor.messages
13 | receptor.worker
14 |
15 | Submodules
16 | ----------
17 |
18 | receptor.bridgequeue module
19 | ---------------------------
20 |
21 | .. automodule:: receptor.bridgequeue
22 | :members:
23 | :undoc-members:
24 | :show-inheritance:
25 |
26 | receptor.config module
27 | ----------------------
28 |
29 | .. automodule:: receptor.config
30 | :members:
31 | :undoc-members:
32 | :show-inheritance:
33 |
34 | receptor.controller module
35 | --------------------------
36 |
37 | .. automodule:: receptor.controller
38 | :members:
39 | :undoc-members:
40 | :show-inheritance:
41 |
42 | receptor.entrypoints module
43 | ---------------------------
44 |
45 | .. automodule:: receptor.entrypoints
46 | :members:
47 | :undoc-members:
48 | :show-inheritance:
49 |
50 | receptor.exceptions module
51 | --------------------------
52 |
53 | .. automodule:: receptor.exceptions
54 | :members:
55 | :undoc-members:
56 | :show-inheritance:
57 |
58 | receptor.plugin\_utils module
59 | -----------------------------
60 |
61 | .. automodule:: receptor.plugin_utils
62 | :members:
63 | :undoc-members:
64 | :show-inheritance:
65 |
66 | receptor.receptor module
67 | ------------------------
68 |
69 | .. automodule:: receptor.receptor
70 | :members:
71 | :undoc-members:
72 | :show-inheritance:
73 |
74 | receptor.router module
75 | ----------------------
76 |
77 | .. automodule:: receptor.router
78 | :members:
79 | :undoc-members:
80 | :show-inheritance:
81 |
82 | receptor.serde module
83 | ---------------------
84 |
85 | .. automodule:: receptor.serde
86 | :members:
87 | :undoc-members:
88 | :show-inheritance:
89 |
90 | receptor.stats module
91 | ---------------------
92 |
93 | .. automodule:: receptor.stats
94 | :members:
95 | :undoc-members:
96 | :show-inheritance:
97 |
98 | receptor.work module
99 | --------------------
100 |
101 | .. automodule:: receptor.work
102 | :members:
103 | :undoc-members:
104 | :show-inheritance:
105 |
106 |
107 | Module contents
108 | ---------------
109 |
110 | .. automodule:: receptor
111 | :members:
112 | :undoc-members:
113 | :show-inheritance:
114 |
--------------------------------------------------------------------------------
/docs/source/receptor.worker.rst:
--------------------------------------------------------------------------------
1 | receptor.worker package
2 | =======================
3 |
4 | Submodules
5 | ----------
6 |
7 | receptor.worker.demo module
8 | ---------------------------
9 |
10 | .. automodule:: receptor.worker.demo
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 |
16 | Module contents
17 | ---------------
18 |
19 | .. automodule:: receptor.worker
20 | :members:
21 | :undoc-members:
22 | :show-inheritance:
23 |
--------------------------------------------------------------------------------
/installer/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy Receptor Mesh Network
3 | hosts: all
4 | vars:
5 |     # any pip-installable spec: the git URL below, or a pinned release such as "receptor==1.0"
6 | receptor_version: git+https://github.com/project-receptor/receptor
7 | roles:
8 | - role: receptor_install
9 | become: true
10 |
--------------------------------------------------------------------------------
/installer/roles/receptor_install/files/receptor.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Receptor Node Service
3 |
4 | [Service]
5 | ExecStart=/usr/local/bin/receptor --node-id=%i -c /etc/receptor/receptor-%i.conf node
6 |
7 | [Install]
8 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/installer/roles/receptor_install/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Make sure Python 3 is installed
3 | package:
4 | name: python3
5 | state: present
6 |
7 | - name: Make sure Python 3 pip is installed
8 | package:
9 | name: python3-pip
10 | state: present
11 |
12 | - name: Install git if needed
13 | package:
14 | name: git
15 | state: present
16 | when: "'git' in receptor_version"
17 |
18 | - name: Install Receptor Package
19 | pip:
20 | name: "{{ receptor_version }}"
21 | state: forcereinstall
22 | executable: "pip3"
23 |
24 | # - name: Install Receptor Plugins
25 | # pip:
26 | # name: "{{ item }}"
27 | # state: forcereinstall
28 | # executable: "pip3"
29 | # loop: "{{ install_plugins | default([])}}"
30 |
31 | - name: Add receptor user
32 | user:
33 | name: "{{ receptor_user }}"
34 |
35 | - name: Ensure /etc/receptor exists
36 | file:
37 | path: /etc/receptor
38 | state: directory
39 | owner: "{{ receptor_user }}"
40 |     mode: 0750  # directories need the execute bit to be traversable
41 |
42 | - name: Create the receptor database directory
43 | file:
44 | path: /var/lib/receptor
45 | state: directory
46 | owner: "{{ receptor_user }}"
47 |     mode: 0750
48 |
49 | - name: Install Receptor Configuration File
50 | template:
51 | src: receptor_node.conf.j2
52 | dest: /etc/receptor/receptor-{{ ansible_hostname }}.conf
53 | owner: "{{ receptor_user }}"
54 | mode: 0640
55 |
56 | - name: Add peer definitions to receptor configuration
57 | blockinfile:
58 | path: /etc/receptor/receptor-{{ ansible_hostname }}.conf
59 | block: |
60 | peers={{ node_peers | join(",") }}
61 | when: node_peers is defined and node_peers|length > 0
62 |
63 | - name: Install systemd units
64 | copy:
65 | src: "receptor.service"
66 | dest: "/etc/systemd/system/receptor@.service"
67 | mode: 0644
68 |
69 | - name: Reload systemd units
70 | systemd:
71 | daemon_reload: true
72 |
73 | - name: Start and enable a receptor node with the current hostname
74 | systemd:
75 | name: "receptor@{{ ansible_hostname }}"
76 | state: started
77 | enabled: true
78 |
--------------------------------------------------------------------------------
/installer/roles/receptor_install/templates/receptor_node.conf.j2:
--------------------------------------------------------------------------------
1 | [default]
2 | debug={{ server_debug | default(False) | bool }}
3 |
4 | [node]
5 | listen={{ server_address | default("0.0.0.0") }}:{{ service_port | default("8888") }}
6 | server_disable={{ server_disable | default(False) | bool }}
7 |
--------------------------------------------------------------------------------
/installer/roles/receptor_install/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | receptor_user: receptor
3 | service_port: 8888
4 | server_address: 0.0.0.0
5 | server_disable: false
6 | server_debug: false
7 |
--------------------------------------------------------------------------------
/packaging/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3 AS builder
2 | COPY receptor setup.* LICENSE.md README.md /receptor/
3 | WORKDIR /receptor
4 | RUN python setup.py bdist_wheel
5 |
6 | FROM fedora:31
7 |
8 | COPY --from=builder /receptor/dist/receptor-*.whl /tmp/
9 | ADD https://github.com/krallin/tini/releases/latest/download/tini /bin/tini
10 | ADD packaging/docker/entrypoint.sh /bin/entrypoint
11 | ADD packaging/docker/receptor.conf /tmp/receptor.conf
12 |
13 | RUN dnf update -y &&\
14 | dnf install -y python3 python3-pip &&\
15 | dnf clean all
16 |
17 | RUN chmod +x /bin/tini /bin/entrypoint &&\
18 | rm -rf /var/cache/yum
19 |
20 | RUN pip3 install --no-cache-dir python-dateutil &&\
21 | pip3 install --no-cache-dir /tmp/receptor-*.whl &&\
22 | rm /tmp/receptor-*.whl
23 |
24 | RUN mkdir /var/lib/receptor
25 | VOLUME /var/lib/receptor
26 |
27 | ENV LANG=en_US.UTF-8
28 | ENV LANGUAGE=en_US:en
29 | ENV LC_ALL=en_US.UTF-8
30 | ENV HOME=/var/lib/receptor
31 | EXPOSE 8888/tcp
32 | WORKDIR /var/lib/receptor
33 | ENTRYPOINT ["entrypoint"]
34 | CMD ["receptor", "-c", "/var/lib/receptor/receptor.conf", "node"]
35 |
--------------------------------------------------------------------------------
/packaging/docker/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # In OpenShift, containers are run as a random high number uid
4 | # that doesn't exist in /etc/passwd, but Ansible module utils
5 | # require a named user. So if we're in OpenShift, we need to make
6 | # one before Ansible runs.
7 | if [ `id -u` -ge 500 ] || [ -z "${CURRENT_UID}" ]; then
8 |
9 | cat << EOF > /tmp/passwd
10 | root:x:0:0:root:/root:/bin/bash
11 | receptor:x:`id -u`:`id -g`:,,,:/receptor:/bin/bash
12 | EOF
13 |
14 | cat /tmp/passwd > /etc/passwd
15 | rm /tmp/passwd
16 | fi
17 |
18 | if [ ! -f /var/lib/receptor/receptor.conf ]; then
19 | cp /tmp/receptor.conf /var/lib/receptor/receptor.conf
20 | fi
21 |
22 | exec tini -- "${@}"
23 |
--------------------------------------------------------------------------------
/packaging/docker/receptor.conf:
--------------------------------------------------------------------------------
1 | [default]
2 | data_dir=/receptor/data
3 | debug=True
4 |
--------------------------------------------------------------------------------
/packaging/rpm/receptor.spec.j2:
--------------------------------------------------------------------------------
1 | %define name receptor
2 | %define version {{ version }}
3 | %define release {{ release }}
4 |
5 | Summary: A flexible multi-service relayer with remote execution and orchestration capabilities
6 | Name: %{name}
7 | Version: %{version}
8 | Release: %{release}
9 | Source0: https://github.com/project-receptor/%{name}/archive/%{version}.tar.gz
10 |
11 | License: Apache
12 | Group: Development/Libraries
13 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
14 | Prefix: %{_prefix}
15 | BuildArch: noarch
16 | Vendor: Red Hat
17 | Url: https://github.com/project-receptor/receptor
18 |
19 | BuildRequires: python3
20 | BuildRequires: python3-setuptools
21 | Requires: python3
22 | Requires: python3-setuptools
23 | Requires: python3-aiohttp
24 | Requires: python3-prometheus-client
25 | Requires: python3-dateutil
26 |
27 | %description
28 | Project Receptor is a flexible multi-service relayer with remote execution and
29 | orchestration capabilities linking controllers with executors across a mesh of nodes.
30 | For the latest documentation see: https://receptor.readthedocs.io/en/latest/
31 |
32 | %prep
33 | %setup -n %{name}-%{version}
34 |
35 | %build
36 | python3 setup.py build
37 |
38 | %install
39 | python3 setup.py install --single-version-externally-managed -O1 --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES
40 |
41 | %clean
42 | rm -rf $RPM_BUILD_ROOT
43 |
44 | %files -f INSTALLED_FILES
45 | %defattr(-,root,root)
46 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = ["poetry>=1.0.5"]
3 | build-backend = "poetry.masonry.api"
4 |
5 | [tool.poetry]
6 | authors = ["Matthew Jones ", "Jesse Jaggars "]
7 | description = "A flexible multi-service relayer with remote execution and orchestration capabilities linking controllers with executors across a mesh of nodes."
8 | license = "Apache-2.0"
9 | name = "receptor"
10 | repository = "https://github.com/project-receptor/receptor"
11 | version = "1.0.0"
12 |
13 | [tool.poetry.dependencies]
14 | aiohttp = ">= 3.6.2, < 4.0"
15 | prometheus_client = ">= 0.7.1, < 0.9"
16 | python = "^3.6"
17 |
18 | [tool.poetry.dev-dependencies]
19 | attrs = "^19.3.0"
20 | click = "^7.0"
21 | flake8 = "^3.7.9"
22 | pylint = "^2.4.4"
23 | pyparsing = "^2.4.5"
24 | pytest = "^5.3.2"
25 | pytest-asyncio = "^0.10.0"
26 | pyyaml = "^5.2"
27 | requests = "^2.22.0"
28 | wait-for = "^1.1.1"
29 | receptor-affinity = { git = "https://github.com/project-receptor/affinity.git" }
30 | tox = "^3.14.5"
31 | yamllint = "^1.20.0"
32 | black = "^19.10b0"
33 | psutil = "^5.7.0"
34 | sphinx = "^2.4.4"
35 |
36 | [tool.poetry.scripts]
37 | receptor = 'receptor.__main__:main'
38 |
39 | [tool.black]
40 | line-length = 100
41 | target-version = ['py36', 'py37', 'py38']
42 | exclude = '''
43 | /(
44 | \.eggs
45 | | \.git
46 | | \.hg
47 | | \.mypy_cache
48 | | \.tox
49 | | \.venv
50 | | _build
51 | | buck-out
52 | | build
53 | | dist
54 | | docs
55 | | installer
56 | | packaging
57 | )/
58 | '''
59 |
--------------------------------------------------------------------------------
/receptor/__init__.py:
--------------------------------------------------------------------------------
1 | from .controller import Controller # noqa
2 | from .config import ReceptorConfig # noqa
3 | from .plugin_utils import plugin_export, BYTES_PAYLOAD, BUFFER_PAYLOAD, FILE_PAYLOAD # noqa
4 |
--------------------------------------------------------------------------------
/receptor/__main__.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | import logging.config
4 | import sys
5 |
6 | from .config import ReceptorConfig
7 | from .diagnostics import log_buffer
8 | from .logstash_formatter.logstash import LogstashFormatter
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | def main(args=None):
14 |
15 | try:
16 | config = ReceptorConfig(args)
17 | except Exception as e:
18 | logger.error("An error occured while validating the configuration options:\n%s" % (str(e),))
19 | sys.exit(1)
20 |
21 | logging.config.dictConfig(
22 | {
23 | "version": 1,
24 | "disable_existing_loggers": False,
25 | "formatters": {
26 | "simple": {
27 | "format": "{levelname} {asctime} {node_id} {module} {message}",
28 | "style": "{",
29 | },
30 | "structured": {"()": LogstashFormatter},
31 | },
32 | "handlers": {
33 | "console": {
34 | "class": "logging.StreamHandler",
35 | "formatter": "structured"
36 | if config.default_logging_format == "structured"
37 | else "simple",
38 | }
39 | },
40 | "loggers": {
41 | "receptor": {
42 | "handlers": ["console"],
43 | "level": "DEBUG" if config.default_debug else "WARN",
44 | }
45 | },
46 | }
47 | )
48 |
49 | def _f(record):
50 | record.node_id = config.default_node_id
51 | if record.levelno == logging.ERROR:
52 | log_buffer.appendleft(record)
53 | return True
54 |
55 | for h in logging.getLogger("receptor").handlers:
56 | h.addFilter(_f)
57 |
58 | try:
59 | config.go()
60 | except asyncio.CancelledError:
61 | pass
62 | except Exception:
63 | logger.exception("main: an error occured while running receptor")
64 | sys.exit(1)
65 |
66 |
67 | if __name__ == "__main__":
68 | # We were run with python -m
69 | main()
70 |
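
A hedged sketch of driving this entry point programmatically rather than from a shell; the node id is made up, and the call is equivalent to running "receptor --node-id demo node":

    # illustrative only: main() hands the argv-style list to ReceptorConfig,
    # configures logging, then blocks running the node until interrupted
    from receptor.__main__ import main

    main(["--node-id", "demo", "node"])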
--------------------------------------------------------------------------------
/receptor/bridgequeue.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import queue
3 |
4 |
5 | class BridgeQueue(queue.Queue):
6 | """
7 | BridgeQueue is a queue.Queue subclass intended to 'bridge' a real thread
8 |     and a coroutine, where the thread is the producer and the coroutine is
9 | the consumer.
10 |
11 | The queue implements the async iterator protocol for the consuming
12 | coroutine and exposes the normal queue.Queue for threads.
13 |
14 | Additionally, there is a sentinel value assigned to the queue that can be
15 | used to indicate when iteration should cease.
16 | """
17 |
18 | sentinel = object()
19 |
20 | def __aiter__(self):
21 | return self
22 |
23 | async def __anext__(self):
24 | sleep_time = 0.0
25 | while True:
26 | try:
27 | item = self.get_nowait()
28 | sleep_time = 0.0
29 | if item is self.sentinel:
30 | raise StopAsyncIteration
31 | else:
32 | return item
33 | except queue.Empty:
34 | await asyncio.sleep(sleep_time)
35 | sleep_time = min(1.0, sleep_time + 0.1)
36 |
37 | @classmethod
38 | def one(cls, item):
39 | """
40 | Constructs a BridgeQueue with the single provided item followed by
41 | the sentinel value. This function does not block.
42 | """
43 | q = cls()
44 | q.put_nowait(item)
45 | q.close()
46 | return q
47 |
48 | def close(self):
49 | self.put_nowait(self.sentinel)
50 |
51 | def read_from(self, path, chunk_size=2 ** 12):
52 | """
53 | Reads from a file-like object in chunk_size blocks and puts the bytes
54 | into the queue.
55 |
56 | Once the file has been read completely, the queue's sentinel value is
57 | placed into the queue, signaling to the consumer that all data has
58 | been read.
59 | """
60 | with open(path, "rb") as fp:
61 | chunk = fp.read(chunk_size)
62 | while chunk:
63 | self.put(chunk)
64 | chunk = fp.read(chunk_size)
65 | self.put(self.sentinel)
66 |
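
A minimal usage sketch of the thread-to-coroutine bridge described above; the names and values are illustrative, not part of the module:

    import asyncio
    import threading

    from receptor.bridgequeue import BridgeQueue

    def produce(q):
        # runs in a real thread; queue.Queue methods are thread-safe
        for i in range(3):
            q.put(f"item-{i}")
        q.close()  # enqueue the sentinel so iteration stops

    async def consume(q):
        async for item in q:  # polls with a short, backing-off sleep while empty
            print(item)

    q = BridgeQueue()
    threading.Thread(target=produce, args=(q,)).start()
    asyncio.get_event_loop().run_until_complete(consume(q))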
--------------------------------------------------------------------------------
/receptor/buffers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/receptor/buffers/__init__.py
--------------------------------------------------------------------------------
/receptor/buffers/file.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import datetime
3 | import logging
4 | import os
5 | import uuid
6 | from collections import defaultdict
7 | from json.decoder import JSONDecodeError
8 |
9 | from .. import fileio
10 | from .. import serde as json
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class DurableBuffer:
16 | def __init__(self, dir_, key, loop, write_time=1.0):
17 | self._base_path = os.path.join(os.path.expanduser(dir_))
18 | self._message_path = os.path.join(self._base_path, "messages")
19 | self._manifest_path = os.path.join(self._base_path, f"manifest-{key}")
20 | self._loop = loop
21 | self.q = asyncio.Queue(loop=self._loop)
22 | self.deferrer = fileio.Deferrer(loop=self._loop)
23 | self._manifest_lock = asyncio.Lock(loop=self._loop)
24 | self._manifest_dirty = asyncio.Event(loop=self._loop)
25 | self._manifest_clean = asyncio.Event(loop=self._loop)
26 | self._write_time = write_time
27 | self.ready = asyncio.Event(loop=self._loop)
28 | self._loop.create_task(self.start_manifest())
29 |
30 | def clean(self):
31 | self._manifest_dirty.clear()
32 | self._manifest_clean.set()
33 |
34 | def dirty(self):
35 | self._manifest_dirty.set()
36 | self._manifest_clean.clear()
37 |
38 | async def start_manifest(self):
39 | try:
40 | os.makedirs(self._message_path, mode=0o700)
41 | except Exception:
42 | pass
43 |
44 | loaded_items = await self._read_manifest()
45 |
46 | for item in loaded_items:
47 | await self.q.put(item)
48 |
49 | self.ready.set()
50 | self._loop.create_task(self.manifest_writer(self._write_time))
51 |
52 | async def put(self, framed_message):
53 | await self.ready.wait()
54 | path = os.path.join(self._message_path, str(uuid.uuid4()))
55 | item = {
56 | "path": path,
57 | "expire_time": datetime.datetime.utcnow() + datetime.timedelta(minutes=5),
58 | }
59 |
60 | if isinstance(framed_message, bytes):
61 | await fileio.write(path, framed_message)
62 | else:
63 | await fileio.writelines(path, framed_message)
64 |
65 | await self.put_ident(item)
66 |
67 | async def put_ident(self, ident):
68 | await self.q.put(ident)
69 | self.dirty()
70 |
71 | async def get(self):
72 | await self.ready.wait()
73 | while True:
74 | item = await self.q.get()
75 | self.dirty()
76 | try:
77 | if self.is_expired(item):
78 | await self.expire(item)
79 | continue
80 | return item
81 | except (TypeError, KeyError):
82 | logger.debug(
83 | "Something bad was in the durable buffer manifest: %s", item, exc_info=True
84 | )
85 |
86 |     async def _read_manifest(self):
87 |         try:
88 |             data = await fileio.read(self._manifest_path, mode="r")
89 |         except FileNotFoundError:
90 |             return []
91 |         try:
92 |             return json.loads(data)
93 |         except JSONDecodeError:
94 |             logger.error("failed to decode manifest: %s", data)
95 |         except Exception:
96 |             logger.exception("Unknown failure in decoding manifest: %s", data)
97 |         # a bare return inside a finally block would swallow the value returned
98 |         # from the try above, so the fallback return lives at function level
99 |         return []
100 |
101 | def _remove_path(self, path):
102 | if os.path.exists(path):
103 | os.remove(path)
104 | else:
105 | logger.info("Can't remove {}, doesn't exist".format(path))
106 |
107 | def is_expired(self, item):
108 | return item["expire_time"] < datetime.datetime.utcnow()
109 |
110 | async def expire(self, item):
111 | # TODO: we should do something more than just log expirations
112 | # Consider sending a message to the sender
113 | logger.info("Expiring message %s", item["path"])
114 |         await self.deferrer.defer(self._remove_path, item["path"])
115 |
116 | async def expire_all(self):
117 | async with self._manifest_lock:
118 | old, self.q = self.q, asyncio.Queue(loop=self._loop)
119 | while old.qsize() > 0:
120 | item = await old.get()
121 | if self.is_expired(item):
122 | await self.expire(item)
123 | else:
124 | await self.q.put(item)
125 | self.dirty()
126 |
127 | async def manifest_writer(self, write_time):
128 | while True:
129 | await self._manifest_dirty.wait()
130 | async with self._manifest_lock:
131 | try:
132 | data = json.dumps(list(self.q._queue))
133 | await fileio.write(self._manifest_path, data, mode="w")
134 | self.clean()
135 | except Exception:
136 | logger.exception("Failed to write manifest for %s", self._manifest_path)
137 | await asyncio.sleep(write_time)
138 |
139 |
140 | class FileBufferManager(defaultdict):
141 |     def __init__(self, path, loop=None):
142 |         self.path = path
143 |         self.loop = loop or asyncio.get_event_loop()
144 |
145 | def __missing__(self, key):
146 | self[key] = DurableBuffer(self.path, key, self.loop)
147 | return self[key]
148 |
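
A hedged sketch of the DurableBuffer put/get lifecycle under an event loop; the directory and key are invented for illustration and assume a working receptor installation:

    import asyncio

    from receptor.buffers.file import DurableBuffer

    async def demo(loop):
        buf = DurableBuffer("/tmp/receptor-demo", "node-a", loop)
        await buf.put(b"framed message bytes")  # persisted to its own message file
        item = await buf.get()  # a manifest entry: {"path": ..., "expire_time": ...}
        print(item["path"])

    loop = asyncio.get_event_loop()
    loop.run_until_complete(demo(loop))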
--------------------------------------------------------------------------------
/receptor/config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import configparser
3 | import logging
4 | import os
5 | import ssl
6 |
7 | from .entrypoints import run_as_node, run_as_ping, run_as_send, run_as_status
8 | from .exceptions import ReceptorRuntimeError, ReceptorConfigError
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 | SINGLETONS = {}
13 | SUBCOMMAND_EXTRAS = {
14 | "node": {"hint": "Run a Receptor node", "entrypoint": run_as_node, "is_ephemeral": False},
15 | "ping": {"hint": "Ping a Receptor node", "entrypoint": run_as_ping, "is_ephemeral": True},
16 | "send": {"hint": "Send a directive to a node", "entrypoint": run_as_send, "is_ephemeral": True},
17 | "status": {
18 | "hint": "Display status of the Receptor network",
19 | "entrypoint": run_as_status,
20 | "is_ephemeral": True,
21 | },
22 | }
23 |
24 |
25 | class ConfigOption:
26 | def __init__(self, value, value_type, listof=None):
27 | self.value = value
28 | self.value_type = value_type
29 | self.listof = listof
30 |
31 |
32 | class ReceptorConfig:
33 | """
34 | The primary configuration object for Receptor
35 |
36 | This will be passed to the :class:`receptor.controller.Controller` in order to set up certain
37 | properties of the connection. An instance of this class is also responsible for reading and
38 |     parsing the Receptor config file and any environment variables. Overrides can be specified
39 |     by the caller by passing a list of command-line style arguments::
40 |
41 |         config = receptor.ReceptorConfig(args=["-c", "/opt/receptor.conf"])
42 | config.default_data_dir = "/var/run/"
43 | controller = receptor.Controller(config)
44 |
45 | Some options are only relevant when running as a node from the command line. When invoking
46 | the :class:`receptor.controller.Controller` interface in your own code, options such as peers
47 | and listen addresses will be set up using Controller methods.
48 | """
49 |
50 | def __init__(self, args=None):
51 | self._config_options = {}
52 | self._cli_args = argparse.ArgumentParser("receptor")
53 | self._cli_sub_args = self._cli_args.add_subparsers()
54 | self._parsed_args = None
55 | self._config_file = configparser.ConfigParser(allow_no_value=True, delimiters=("=",))
56 | self._is_ephemeral = False
57 |
58 | # Default options, which apply to all sub-commands.
59 | self.add_config_option(
60 | section="default",
61 | key="node_id",
62 | default_value="",
63 | value_type="str",
64 | hint="""Set/override node identifier. If unspecified here or in a config file,
65 | one will be automatically generated.""",
66 | )
67 | self.add_config_option(
68 | section="default",
69 | key="config",
70 | short_option="-c",
71 | default_value="/etc/receptor/receptor.conf",
72 | value_type="path",
73 | hint="Path to the Receptor configuration file.",
74 | )
75 | self.add_config_option(
76 | section="default",
77 | key="data_dir",
78 | short_option="-d",
79 | default_value=None,
80 | value_type="path",
81 | hint="Path to the directory where Receptor stores its database and metadata.",
82 | )
83 | self.add_config_option(
84 | section="default",
85 | key="debug",
86 | default_value=None,
87 | set_value=True,
88 | value_type="bool",
89 | hint="Emit debugging output.",
90 | )
91 |         default_max_workers = min(32, (os.cpu_count() or 1) + 4)
92 | self.add_config_option(
93 | section="default",
94 | key="max_workers",
95 | default_value=default_max_workers,
96 | value_type="int",
97 | hint=f"""Size of the thread pool for worker threads. If unspecified,
98 | defaults to {default_max_workers}""",
99 | )
100 | self.add_config_option(
101 | section="default",
102 | key="logging_format",
103 | default_value="simple",
104 | value_type="str",
105 | hint='''Format of logging output. Options are "simple" and "structured",
106 | default is "simple"''',
107 | )
108 | # Auth section options. This is a new section for the config file only,
109 | # so all of these options use `subparse=False`.
110 | self.add_config_option(
111 | section="auth",
112 | key="server_cert",
113 | default_value="",
114 | value_type="str",
115 | subparse=False,
116 | hint="Path to the SSL/TLS server certificate file.",
117 | )
118 | self.add_config_option(
119 | section="auth",
120 | key="server_key",
121 | default_value="",
122 | value_type="str",
123 | subparse=False,
124 | hint="Path to the SSL/TLS server certificate key file.",
125 | )
126 | self.add_config_option(
127 | section="auth",
128 | key="server_ca_bundle",
129 | default_value=None,
130 | value_type="str",
131 | subparse=False,
132 | hint="Path to the CA bundle used by clients to verify servers.",
133 | )
134 | self.add_config_option(
135 | section="auth",
136 | key="client_cert",
137 | default_value="",
138 | value_type="str",
139 | subparse=False,
140 | hint="Path to the SSL/TLS client certificate file.",
141 | )
142 | self.add_config_option(
143 | section="auth",
144 | key="client_key",
145 | default_value="",
146 | value_type="str",
147 | subparse=False,
148 | hint="Path to the SSL/TLS client certificate key file.",
149 | )
150 | self.add_config_option(
151 | section="auth",
152 | key="client_verification_ca",
153 | default_value=None,
154 | value_type="str",
155 | subparse=False,
156 | hint="Path to the CA bundle used by servers to verify clients.",
157 | )
158 | self.add_config_option(
159 | section="auth",
160 | key="server_cipher_list",
161 | default_value=None,
162 | value_type="str",
163 | subparse=False,
164 | hint="TLS cipher list for use by the server.",
165 | )
166 | self.add_config_option(
167 | section="auth",
168 | key="client_cipher_list",
169 | default_value=None,
170 | value_type="str",
171 | subparse=False,
172 | hint="TLS cipher list for use by the client.",
173 | )
174 | # Receptor node options
175 | self.add_config_option(
176 | section="node",
177 | key="listen",
178 | default_value=["rnp://0.0.0.0:8888"],
179 | value_type="list",
180 | hint="""Set/override IP address and port to listen on. If not set here
181 | or in a config file, the default is rnp://0.0.0.0:8888.""",
182 | )
183 | self.add_config_option(
184 | section="node",
185 | key="peers",
186 | short_option="-p",
187 | long_option="--peer",
188 | default_value=[],
189 | value_type="list",
190 | listof="str",
191 | hint="Set/override peer nodes to connect to. Use multiple times for multiple peers.",
192 | )
193 | self.add_config_option(
194 | section="node",
195 | key="server_disable",
196 | long_option="--server-disable",
197 | default_value=False,
198 | set_value=True,
199 | value_type="bool",
200 | hint="Disable the server function and only connect to configured peers",
201 | )
202 | self.add_config_option(
203 | section="node",
204 | key="stats_enable",
205 | default_value=None,
206 | set_value=True,
207 | value_type="bool",
208 | hint="Enable Prometheus style stats port",
209 | )
210 | self.add_config_option(
211 | section="node",
212 | key="stats_port",
213 | default_value=8889,
214 | value_type="int",
215 | hint="Port to listen for requests to show stats",
216 | )
217 | self.add_config_option(
218 | section="node",
219 | key="keepalive_interval",
220 | default_value=-1,
221 | value_type="int",
222 | hint="""If specified, the node will ping all other known nodes in the mesh
223 | every N seconds. The default is -1, meaning no pings are sent.""",
224 | )
225 | self.add_config_option(
226 | section="node",
227 | key="groups",
228 | short_option="-g",
229 | long_option="--group",
230 | default_value=[],
231 | value_type="list",
232 | listof="str",
233 | hint="Define membership in one or more groups to aid in message routing",
234 | )
235 | self.add_config_option(
236 | section="node",
237 | key="ws_extra_headers",
238 | long_option="--ws_extra_header",
239 | default_value=[],
240 | value_type="key-value-list",
241 | hint="Set additional headers to provide when connecting to websocket peers.",
242 | )
243 | self.add_config_option(
244 | section="node",
245 | key="ws_heartbeat",
246 | long_option="--ws_heartbeat",
247 | default_value=None,
248 | value_type="int",
249 | hint="Set heartbeat interval for websocket connections.",
250 | )
251 | # ping options
252 | self.add_config_option(
253 | section="ping",
254 | key="peer",
255 | default_value="localhost:8888",
256 | value_type="str",
257 | hint="""The peer to relay the ping directive through. If unspecified here or
258 | in a config file, localhost:8888 will be used.""",
259 | )
260 | self.add_config_option(
261 | section="ping",
262 | key="count",
263 | default_value=4,
264 | value_type="int",
265 | hint="""Number of pings to send. If set to zero, pings will be continuously
266 | sent until interrupted.""",
267 | )
268 | self.add_config_option(
269 | section="ping",
270 | key="delay",
271 | default_value=1,
272 | value_type="float",
273 | hint="The delay (in seconds) to wait between pings. If set to zero,"
274 | "pings will be sent as soon as the previous response is received.",
275 | )
276 | self.add_config_option(
277 | section="ping",
278 | key="recipient",
279 | long_option="ping_recipient",
280 | default_value="",
281 | value_type="str",
282 | hint="Node ID of the Receptor node to ping.",
283 | )
284 | self.add_config_option(
285 | section="ping",
286 | key="ws_extra_headers",
287 | long_option="--ws_extra_header",
288 | default_value=[],
289 | value_type="key-value-list",
290 | hint="Set additional headers to provide when connecting to websocket peers.",
291 | )
292 | self.add_config_option(
293 | section="ping",
294 | key="ws_heartbeat",
295 | long_option="--ws_heartbeat",
296 | default_value=None,
297 | value_type="int",
298 | hint="Set heartbeat interval for websocket connections.",
299 | )
300 | # send options
301 | self.add_config_option(
302 | section="send",
303 | key="peer",
304 | default_value="localhost:8888",
305 | value_type="str",
306 | hint="""The peer to relay the directive through. If unspecified here or in a config
307 | file, localhost:8888 will be used.""",
308 | )
309 | self.add_config_option(
310 | section="send",
311 | key="directive",
312 | default_value="",
313 | value_type="str",
314 | hint="Directive to send.",
315 | )
316 | self.add_config_option(
317 | section="send",
318 | key="recipient",
319 | long_option="send_recipient",
320 | default_value="",
321 | value_type="str",
322 | hint="Node ID of the Receptor node to ping.",
323 | )
324 | self.add_config_option(
325 | section="send",
326 | key="payload",
327 | long_option="send_payload",
328 | default_value="",
329 | value_type="str",
330 | hint="""Payload of the directive to send. Use - for stdin or give the path
331 | to a file to transmit the file contents.""",
332 | )
333 | self.add_config_option(
334 | section="send",
335 | key="ws_extra_headers",
336 | long_option="--ws_extra_header",
337 | default_value=[],
338 | value_type="list",
339 | hint="Set additional headers to provide when connecting to websocket peers.",
340 | )
341 | self.add_config_option(
342 | section="send",
343 | key="ws_heartbeat",
344 | long_option="--ws_heartbeat",
345 | default_value=None,
346 | value_type="int",
347 | hint="Set heartbeat interval for websocket connections.",
348 | )
349 | # status options
350 | self.add_config_option(
351 | section="status",
352 | key="peer",
353 | default_value="localhost:8888",
354 | value_type="str",
355 | hint="""The peer to access the mesh through. If unspecified here or in a config file,
356 | localhost:8888 will be used.""",
357 | )
358 | self.add_config_option(
359 | section="status",
360 | key="ws_extra_headers",
361 | long_option="--ws_extra_header",
362 | default_value=[],
363 | value_type="key-value-list",
364 | listof="str",
365 | hint="Set additional headers to provide when connecting to websocket peers.",
366 | )
367 | self.add_config_option(
368 | section="status",
369 | key="show_ephemeral",
370 | default_value=None,
371 | set_value=True,
372 | value_type="bool",
373 | hint="Show ephemeral nodes in output",
374 | )
375 | self.add_config_option(
376 | section="status",
377 | key="ws_heartbeat",
378 | long_option="--ws_heartbeat",
379 | default_value=None,
380 | value_type="int",
381 | hint="Set heartbeat interval for websocket connections.",
382 | )
383 | self.parse_options(args)
384 |
385 | def add_config_option(
386 | self,
387 | section,
388 | key,
389 | cli=True,
390 | short_option="",
391 | long_option="",
392 | default_value=None,
393 | set_value=None,
394 | value_type=None,
395 | listof=None,
396 | subparse=True,
397 | hint=None,
398 | ):
399 | config_entry = "%s_%s" % (section, key)
400 | if cli:
401 | # for lists, we switch the action from 'store' to 'append'
402 | action = "store"
403 | if value_type == "list" or value_type == "key-value-list":
404 | action = "append"
405 | if value_type == "bool":
406 | action = "store_const"
407 | # unless specified, the long_option name is the key
408 | # (with underscores turned into dashes)
409 | if not long_option:
410 | long_option = "--%s" % (key.replace("_", "-"),)
411 | # because we're building this on the fly, it's easier to create the args/kwargs
412 | # for argparse like this instead of trying to actually call the method with all
413 | # of the positional args correctly
414 | args = []
415 | if short_option:
416 | args.append(short_option)
417 | args.append(long_option)
418 | kwargs = {"help": hint, "action": action}
419 | # if the long option doesn't start with '--' it's a positional arg, in which
420 | # case we don't want to use dest= because it will cause an argparse conflict
421 | if long_option.startswith("--"):
422 | kwargs["dest"] = config_entry
423 | # some special handling for bools to make sure we don't always override lower
424 | # precedence options with the cli default (which happens if we tried to use
425 | # store_true or store_false for bools).
426 | if value_type == "bool":
427 | kwargs["const"] = set_value
428 |         # if we're in the default section, or if we explicitly don't want this section
429 | # turned into a subparser, we add the option directly, otherwise we put it in
430 | # a subparser based on the section name
431 | if section == "default" or not subparse:
432 | self._cli_args.add_argument(*args, **kwargs)
433 | else:
434 | try:
435 | subparser = self._cli_sub_args.choices[section]
436 | except KeyError:
437 | sub_extra = SUBCOMMAND_EXTRAS.get(section, None)
438 | if sub_extra:
439 | subparser = self._cli_sub_args.add_parser(section, help=sub_extra["hint"])
440 | subparser.set_defaults(func=sub_extra["entrypoint"])
441 | subparser.set_defaults(ephemeral=sub_extra["is_ephemeral"])
442 | subparser.add_argument(*args, **kwargs)
443 |
444 | # finally, we add the ConfigOption to the internal dict for tracking
445 | self._config_options[config_entry] = ConfigOption(default_value, value_type, listof)
446 |
447 | def _get_config_value(self, key, ignore_config_file=False):
448 | value = None
449 |
450 | # lowest precedence is the config file
451 | (section, section_key) = key.split("_", 1)
452 | if not ignore_config_file and section in self._config_file and self._config_file[section]:
453 | try:
454 | value = self._config_file[section][section_key]
455 | except KeyError:
456 | pass
457 | # next layer of precedence is environment variables. All env
458 | # variable names are of the form RECEPTOR_SECTION_{KEY_NAME}
459 | # (the 'key' variable contains both the section and key name)
460 | env_name = "RECEPTOR_" + key.upper()
461 | env_value = os.environ.get(env_name, None)
462 | if env_value is not None:
463 | value = env_value
464 | # finally, the cli args are the highest level of precedence
465 | cli_value = getattr(self._parsed_args, key, None)
466 | if cli_value is not None:
467 | value = cli_value
468 | # finally return whatever the value was set to (or not)
469 | return value
470 |
471 | def parse_options(self, args):
472 | # first we parse the cli args
473 | self._parsed_args = self._cli_args.parse_args(args)
474 | # we manually force the config entry to be parsed first, since
475 | # we need it before we do anything else
476 | config_entry = self._config_options["default_config"]
477 | config_path = self._get_config_value("default_config", ignore_config_file=True)
478 | if config_path is not None:
479 | config_entry.value = config_path
480 | self._enforce_entry_type(config_entry)
481 | # next we read the config file
482 | self._config_file.read([config_entry.value])
483 | # then we loop through our config options, based on the option
484 | # precedence of CLI > environment > config file
485 | for key in self._config_options:
486 | # we already did this, so lets not waste time doing it over
487 | if key == "default_config":
488 | continue
489 | entry = self._config_options[key]
490 | value = self._get_config_value(key)
491 | if value is not None:
492 | entry.value = value
493 | # because env variables and configparser do not enforce the
494 | # value type, we do it now to ensure we have the type we want
495 | self._enforce_entry_type(entry)
496 | # Parse plugin_ sections to populate plugin configuration
497 | self._config_options["plugins"] = {}
498 | if self._config_file:
499 | for section in filter(lambda x: x.startswith("plugin_"), self._config_file.sections()):
500 | self._config_options["plugins"][section.replace("plugin_", "")] = dict(
501 | self._config_file[section]
502 | )
503 | # If we did not get a data_dir from anywhere else, use a default
504 | if self._config_options["default_data_dir"].value is None:
505 | if self._is_ephemeral:
506 | self._config_options["default_data_dir"].value = "/tmp/receptor"
507 | else:
508 | self._config_options["default_data_dir"].value = "/var/lib/receptor"
509 |
510 | def _enforce_entry_type(self, entry):
511 | if entry.value is not None:
512 | if entry.value_type == "list" or entry.value_type == "key-value-list":
513 | if not isinstance(entry.value, list):
514 | entry.value = entry.value.split(",")
515 | if entry.value_type == "key-value-list":
516 | entry.value = [
517 | (key.strip(), value.strip())
518 | for key, sep, value in [s.partition(":") for s in entry.value]
519 | ]
520 | else:
521 | for idx, value in enumerate(entry.value):
522 | entry.value[idx] = self._enforce_value_type(value, entry.listof)
523 | else:
524 | entry.value = self._enforce_value_type(entry.value, entry.value_type)
525 |
526 | def _enforce_value_type(self, value, value_type):
527 | try:
528 | if callable(value_type):
529 | return value_type(value)
530 | elif value_type == "int" and not isinstance(value, int):
531 | return int(value)
532 | elif value_type == "float" and not isinstance(value, float):
533 | return float(value)
534 | elif value_type == "str" and not isinstance(value, str):
535 | return "%s" % (value,)
536 | elif value_type == "bool" and not isinstance(value, bool):
537 | if isinstance(value, str):
538 | if value.lower() in ("yes", "y", "true"):
539 | return True
540 | else:
541 | return False
542 | elif isinstance(value, int):
543 | if value != 0:
544 | return True
545 | else:
546 | return False
547 | else:
548 | raise Exception(
549 | "could not convert '%s' (type: %s) to a boolean value"
550 | % (value, type(value))
551 | )
552 | elif value_type == "path":
553 | # FIXME: implement, or do we care if it's really a path and not just a string?
554 | return os.path.expanduser(value)
555 | else:
556 | return value
557 | except Exception as e:
558 | if value is None:
559 | return None
560 | raise ReceptorConfigError(e)
561 |
562 | def go(self):
563 | if not self._parsed_args:
564 | raise ReceptorRuntimeError("there are no parsed args yet")
565 | elif not hasattr(self._parsed_args, "func"):
566 | raise ReceptorRuntimeError(
567 | "you must specify a subcommand (%s)." % (", ".join(SUBCOMMAND_EXTRAS.keys()),)
568 | )
569 | self._is_ephemeral = self._parsed_args.ephemeral
570 | self._parsed_args.func(self)
571 |
572 | def get_ssl_context(self, context_type):
573 | if context_type == "server":
574 | return self.get_server_ssl_context()
575 | elif context_type == "client":
576 | return self.get_client_ssl_context()
577 | else:
578 | raise ReceptorRuntimeError(f"Unknown SSL context type {context_type}")
579 |
580 | def get_client_ssl_context(self):
581 | logger.debug("Loading TLS Client Context")
582 | ca_bundle = self.auth_server_ca_bundle
583 | ca_bundle = (
584 | ca_bundle if ca_bundle else None
585 | ) # Make false-like values like '' explicitly None
586 | sc = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
587 | sc.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
588 | if self.auth_client_cipher_list:
589 | sc.set_ciphers(self.auth_client_cipher_list)
590 | sc.verify_mode = ssl.CERT_REQUIRED
591 | if self.auth_server_ca_bundle:
592 | sc.load_verify_locations(self.auth_server_ca_bundle)
593 | else:
594 | sc.load_default_certs(ssl.Purpose.SERVER_AUTH)
595 | if self.auth_client_cert and self.auth_client_key:
596 | sc.load_cert_chain(self.auth_client_cert, self.auth_client_key)
597 | return sc
598 |
599 | def get_server_ssl_context(self):
600 | logger.debug("Loading TLS Server Context")
601 | ca_bundle = self.auth_client_verification_ca
602 | ca_bundle = (
603 | ca_bundle if ca_bundle else None
604 | ) # Make false-like values like '' explicitly None
605 | sc = ssl.SSLContext(ssl.PROTOCOL_TLS)
606 | sc.options |= ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
607 | if self.auth_server_cipher_list:
608 | sc.set_ciphers(self.auth_server_cipher_list)
609 | if self.auth_client_verification_ca:
610 | sc.load_verify_locations(self.auth_client_verification_ca)
611 | sc.verify_mode = ssl.CERT_REQUIRED
612 | sc.check_hostname = False
613 | else:
614 | sc.load_default_certs(ssl.Purpose.CLIENT_AUTH)
615 | sc.load_cert_chain(self.auth_server_cert, self.auth_server_key)
616 | return sc
617 |
618 | def __getattr__(self, key):
619 | value = self._config_options[key]
620 | if type(value) is dict:
621 | return value
622 | else:
623 | return self._config_options[key].value
624 |
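
A hedged sketch of the precedence implemented in _get_config_value (config file < environment < CLI). The option default_node_id and the RECEPTOR_<SECTION>_<KEY> environment naming come from the code above; the values are made up:

    import os

    from receptor.config import ReceptorConfig

    # an environment variable overrides anything read from the config file...
    os.environ["RECEPTOR_DEFAULT_NODE_ID"] = "from-env"

    # ...but an explicit CLI flag outranks both
    config = ReceptorConfig(args=["--node-id", "from-cli"])
    assert config.default_node_id == "from-cli"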
--------------------------------------------------------------------------------
/receptor/connection/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/receptor/connection/__init__.py
--------------------------------------------------------------------------------
/receptor/connection/base.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | import os
4 | from abc import abstractmethod, abstractproperty
5 | from collections.abc import AsyncIterator
6 |
7 | from .. import fileio
8 | from ..bridgequeue import BridgeQueue
9 | from ..messages.framed import FramedBuffer
10 | from ..stats import bytes_recv
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | def log_ssl_detail(transport):
16 | peername = transport.get_extra_info("peername")
17 | if transport.get_extra_info("ssl_object", None):
18 | cipher = transport.get_extra_info("cipher")
19 | peercert = transport.get_extra_info("peercert")
20 |         logger.debug(
21 |             f"{cipher[1]} connection with {str(peername)} using cipher {cipher[0]} "
22 |             f"and certificate {str(peercert)}."
23 |         )
24 | else:
25 | logger.debug(f"Unencrypted connection with {str(peername)}.")
26 |
27 |
28 | class Transport(AsyncIterator):
29 | @abstractmethod
30 | async def close(self):
31 | pass
32 |
33 | @abstractproperty
34 | def closed(self):
35 | pass
36 |
37 | @abstractmethod
38 | def send(self, q):
39 | pass
40 |
41 |
42 | class Worker:
43 | def __init__(self, receptor, loop):
44 | self.receptor = receptor
45 | self.loop = loop
46 | self.conn = None
47 | self.buf = FramedBuffer(loop=self.loop)
48 | self.remote_id = None
49 | self.read_task = None
50 | self.handle_task = None
51 | self.write_task = None
52 | self.outbound = None
53 | self.deferrer = fileio.Deferrer(loop=self.loop)
54 |
55 | def start_receiving(self):
56 | self.read_task = self.loop.create_task(self.receive())
57 |
58 | async def receive(self):
59 | try:
60 | async for msg in self.conn:
61 | if self.conn.closed:
62 | break
63 | bytes_recv.inc(len(msg))
64 | await self.buf.put(msg)
65 | except ConnectionResetError:
66 | logger.debug("receive: other side closed the connection")
67 | except asyncio.CancelledError:
68 | logger.debug("receive: cancel request received")
69 | except Exception:
70 | logger.exception("receive")
71 |
72 | async def register(self):
73 | await self.receptor.update_connections(self.conn, id_=self.remote_id)
74 |
75 | async def unregister(self):
76 | await self.receptor.remove_connection(self.conn, id_=self.remote_id)
77 | self._cancel(self.read_task)
78 | self._cancel(self.handle_task)
79 | self._cancel(self.write_task)
80 |
81 | def _cancel(self, task):
82 | if task:
83 | task.cancel()
84 |
85 | async def hello(self):
86 | msg = self.receptor._say_hi().serialize()
87 | await self.conn.send(BridgeQueue.one(msg))
88 |
89 | async def start_processing(self):
90 | await self.receptor.recalculate_and_send_routes_soon()
91 | logger.debug("starting normal loop")
92 | self.handle_task = self.loop.create_task(self.receptor.message_handler(self.buf))
93 | self.outbound = self.receptor.buffer_mgr[self.remote_id]
94 | self.write_task = self.loop.create_task(self.watch_queue())
95 | return await self.write_task
96 |
97 | async def close(self):
98 | if self.conn is not None and not self.conn.closed:
99 | return await self.conn.close()
100 |
101 | async def watch_queue(self):
102 | try:
103 | logger.debug(f"Watching queue {str(self.conn)}")
104 | while not self.conn.closed:
105 | try:
106 | item = await asyncio.wait_for(self.outbound.get(), 5.0)
107 | except asyncio.TimeoutError:
108 | continue
109 | except Exception:
110 | logger.exception("watch_queue: error getting data from buffer")
111 | continue
112 | else:
113 | # TODO: I think we need to wait for this to finish before
114 | # starting another .get
115 | asyncio.ensure_future(self.drain_buf(item))
116 |
117 | except asyncio.CancelledError:
118 | logger.debug("watch_queue: cancel request received")
119 | await self.close()
120 |
121 | async def drain_buf(self, item):
122 | try:
123 | if self.conn.closed:
124 | logger.debug("Message not sent: connection already closed")
125 | else:
126 | q = BridgeQueue(maxsize=1)
127 | await asyncio.gather(
128 | self.deferrer.defer(q.read_from, item["path"]), self.conn.send(q)
129 | )
130 | except Exception:
131 | # TODO: Break out these exceptions to deal with file problems
132 | # and network problem separately?
133 | logger.exception("watch_queue: error received trying to write")
134 | await self.outbound.put_ident(item)
135 | return await self.close()
136 | else:
137 | try:
138 | await self.deferrer.defer(os.remove, item["path"])
139 | except TypeError:
140 | logger.exception("failed to os.remove %s", item["path"])
141 | pass # some messages aren't actually files
142 |
143 | async def _wait_handshake(self):
144 | logger.debug("waiting for HI")
145 | response = await self.buf.get(timeout=20.0)
146 | self.remote_id = response.header["id"]
147 | await self.register()
148 | await self.receptor.recalculate_and_send_routes_soon()
149 |
150 | async def client(self, transport):
151 | try:
152 | self.conn = transport
153 | self.start_receiving()
154 | await self.hello()
155 | await self._wait_handshake()
156 | await self.start_processing()
157 | logger.debug("normal exit")
158 | finally:
159 | await self.unregister()
160 |
161 | async def server(self, transport):
162 | try:
163 | self.conn = transport
164 | self.start_receiving()
165 | await self._wait_handshake()
166 | await self.hello()
167 | await self.start_processing()
168 | finally:
169 | await self.unregister()
170 |
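
A hedged sketch of a custom Transport satisfying the abstract interface above; this in-memory loopback is purely illustrative and not part of the project:

    import asyncio

    from receptor.connection.base import Transport

    class LoopbackTransport(Transport):
        """Echoes sent chunks back to its own reader side."""

        def __init__(self):
            self._q = asyncio.Queue()
            self._closed = False

        async def __anext__(self):
            return await self._q.get()

        @property
        def closed(self):
            return self._closed

        async def close(self):
            self._closed = True

        async def send(self, q):
            # q is an async iterable of byte chunks, e.g. a BridgeQueue
            async for chunk in q:
                await self._q.put(chunk)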
--------------------------------------------------------------------------------
/receptor/connection/manager.py:
--------------------------------------------------------------------------------
1 | import functools
2 | import asyncio
3 | from urllib.parse import urlparse
4 |
5 | from . import sock, ws
6 |
7 | default_scheme_ports = {"rnp": 8888, "rnps": 8899, "ws": 80, "wss": 443}
8 |
9 |
10 | def parse_peer(peer, role):
11 | if "://" not in peer:
12 | peer = f"rnp://{peer}"
13 | if peer.startswith("receptor://"):
14 | peer = peer.replace("receptor", "rnp", 1)
15 | parsed_peer = urlparse(peer)
16 | if (parsed_peer.scheme not in default_scheme_ports) or (
17 | role == "server"
18 | and (parsed_peer.path or parsed_peer.params or parsed_peer.query or parsed_peer.fragment)
19 | ):
20 | raise RuntimeError(f"Invalid Receptor peer specified: {peer}")
21 | return parsed_peer
22 |
23 |
24 | class Manager:
25 | def __init__(self, factory, ssl_context_factory, loop=None):
26 | self.factory = factory
27 | self.ssl_context_factory = ssl_context_factory
28 | self.loop = loop or asyncio.get_event_loop()
29 |
30 | def get_listener(self, listen_url):
31 | service = parse_peer(listen_url, "server")
32 | ssl_context = (
33 | self.ssl_context_factory("server") if service.scheme in ("rnps", "wss") else None
34 | )
35 | if service.scheme in ("rnp", "rnps"):
36 | return asyncio.start_server(
37 | functools.partial(sock.serve, factory=self.factory),
38 | host=service.hostname,
39 | port=service.port or default_scheme_ports[service.scheme],
40 | ssl=ssl_context,
41 | )
42 | elif service.scheme in ("ws", "wss"):
43 | return self.loop.create_server(
44 | ws.app(self.factory).make_handler(),
45 | service.hostname,
46 | service.port or default_scheme_ports[service.scheme],
47 | ssl=ssl_context,
48 | )
49 | else:
50 | raise RuntimeError(f"Unknown URL scheme {service.scheme}")
51 |
52 | def get_peer(self, peer, reconnect=True, ws_extra_headers=None, ws_heartbeat=None):
53 | service = parse_peer(peer, "client")
54 | ssl_context = (
55 | self.ssl_context_factory("client") if service.scheme in ("rnps", "wss") else None
56 | )
57 | if service.scheme in ("rnp", "rnps"):
58 | return self.loop.create_task(
59 | sock.connect(
60 | service.hostname,
61 | service.port or default_scheme_ports[service.scheme],
62 | self.factory,
63 | self.loop,
64 | ssl_context,
65 | reconnect,
66 | )
67 | )
68 | elif service.scheme in ("ws", "wss"):
69 | return self.loop.create_task(
70 | ws.connect(
71 | peer,
72 | self.factory,
73 | self.loop,
74 | ssl_context,
75 | reconnect,
76 | ws_extra_headers,
77 | ws_heartbeat,
78 | )
79 | )
80 | else:
81 | raise RuntimeError(f"Unknown URL scheme {service.scheme}")
82 |
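
A short, hedged illustration of parse_peer's normalization rules as implemented above (a bare host:port defaults to rnp://, and the legacy receptor:// scheme is rewritten):

    from receptor.connection.manager import parse_peer

    assert parse_peer("example.com:8888", "client").scheme == "rnp"
    assert parse_peer("receptor://example.com", "client").scheme == "rnp"
    assert parse_peer("wss://example.com/path", "client").path == "/path"

    # servers may not carry a path, params, query, or fragment:
    # parse_peer("rnp://0.0.0.0:8888/x", "server") raises RuntimeError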
--------------------------------------------------------------------------------
/receptor/connection/sock.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 |
4 | from .base import Transport, log_ssl_detail
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 |
9 | class RawSocket(Transport):
10 | def __init__(self, reader, writer, chunk_size=2 ** 16):
11 | self.reader = reader
12 | self.writer = writer
13 | self._closed = False
14 | self.chunk_size = chunk_size
15 |
16 | async def __anext__(self):
17 | bytes_ = await self.reader.read(self.chunk_size)
18 | if not bytes_:
19 |             await self.close()
20 | return bytes_
21 |
22 | @property
23 | def closed(self):
24 | return self._closed
25 |
26 |     async def close(self):
27 | self._closed = True
28 | self.writer.close()
29 |
30 | async def send(self, q):
31 | async for chunk in q:
32 | self.writer.write(chunk)
33 | await self.writer.drain()
34 |
35 | def _diagnostics(self):
36 | t = self.writer._transport.get_extra_info
37 | addr, port = t("peername", (None, None))
38 | return {
39 | "address": addr,
40 | "port": port,
41 | "compression": t("compression"),
42 | "cipher": t("cipher"),
43 | "peercert": t("peercert"),
44 | "sslcontext": t("sslcontext"),
45 | "closed": self.closed,
46 | "chunk_size": self.chunk_size,
47 | }
48 |
49 |
50 | async def connect(host, port, factory, loop=None, ssl=None, reconnect=True):
51 | if not loop:
52 | loop = asyncio.get_event_loop()
53 |
54 | worker = factory()
55 | try:
56 | r, w = await asyncio.open_connection(host, port, loop=loop, ssl=ssl)
57 | log_ssl_detail(w._transport)
58 | t = RawSocket(r, w)
59 | await worker.client(t)
60 | except Exception as ex:
61 | logger.info(f"sock.connect: connection failed, {str(ex)}")
62 | if not reconnect:
63 | return False
64 | finally:
65 | if reconnect:
66 | await asyncio.sleep(5)
67 | logger.debug("sock.connect: reconnection")
68 |             loop.create_task(connect(host, port, factory, loop, ssl, reconnect))
69 | return True
70 |
71 |
72 | async def serve(reader, writer, factory):
73 | log_ssl_detail(writer._transport)
74 | t = RawSocket(reader, writer)
75 | await factory().server(t)
76 |
--------------------------------------------------------------------------------
/receptor/connection/ws.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import functools
3 | import logging
4 |
5 | import aiohttp
6 | import aiohttp.web
7 | from aiohttp.helpers import proxies_from_env
8 | from urllib.parse import urlparse
9 |
10 | from .base import Transport, log_ssl_detail
11 |
12 | logger = logging.getLogger(__name__)
13 |
14 |
15 | class WebSocket(Transport):
16 | def __init__(self, ws):
17 | self.ws = ws
18 |
19 | async def __anext__(self):
20 | msg = await self.ws.__anext__()
21 | return msg.data
22 |
23 | async def close(self):
24 | return await self.ws.close()
25 |
26 | @property
27 | def closed(self):
28 | return self.ws.closed
29 |
30 | async def send(self, q):
31 | async for chunk in q:
32 | await self.ws.send_bytes(chunk)
33 |
34 |
35 | async def connect(
36 | uri,
37 | factory,
38 | loop=None,
39 | ssl_context=None,
40 | reconnect=True,
41 | ws_extra_headers=None,
42 | ws_heartbeat=None,
43 | ):
44 | if not loop:
45 | loop = asyncio.get_event_loop()
46 |
47 | worker = factory()
48 | try:
49 | proxy_scheme = {"ws": "http", "wss": "https"}[urlparse(uri).scheme]
50 | proxies = proxies_from_env()
51 | if proxy_scheme in proxies:
52 | proxy = proxies[proxy_scheme].proxy
53 | proxy_auth = proxies[proxy_scheme].proxy_auth
54 | else:
55 | proxy = None
56 | proxy_auth = None
57 |         async with aiohttp.ClientSession() as session, session.ws_connect(
58 | uri,
59 | ssl=ssl_context,
60 | headers=ws_extra_headers,
61 | proxy=proxy,
62 | proxy_auth=proxy_auth,
63 | heartbeat=ws_heartbeat,
64 | ) as ws:
65 | log_ssl_detail(ws)
66 | t = WebSocket(ws)
67 | await worker.client(t)
68 | except Exception:
69 | logger.exception("ws.connect")
70 | return False
71 | finally:
72 | if reconnect:
73 | await asyncio.sleep(5)
74 | logger.debug("ws.connect: reconnecting")
75 | loop.create_task(
76 | connect(
77 | uri,
78 | factory=factory,
79 | loop=loop,
80 | ssl_context=ssl_context,
81 | ws_extra_headers=ws_extra_headers,
82 | ws_heartbeat=ws_heartbeat,
83 | )
84 | )
85 | return True
86 |
87 |
88 | async def serve(request, factory):
89 | ws = aiohttp.web.WebSocketResponse()
90 | log_ssl_detail(request.transport)
91 | await ws.prepare(request)
92 |
93 | t = WebSocket(ws)
94 | await factory().server(t)
95 |
96 |
97 | def app(factory):
98 | handler = functools.partial(serve, factory=factory)
99 | app = aiohttp.web.Application()
100 | app.add_routes([aiohttp.web.get("/", handler)])
101 | return app
102 |
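
A hedged sketch of serving the websocket endpoint through the app() helper above; StubWorker is an invented stand-in for connection.base.Worker, which the real factory would produce:

    import aiohttp.web

    from receptor.connection import ws

    class StubWorker:
        # any object exposing an async server(transport) coroutine will do here
        async def server(self, transport):
            async for chunk in transport:
                print(len(chunk), "bytes received")

    aiohttp.web.run_app(ws.app(StubWorker), port=8888)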
--------------------------------------------------------------------------------
/receptor/controller.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import datetime
3 | import io
4 | import logging
5 | import os
6 | import shutil
7 | from contextlib import suppress
8 |
9 | from .connection.base import Worker
10 | from .connection.manager import Manager
11 | from .diagnostics import status
12 | from .messages.framed import FileBackedBuffer, FramedMessage
13 | from .receptor import Receptor
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 |
18 | class Controller:
19 | """
20 | This class is the mechanism by which a larger system would interface with the Receptor
21 |     mesh as a Controller. For more details on writing Controllers see :ref:`controller`. Good
22 |     examples of its usage can be found in :mod:`receptor.entrypoints`.
23 |
24 | :param config: Overall Receptor configuration
25 |     :param loop: An asyncio event loop; if not provided, the current event loop will be fetched
26 | :param queue: A queue that responses will be placed into as they are received
27 |
28 | :type config: :class:`receptor.config.ReceptorConfig`
29 | :type loop: asyncio event loop
30 | :type queue: asyncio.Queue
31 | """
32 |
33 |     def __init__(self, config, loop=None, queue=None):
34 |         self.receptor = Receptor(config)
35 |         self.loop = loop = loop or asyncio.get_event_loop()
36 | self.connection_manager = Manager(
37 | lambda: Worker(self.receptor, loop), self.receptor.config.get_ssl_context, loop
38 | )
39 | self.queue = queue
40 | if self.queue is None:
41 | self.queue = asyncio.Queue(loop=loop)
42 | self.receptor.response_queue = self.queue
43 | self.status_task = loop.create_task(status(self.receptor))
44 |
45 | async def shutdown_loop(self):
46 | tasks = [
47 | task for task in asyncio.Task.all_tasks() if task is not asyncio.Task.current_task()
48 | ]
49 | # Retrieve and throw away all exceptions that happen after
50 | # the decision to shut down was made.
51 | for task in tasks:
52 | task.cancel()
53 | with suppress(Exception):
54 | await task
55 | await asyncio.gather(*tasks)
56 | self.loop.stop()
57 |
58 | async def exit_on_exceptions_in(self, tasks):
59 | try:
60 | for task in tasks:
61 | await task
62 | except Exception as e:
63 | logger.exception(str(e))
64 | self.loop.create_task(self.shutdown_loop())
65 |
66 | def enable_server(self, listen_urls):
67 | """
68 |         Enables a listening server to receive *Peer* connections; multiple servers can be started.
69 |
70 | Examples of supported formats:
71 |
72 | * rnps://0.0.0.0:8888 - Secure receptor protocol bound on all interfaces port 8888
73 | * rnp://1.2.3.4:8888 - Insecure receptor protocol bound to the interface of 1.2.3.4
74 | port 8888
75 | * wss://0.0.0.0:443 - Secure websocket protocol bound on all interfaces port 443
76 |
77 | The services are started as asyncio tasks and will start listening once
78 | :meth:`receptor.controller.Controller.run` is called.
79 |
80 | :param listen_urls: A list of listener urls
81 | """
82 | tasks = list()
83 | for url in listen_urls:
84 | listener = self.connection_manager.get_listener(url)
85 | logger.info("Serving on %s", url)
86 | tasks.append(self.loop.create_task(listener))
87 | return tasks
88 |
89 | def add_peer(self, peer, ws_extra_headers=None, ws_heartbeat=None):
90 | """
91 | Adds a Receptor Node *Peer*. A connection will be established to this node once
92 | :meth:`receptor.controller.Controller.run` is called.
93 |
94 | Example format:
95 | rnps://10.0.1.1:8888
96 |
97 | :param peer: remote peer url
98 | """
99 | logger.info("Connecting to peer {}".format(peer))
100 | return self.connection_manager.get_peer(
101 | peer,
102 | reconnect=not self.receptor.config._is_ephemeral,
103 | ws_extra_headers=ws_extra_headers,
104 | ws_heartbeat=ws_heartbeat,
105 | )
106 |
107 | async def recv(self):
108 | """
109 |         Fetch a single response message from the response queue. This method blocks
110 |         and should be *await*-ed or assigned to a Future.
111 |
112 | :return: A single response message
113 | :rtype: :class:`receptor.messages.framed.FramedMessage`
114 | """
115 | return await self.receptor.response_queue.get()
116 |
117 | async def send(self, payload, recipient, directive, expect_response=True):
118 | """
119 | Sends a payload to a recipient *Node* to execute under a given *directive*.
120 |
121 | This method is intended to take these inputs and craft a
122 | :class:`receptor.messages.framed.FramedMessage` that can then be sent along to the mesh.
123 |
124 |         The payload input type is highly flexible and is intended to stay out of the way of the
125 |         contract made between the producer/sender of the data and the plugin on the destination
126 |         node that will execute it. As such the payload data type can be one of:
127 |
128 | * A file path
129 | * str, or bytes - Strings will be converted to bytes before transmission
130 | * dict - This will be serialized to json before transmission
131 | * io.BytesIO - This can be any type that is based on *io.BytesIO* and supports read()
132 |
133 |         The *directive* should be a string and take the form of ``<plugin>:<action>``. For
134 |         example, a directive invoking the *execute* action of the receptor-http plugin would take
135 |         the form of ``receptor-http:execute``.
136 |
137 |         This method returns a message identifier. That identifier can be used to associate
138 |         responses returned from the plugin with the message sent by this
139 |         request.
140 |
141 | :param payload: See above
142 | :param recipient: The node id of a Receptor Node on the mesh
143 | :param directive: See above
144 |         :param expect_response: Optional. Whether the plugin is expected to emit a
145 |             response.
146 |
147 | :return: a message-id that can be used to reference responses
148 | """
149 |         if isinstance(payload, (str, bytes)) and os.path.exists(payload):
150 | buffer = FileBackedBuffer.from_path(payload)
151 | elif isinstance(payload, (str, bytes)):
152 | buffer = FileBackedBuffer.from_data(payload)
153 | elif isinstance(payload, dict):
154 | buffer = FileBackedBuffer.from_dict(payload)
155 | elif isinstance(payload, io.BytesIO):
156 | buffer = FileBackedBuffer.from_buffer(payload)
157 | message = FramedMessage(
158 | header=dict(
159 | sender=self.receptor.node_id,
160 | recipient=recipient,
161 | timestamp=datetime.datetime.utcnow(),
162 | directive=directive,
163 | ),
164 | payload=buffer,
165 | )
166 | await self.receptor.router.send(message, expected_response=expect_response)
167 | return message.msg_id
168 |
169 | async def ping(self, destination, expected_response=True):
170 | """
171 | Sends a ping message to a remote Receptor node with the expectation that it will return
172 | information about when it received the ping, what its capabilities are and what work it
173 | is currently doing.
174 |
175 | A good example of a standalone Controller that just implements ping can be found at
176 | :meth:`receptor.entrypoints.run_as_ping`
177 |
178 | :param destination: The node id of the target node
179 | :returns: a message-id that can be used to reference responses
180 | """
181 | return await self.receptor.router.ping_node(destination, expected_response)
182 |
183 | def run(self, app=None):
184 | """
185 |         Starts the Controller's event loop; this method will not return until the event loop is
186 |         stopped. If an optional async function is given, the Controller's event
187 |         loop will run until that function returns.
188 |
189 | :param app: optional; async function that will run and shut the loop down when it returns
190 | """
191 | try:
192 | if app is None:
193 | app = self.receptor.shutdown_handler
194 | self.loop.run_until_complete(app())
195 | except KeyboardInterrupt:
196 | pass
197 | finally:
198 | self.loop.stop()
199 |
200 | def cleanup_tmpdir(self):
201 | try:
202 | is_ephemeral = self.receptor.config._is_ephemeral
203 | base_path = self.receptor.base_path
204 | except AttributeError:
205 | return
206 | if is_ephemeral:
207 | try:
208 | logger.debug(f"Removing temporary directory {base_path}")
209 | shutil.rmtree(base_path)
210 | except Exception:
211 | logger.error(f"Error while removing temporary directory {base_path}", exc_info=True)
212 |
--------------------------------------------------------------------------------
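Putting the Controller API together, a minimal embedding sketch. It assumes a ReceptorConfig can be constructed from your settings, and the node ids and peer URL are hypothetical placeholders::

    from receptor.config import ReceptorConfig
    from receptor.controller import Controller

    async def main(controller):
        # send() wraps the dict in a FramedMessage and returns its msg_id.
        msg_id = await controller.send(
            payload={"url": "https://example.com"},   # serialized to JSON
            recipient="node-b",                       # hypothetical node id
            directive="receptor-http:execute",        # <plugin>:<action>
        )
        response = await controller.recv()            # blocks on the queue
        assert response.header["in_response_to"] == msg_id

    config = ReceptorConfig()                         # parses CLI/ini settings
    controller = Controller(config)
    controller.enable_server(["rnp://0.0.0.0:8888"])
    controller.add_peer("rnp://10.0.1.1:8888")        # hypothetical peer
    controller.run(lambda: main(controller))          # runs until main() returns
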
/receptor/diagnostics.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import inspect
3 | import json
4 | import logging
5 | import os
6 | import signal
7 | import traceback as tb
8 | import types
9 | from collections import defaultdict, deque
10 | from datetime import datetime
11 | from functools import singledispatch
12 |
13 | from prometheus_client import generate_latest
14 |
15 | from . import fileio
16 | from .logstash_formatter.logstash import LogstashFormatter
17 |
18 | logger = logging.getLogger(__name__)
19 |
20 | log_buffer = deque(maxlen=10)
21 | fmt = LogstashFormatter()
22 | trigger = asyncio.Event()
23 |
24 | signal.signal(signal.SIGHUP, lambda n, h: trigger.set())
25 |
26 |
27 | @singledispatch
28 | def extract_module(o):
29 | return o.__module__
30 |
31 |
32 | @extract_module.register(types.CoroutineType)
33 | def extract_coro(coro):
34 | return inspect.getmodule(coro, coro.cr_code.co_filename).__name__
35 |
36 |
37 | @extract_module.register(types.GeneratorType)
38 | def extract_gen(gen):
39 | return inspect.getmodule(gen, gen.gi_code.co_filename).__name__
40 |
41 |
42 | @singledispatch
43 | def encode(o):
44 | return json.JSONEncoder().default(o)
45 |
46 |
47 | @encode.register(set)
48 | def encode_set(s):
49 | return list(s)
50 |
51 |
52 | @encode.register(bytes)
53 | def decode_bytes(b):
54 | return b.decode("utf-8")
55 |
56 |
57 | @encode.register(types.FunctionType)
58 | def encode_function_type(func):
59 | return f"{func.__module__}.{func.__qualname__}"
60 |
61 |
62 | @encode.register(datetime)
63 | def encode_datetime(o):
64 | return o.isoformat()
65 |
66 |
67 | def structure_task(task):
68 | coro = task._coro
69 | try:
70 | mod = extract_module(coro)
71 | except Exception:
72 | mod = ""
73 |
74 | out = {"state": task._state, "name": f"{mod}.{coro.__qualname__}", "stack": []}
75 |
76 | try:
77 | stack = tb.extract_stack(task.get_stack()[0])
78 | out["stack"] = [
79 | {"filename": fs.filename, "line": fs.line, "lineno": fs.lineno} for fs in stack
80 | ]
81 | except IndexError:
82 | pass
83 |
84 | return out
85 |
86 |
87 | def tasks():
88 | d = defaultdict(list)
89 | for t in asyncio.Task.all_tasks():
90 | st = structure_task(t)
91 | state = st.pop("state")
92 | d[state].append(st)
93 | return [{"state": state, "items": tasks} for state, tasks in d.items()]
94 |
95 |
96 | def format_connection(node_id, connection, capabilities):
97 | d = connection._diagnostics()
98 | d["node_id"] = node_id
99 | d["capabilities"] = capabilities
100 | return d
101 |
102 |
103 | def format_router(router):
104 | edges = [
105 | {"left": edge[0], "right": edge[1], "cost": cost} for edge, cost in router._edges.items()
106 | ]
107 |
108 | neighbors = [
109 | {"node_id": node_id, "items": values} for node_id, values in router._neighbors.items()
110 | ]
111 |
112 | table = [
113 | {"destination_node_id": node_id, "next_hop": v[0], "cost": v[1]}
114 | for node_id, v in router.routing_table.items()
115 | ]
116 |
117 | return {"nodes": router._nodes, "edges": edges, "neighbors": neighbors, "table": table}
118 |
119 |
120 | async def status(receptor_object):
121 | path = os.path.join(receptor_object.base_path, "diagnostics.json")
122 | doc = {}
123 | doc["config"] = receptor_object.config._parsed_args.__dict__
124 | doc["node_id"] = receptor_object.node_id
125 | while True:
126 | trigger.clear()
127 | doc["datetime"] = datetime.utcnow()
128 | doc["recent_errors"] = list(fmt._record_to_dict(r) for r in log_buffer)
129 | doc["connections"] = [
130 | format_connection(node_id, conn, receptor_object.node_capabilities[node_id])
131 | for node_id, connections in receptor_object.connections.items()
132 | for conn in connections
133 | ]
134 | doc["routes"] = format_router(receptor_object.router)
135 | doc["tasks"] = tasks()
136 | doc["metrics"] = generate_latest()
137 | try:
138 | await fileio.write(path, json.dumps(doc, default=encode), mode="w")
139 | except Exception:
140 | logger.exception("failed to dump diagnostic data")
141 |
142 | # run every 30 seconds, or when triggered
143 | try:
144 | await asyncio.wait_for(trigger.wait(), 30)
145 | except asyncio.TimeoutError:
146 | pass
147 |
--------------------------------------------------------------------------------
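The singledispatch `encode` function above is what lets ``json.dumps(doc, default=encode)`` serialize the non-JSON-native values the status document contains (sets, bytes, datetimes, functions). A small self-contained illustration::

    import json
    from datetime import datetime

    from receptor.diagnostics import encode

    doc = {
        "when": datetime(2020, 1, 1, 12, 0),
        "peers": {"node-a", "node-b"},     # set -> list
        "metrics": b"counter 1\n",         # bytes -> utf-8 str
    }
    # default= is consulted only for types json cannot encode natively.
    print(json.dumps(doc, default=encode, sort_keys=True))

Because `trigger` is set from the SIGHUP handler installed at import time, sending SIGHUP to a running node forces an immediate rewrite of diagnostics.json instead of waiting out the 30-second timer.
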
/receptor/entrypoints.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import logging
3 | import sys
4 | import time
5 |
6 | from prometheus_client import start_http_server
7 |
8 | from .controller import Controller
9 |
10 | logger = logging.getLogger(__name__)
11 |
12 |
13 | def run_as_node(config):
14 | async def node_keepalive():
15 | # NOTE: I'm not really happy with this, I'd love to be able to await Peer(node).ping()
16 | # and then verify the status under a timeout rather than just throw away the result and
17 | # rely on the connection logic
18 | for node_id in controller.receptor.router.get_nodes():
19 | await controller.ping(node_id, expected_response=False)
20 | absolute_call_time = (
21 | ((int(controller.loop.time()) + 1) // config.node_keepalive_interval) + 1
22 | ) * config.node_keepalive_interval
23 | controller.loop.call_at(absolute_call_time, controller.loop.create_task, node_keepalive())
24 |
25 | try:
26 | controller = Controller(config)
27 | logger.info(f"Running as Receptor node with ID: {controller.receptor.node_id}")
28 | if config.node_stats_enable:
29 | logger.info(f"Starting stats on port {config.node_stats_port}")
30 | start_http_server(config.node_stats_port)
31 | if not config.node_server_disable:
32 | listen_tasks = controller.enable_server(config.node_listen)
33 | controller.loop.create_task(controller.exit_on_exceptions_in(listen_tasks))
34 | for peer in config.node_peers:
35 | controller.add_peer(
36 | peer,
37 | ws_extra_headers=config.node_ws_extra_headers,
38 | ws_heartbeat=config.node_ws_heartbeat,
39 | )
40 | if config.node_keepalive_interval > 1:
41 | controller.loop.create_task(node_keepalive())
42 | controller.loop.create_task(
43 | controller.receptor.connection_manifest.watch_expire(controller.receptor.buffer_mgr)
44 | )
45 | controller.run()
46 | finally:
47 | controller.cleanup_tmpdir()
48 |
49 |
50 | async def run_oneshot_command(
51 | controller, peer, recipient, ws_extra_headers, ws_heartbeat, send_func, read_func
52 | ):
53 | if (not recipient) or (recipient != controller.receptor.node_id):
54 | add_peer_task = controller.add_peer(
55 | peer, ws_extra_headers=ws_extra_headers, ws_heartbeat=ws_heartbeat
56 | )
57 | start_wait = time.time()
58 | while True:
59 | if add_peer_task and add_peer_task.done() and not add_peer_task.result():
60 | print("Connection failed. Exiting.")
61 | return False
62 | if (
63 | (not recipient or controller.receptor.router.node_is_known(recipient))
64 | and controller.receptor.route_send_time is not None
65 | and time.time() - controller.receptor.route_send_time > 2.0
66 | ):
67 | break
68 | if time.time() - start_wait > 10:
69 | print("Connection timed out. Exiting.")
70 | if not add_peer_task.done():
71 | add_peer_task.cancel()
72 | return False
73 | await asyncio.sleep(0.5)
74 | read_task = controller.loop.create_task(read_func())
75 | await send_func()
76 | await read_task
77 | return True
78 |
79 |
80 | def run_as_ping(config):
81 | def ping_iter():
82 | if config.ping_count:
83 | for x in range(config.ping_count):
84 | yield x
85 | else:
86 | while True:
87 | yield 0
88 |
89 | async def ping_entrypoint():
90 | return await run_oneshot_command(
91 | controller,
92 | config.ping_peer,
93 | config.ping_recipient,
94 | config.ping_ws_extra_headers,
95 | config.ping_ws_heartbeat,
96 | send_pings,
97 | read_responses,
98 | )
99 |
100 | async def read_responses():
101 | for _ in ping_iter():
102 | message = await controller.recv()
103 | print(message.payload.readall().decode())
104 |
105 | async def send_pings():
106 | for x in ping_iter():
107 | await controller.ping(config.ping_recipient)
108 |             if not config.ping_count or x + 1 < config.ping_count:
109 | await asyncio.sleep(config.ping_delay)
110 |
111 | try:
112 | logger.info(f"Sending ping to {config.ping_recipient} via {config.ping_peer}.")
113 | controller = Controller(config)
114 | controller.run(ping_entrypoint)
115 | finally:
116 | controller.cleanup_tmpdir()
117 |
118 |
119 | def run_as_send(config):
120 | async def send_entrypoint():
121 | return await run_oneshot_command(
122 | controller,
123 | config.send_peer,
124 | config.send_recipient,
125 | config.send_ws_extra_headers,
126 | config.send_ws_heartbeat,
127 | send_message,
128 | read_responses,
129 | )
130 |
131 | async def send_message():
132 | if config.send_payload == "-":
133 | data = sys.stdin.buffer.read()
134 | else:
135 | data = config.send_payload
136 | await controller.send(
137 | payload=data, recipient=config.send_recipient, directive=config.send_directive
138 | )
139 |
140 | async def read_responses():
141 | while True:
142 | message = await controller.recv()
143 | logger.debug(f"{message}")
144 | if message.header.get("in_response_to", None):
145 | logger.debug("Received response message")
146 | if message.header.get("eof", False):
147 | logger.info("Received EOF")
148 | if message.header.get("code", 0) != 0:
149 |                         logger.error("EOF was an error result")
150 |                         if message.payload:
151 |                             print(f"ERROR: {message.payload.readall().decode()}")
152 |                         else:
153 |                             print("No EOF Error Payload")
154 | break
155 | elif message.payload:
156 | print(message.payload.readall().decode())
157 | else:
158 | print("---")
159 | else:
160 | logger.warning(f"Received unknown message {message}")
161 |
162 | try:
163 | logger.info(
164 |             f"Sending directive {config.send_directive} to {config.send_recipient} "
165 |             f"via {config.send_peer}"
166 | )
167 | controller = Controller(config)
168 | controller.run(send_entrypoint)
169 | finally:
170 | controller.cleanup_tmpdir()
171 |
172 |
173 | def run_as_status(config):
174 | async def status_entrypoint():
175 | return await run_oneshot_command(
176 | controller,
177 | config.status_peer,
178 | None,
179 | config.status_ws_extra_headers,
180 | config.status_ws_heartbeat,
181 | print_status,
182 | noop,
183 | )
184 |
185 | async def print_status():
186 |
187 | # This output should be formatted so as to be parseable as YAML
188 |
189 | r = controller.receptor
190 | print("Nodes:")
191 | print(" Myself:", r.router.node_id)
192 | print(" Others:")
193 | for node in r.router.get_nodes():
194 | print(" -", node)
195 | print()
196 | print("Route Map:")
197 | for edge in r.router.get_edges():
198 | print("-", str(tuple(edge)))
199 | print()
200 | print("Known Node Capabilities:")
201 | for node, node_data in r.known_nodes.items():
202 | print(" ", node, ":", sep="")
203 | for cap, cap_value in node_data["capabilities"].items():
204 | print(" ", cap, ": ", str(cap_value), sep="")
205 |
206 | async def noop():
207 | return
208 |
209 | try:
210 | controller = Controller(config)
211 | controller.run(status_entrypoint)
212 | finally:
213 | controller.cleanup_tmpdir()
214 |
--------------------------------------------------------------------------------
/receptor/exceptions.py:
--------------------------------------------------------------------------------
1 | class ReceptorRuntimeError(RuntimeError):
2 | pass
3 |
4 |
5 | class ReceptorBufferError(ReceptorRuntimeError):
6 | pass
7 |
8 |
9 | class ReceptorMessageError(ValueError):
10 | pass
11 |
12 |
13 | class ReceptorConfigError(Exception):
14 | pass
15 |
16 |
17 | class UnknownDirective(ReceptorMessageError):
18 | pass
19 |
20 |
21 | class InvalidDirectiveAction(ReceptorMessageError):
22 | pass
23 |
24 |
25 | class UnknownMessageType(ReceptorMessageError):
26 | pass
27 |
28 |
29 | class UnrouteableError(ReceptorMessageError):
30 | pass
31 |
--------------------------------------------------------------------------------
/receptor/fileio.py:
--------------------------------------------------------------------------------
1 | import atexit
2 | import asyncio
3 | from concurrent.futures import ThreadPoolExecutor
4 |
5 | pool = ThreadPoolExecutor()
6 |
7 |
8 | def shutdown_pool():
9 | for thread in pool._threads:
10 | thread._tstate_lock.release()
11 |
12 |
13 | atexit.register(shutdown_pool)
14 |
15 |
16 | class Deferrer:
17 | def __init__(self, loop=None):
18 | self.loop = loop or asyncio.get_event_loop()
19 |
20 | async def defer(self, func, *args):
21 | """defers execution of the callable to the loop"""
22 | return await self.loop.run_in_executor(pool, func, *args)
23 |
24 |
25 | async def read(path, mode="rb"):
26 | def _f():
27 | with open(path, mode) as fp:
28 | return fp.read()
29 |
30 | return await Deferrer().defer(_f)
31 |
32 |
33 | async def writelines(path, data, mode="wb"):
34 | def _f():
35 | with open(path, mode) as fp:
36 | fp.writelines(data)
37 |
38 | return await Deferrer().defer(_f)
39 |
40 |
41 | async def write(path, data, mode="wb"):
42 | def _f():
43 | with open(path, mode) as fp:
44 | fp.write(data)
45 |
46 | return await Deferrer().defer(_f)
47 |
--------------------------------------------------------------------------------
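The helpers above exist so callers never block the event loop on disk I/O; each call is pushed through `Deferrer` onto the module-level thread pool. A minimal usage sketch (the path is a hypothetical example)::

    import asyncio

    from receptor import fileio

    async def demo(path="/tmp/receptor-demo.txt"):
        await fileio.write(path, "hello\n", mode="w")   # runs in the pool
        assert await fileio.read(path, mode="r") == "hello\n"

    asyncio.get_event_loop().run_until_complete(demo())
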
/receptor/logstash_formatter/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2012 Exoscale SA
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining
4 | a copy of this software and associated documentation files (the
5 | "Software"), to deal in the Software without restriction, including
6 | without limitation the rights to use, copy, modify, merge, publish,
7 | distribute, sublicense, and/or sell copies of the Software, and to
8 | permit persons to whom the Software is furnished to do so, subject to
9 | the following conditions:
10 |
11 | The above copyright notice and this permission notice shall be
12 | included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/receptor/logstash_formatter/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/receptor/logstash_formatter/__init__.py
--------------------------------------------------------------------------------
/receptor/logstash_formatter/logstash.py:
--------------------------------------------------------------------------------
1 | """
2 | This module was taken in part from:
3 | https://github.com/ulule/python-logstash-formatter
4 | """
5 | import datetime
6 | import json
7 | import logging
8 | import socket
9 | import traceback as tb
10 |
11 |
12 | def _default_json_default(obj):
13 | """
14 | Coerce everything to strings.
15 | All objects representing time get output as ISO8601.
16 | """
17 | if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
18 | return obj.isoformat()
19 | else:
20 | return str(obj)
21 |
22 |
23 | class LogstashFormatter(logging.Formatter):
24 | """
25 | A custom formatter to prepare logs to be
26 | shipped out to logstash.
27 | """
28 |
29 | def __init__(
30 | self, fmt=None, datefmt=None, style="%", json_cls=None, json_default=_default_json_default
31 | ):
32 | """
33 | :param fmt: Config as a JSON string, allowed fields;
34 | extra: provide extra fields always present in logs
35 | source_host: override source host name
36 | :param datefmt: Date format to use (required by logging.Formatter
37 | interface but not used)
38 | :param json_cls: JSON encoder to forward to json.dumps
39 | :param json_default: Default JSON representation for unknown types,
40 | by default coerce everything to a string
41 | """
42 |
43 | if fmt is not None:
44 | self._fmt = json.loads(fmt)
45 | else:
46 | self._fmt = {}
47 | self.json_default = json_default
48 | self.json_cls = json_cls
49 | if "extra" not in self._fmt:
50 | self.defaults = {}
51 | else:
52 | self.defaults = self._fmt["extra"]
53 | if "source_host" in self._fmt:
54 | self.source_host = self._fmt["source_host"]
55 | else:
56 | try:
57 | self.source_host = socket.gethostname()
58 | except Exception:
59 | self.source_host = ""
60 |
61 | def _record_to_dict(self, record):
62 | fields = record.__dict__.copy()
63 |
64 | if isinstance(record.msg, dict):
65 | fields.update(record.msg)
66 | fields.pop("msg")
67 | msg = ""
68 | else:
69 | msg = record.getMessage()
70 |
71 | try:
72 | msg = msg.format(**fields)
73 | except (KeyError, IndexError, ValueError):
74 | pass
75 | except Exception:
76 |             # if the msg cannot be formatted properly, log it as-is instead of crashing
77 |             pass
78 |
79 | if "msg" in fields:
80 | fields.pop("msg")
81 |
82 | if "exc_info" in fields:
83 | if fields["exc_info"]:
84 | formatted = tb.format_exception(*fields["exc_info"])
85 | fields["exception"] = formatted
86 | fields.pop("exc_info")
87 |
88 | if "exc_text" in fields and not fields["exc_text"]:
89 | fields.pop("exc_text")
90 |
91 | logr = self.defaults.copy()
92 |
93 | # remove nulls
94 | fields = {k: v for k, v in fields.items() if v}
95 |
96 | logr.update(
97 | {
98 | "@message": msg,
99 | "@timestamp": datetime.datetime.utcnow().isoformat(),
100 | "@source_host": self.source_host,
101 | "@fields": self._build_fields(logr, fields),
102 | }
103 | )
104 | return logr
105 |
106 | def format(self, record):
107 | """
108 | Format a log record to JSON, if the message is a dict
109 | assume an empty message and use the dict as additional
110 | fields.
111 | """
112 |
113 | logr = self._record_to_dict(record)
114 | return json.dumps(logr, default=self.json_default, cls=self.json_cls)
115 |
116 | def _build_fields(self, defaults, fields):
117 | """Return provided fields including any in defaults
118 | >>> f = LogstashFormatter()
119 | # Verify that ``fields`` is used
120 | >>> f._build_fields({}, {'foo': 'one'}) == \
121 | {'foo': 'one'}
122 | True
123 | # Verify that ``@fields`` in ``defaults`` is used
124 | >>> f._build_fields({'@fields': {'bar': 'two'}}, {'foo': 'one'}) == \
125 | {'foo': 'one', 'bar': 'two'}
126 | True
127 | # Verify that ``fields`` takes precedence
128 | >>> f._build_fields({'@fields': {'foo': 'two'}}, {'foo': 'one'}) == \
129 | {'foo': 'one'}
130 | True
131 | """
132 | return dict(list(defaults.get("@fields", {}).items()) + list(fields.items()))
133 |
--------------------------------------------------------------------------------
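A minimal sketch of wiring LogstashFormatter into stdlib logging; the ``fmt`` argument is a JSON string whose ``extra`` mapping is merged into every emitted record::

    import logging

    from receptor.logstash_formatter.logstash import LogstashFormatter

    handler = logging.StreamHandler()
    handler.setFormatter(LogstashFormatter(fmt='{"extra": {"service": "receptor"}}'))
    log = logging.getLogger("demo")
    log.addHandler(handler)
    log.setLevel(logging.INFO)
    # Emits one JSON document with @message/@timestamp/@source_host/@fields;
    # values passed via extra= end up under @fields.
    log.info("mesh started", extra={"node_id": "node-a"})
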
/receptor/messages/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/receptor/messages/__init__.py
--------------------------------------------------------------------------------
/receptor/messages/directive.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import logging
3 |
4 | from ..exceptions import UnknownDirective
5 | from .framed import FileBackedBuffer, FramedMessage
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 |
10 | class Directive:
11 | def __init__(self, type_, payload):
12 | self.type_ = type_
13 | self.payload = payload
14 |
15 |
16 | class Control:
17 | CONTROL_DIRECTIVES = ["ping"]
18 |
19 | async def __call__(self, router, msg):
20 | _, action = msg.header["directive"].split(":", 1)
21 | if action not in self.CONTROL_DIRECTIVES:
22 | raise UnknownDirective(f"Unknown control directive: {action}")
23 | action_method = getattr(self, action)
24 | serial = 0
25 | async for response in action_method(router.receptor, msg):
26 | serial += 1
27 | resp_msg = FramedMessage(
28 | header=dict(
29 | recipient=msg.header["sender"], in_response_to=msg.msg_id, serial=serial
30 | ),
31 | payload=FileBackedBuffer.from_dict(response),
32 | )
33 | await router.send(resp_msg)
34 |
35 | async def ping(self, receptor, msg):
36 | logger.info(f'Received ping from {msg.header["sender"]}')
37 | yield dict(
38 | initial_time=msg.header["timestamp"],
39 | response_time=datetime.datetime.utcnow(),
40 | active_work=receptor.work_manager.get_work(),
41 | )
42 |
43 |
44 | control = Control()
45 |
--------------------------------------------------------------------------------
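Control directives are dispatched on the text before the first colon: ``receptor:ping`` lands in the `Control` instance above, which strips the namespace and looks up the matching coroutine by action name. A sketch of just the dispatch rule::

    from receptor.messages.directive import Control

    directive = "receptor:ping"                 # <namespace>:<action>
    namespace, action = directive.split(":", 1)
    assert namespace == "receptor"
    assert action in Control.CONTROL_DIRECTIVES
    # Each dict yielded by the action coroutine becomes one FramedMessage
    # whose header carries in_response_to=<original msg_id> and a serial
    # number counting up from 1.
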
/receptor/messages/framed.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides classes to build framed messages as well as consume a
3 | stream of framed messages into discrete messages.
4 |
5 | There are two configurations of framed messages, single and dual part::
6 |
7 | FramedMessage--------------------------------
8 | Frame (Header)
9 | {json data}
10 | Frame (Payload)
11 | FileBackedBuffer
12 | ---------------------------------------------
13 |
14 | FramedMessage--------------------------------
15 | Frame (Command)
16 | {json data}
17 | ---------------------------------------------
18 | """
19 | import asyncio
20 | import functools
21 | import io
22 | import logging
23 | import os
24 | import struct
25 | import tempfile
26 | import uuid
27 | from enum import IntEnum
28 |
29 | from .. import serde as json
30 | from ..exceptions import ReceptorRuntimeError
31 |
32 | logger = logging.getLogger(__name__)
33 |
34 | MAX_INT64 = 2 ** 64 - 1
35 |
36 |
37 | class Frame:
38 | """
39 | A Frame represents the minimal metadata about a transmission.
40 |
41 | Usually you should not create one directly, but rather use the
42 | FramedMessage class.
43 | """
44 |
45 | class Types(IntEnum):
46 | HEADER = 0
47 | PAYLOAD = 1
48 | COMMAND = 2
49 |
50 | fmt = struct.Struct(">ccIIQQ")
51 |
52 | __slots__ = ("type", "version", "length", "msg_id", "id")
53 |
54 | def __init__(self, type_, version, length, msg_id, id_):
55 | self.type = type_
56 | self.version = version
57 | self.length = length
58 | self.msg_id = msg_id
59 | self.id = id_
60 |
61 | def __repr__(self):
62 | return f"Frame({self.type}, {self.version}, {self.length}, {self.msg_id}, {self.id})"
63 |
64 | def serialize(self):
65 | return self.fmt.pack(
66 | bytes([self.type]),
67 | bytes([self.version]),
68 | self.id,
69 | self.length,
70 | *split_uuid(self.msg_id),
71 | )
72 |
73 | @classmethod
74 | def deserialize(cls, buf):
75 | t, v, i, length, hi, lo = Frame.fmt.unpack(buf)
76 | msg_id = join_uuid(hi, lo)
77 | return cls(Frame.Types(ord(t)), ord(v), length, msg_id, i)
78 |
79 | @classmethod
80 | def from_data(cls, data):
81 | return cls.deserialize(data[: Frame.fmt.size]), data[Frame.fmt.size :]
82 |
83 | @classmethod
84 | def wrap(cls, data, type_=Types.PAYLOAD, msg_id=None):
85 | """
86 | Returns a frame for the passed data.
87 | """
88 | if not msg_id:
89 | msg_id = uuid.uuid4().int
90 |
91 | return cls(type_, 1, len(data), msg_id, 1)
92 |
93 |
94 | def split_uuid(data):
95 | "Splits a 128 bit int into two 64 bit words for binary encoding"
96 | return ((data >> 64) & MAX_INT64, data & MAX_INT64)
97 |
98 |
99 | def join_uuid(hi, lo):
100 | "Joins two 64 bit words into a 128bit int from binary encoding"
101 | return (hi << 64) | lo
102 |
103 |
104 | class FileBackedBuffer:
105 | def __init__(self, fp, length=0, min_chunk=2 ** 12, max_chunk=2 ** 20):
106 | self.length = length
107 | self.fp = fp
108 | self._min_chunk = min_chunk
109 | self._max_chunk = max_chunk
110 |
111 | @classmethod
112 | def from_temp(cls, dir=None, delete=True):
113 | return cls(tempfile.NamedTemporaryFile(dir=dir, delete=delete))
114 |
115 | @classmethod
116 | def from_buffer(cls, buffered_io, dir=None, delete=False):
117 | if not isinstance(buffered_io, io.BytesIO):
118 | raise ReceptorRuntimeError("buffer must be of type io.BytesIO")
119 | return cls(fp=buffered_io, length=buffered_io.getbuffer().nbytes)
120 |
121 | @classmethod
122 | def from_data(cls, raw_data, dir=None, delete=True):
123 | if isinstance(raw_data, str):
124 | raw_data = raw_data.encode()
125 | fbb = cls.from_temp(dir=dir, delete=delete)
126 | fbb.write(raw_data)
127 | return fbb
128 |
129 | @classmethod
130 | def from_dict(cls, raw_data, dir=None, delete=True):
131 | try:
132 | d = json.dumps(raw_data).encode("utf-8")
133 | except Exception as e:
134 | raise ReceptorRuntimeError("failed to encode raw data into json") from e
135 | fbb = cls.from_temp(dir=dir, delete=delete)
136 | fbb.write(d)
137 | return fbb
138 |
139 | @classmethod
140 | def from_path(cls, path):
141 | return cls(open(path, "rb"), os.path.getsize(path))
142 |
143 | @property
144 | def name(self):
145 | return self.fp.name
146 |
147 | @property
148 | def chunksize(self):
149 | """
150 | Returns a chunksize to be used when reading the data.
151 |
152 | Attempts to create 1024 chunks bounded by min and max chunk sizes.
153 | """
154 | return min(self._max_chunk, max(self._min_chunk, self.length // 1024))
155 |
156 | def write(self, data):
157 | written = self.fp.write(data)
158 | self.length += written
159 | return written
160 |
161 | def seek(self, offset):
162 | self.fp.seek(offset)
163 |
164 | def read(self, size=-1):
165 | return self.fp.read(size)
166 |
167 | def readall(self):
168 | pos = self.fp.tell()
169 | try:
170 | self.fp.seek(0)
171 | return self.fp.read()
172 | finally:
173 | self.fp.seek(pos)
174 |
175 | def flush(self):
176 | self.fp.flush()
177 |
178 | def __len__(self):
179 | return self.length
180 |
181 | def __str__(self):
182 |         return f"<FileBackedBuffer: name={self.name}, length={self.length}>"
183 |
184 |
185 | class FramedMessage:
186 | """
187 | FramedMessage is a container for a header and optional payload that
188 | encapsulates serialization for transmission across the network.
189 |
190 |     :param msg_id: should be an integer representation of a type 4 UUID
191 | :param header: should be a mapping
192 | :param payload: if set, should be a file-like object that exposes seek() and
193 | read() that accepts a size argument.
194 | """
195 |
196 | __slots__ = ("msg_id", "header", "payload")
197 |
198 | def __init__(self, msg_id=None, header=None, payload=None):
199 | if msg_id is None:
200 | msg_id = uuid.uuid4().int
201 | self.msg_id = msg_id
202 | self.header = header
203 | self.payload = payload
204 |
205 | def __repr__(self):
206 | return f"FramedMessage(msg_id={self.msg_id}, header={self.header}, payload={self.payload})"
207 |
208 | def __iter__(self):
209 | header_bytes = json.dumps(self.header).encode("utf-8")
210 | yield Frame.wrap(
211 | header_bytes,
212 | type_=Frame.Types.HEADER if self.payload else Frame.Types.COMMAND,
213 | msg_id=self.msg_id,
214 | ).serialize()
215 | yield header_bytes
216 | if self.payload:
217 | yield Frame.wrap(self.payload, msg_id=self.msg_id).serialize()
218 | self.payload.seek(0)
219 | reader = functools.partial(self.payload.read, size=self.payload.chunksize)
220 | for chunk in iter(reader, b""):
221 | yield chunk
222 |
223 | def serialize(self):
224 | return b"".join(self)
225 |
226 |
227 | class FramedBuffer:
228 | """
229 | A buffer that accumulates frames and bytes to produce a header and a
230 | payload.
231 |
232 | This buffer assumes that an entire message (denoted by msg_id) will be
233 | sent before another message is sent.
234 | """
235 |
236 | def __init__(self, loop=None):
237 | self.q = asyncio.Queue(loop=loop)
238 | self.header = None
239 | self.framebuffer = bytearray()
240 | self.bb = FileBackedBuffer.from_temp()
241 | self.current_frame = None
242 | self.to_read = 0
243 |
244 | async def put(self, data):
245 | if not self.to_read:
246 | return await self.handle_frame(data)
247 | await self.consume(data)
248 |
249 | async def handle_frame(self, data):
250 | try:
251 | self.framebuffer += data
252 | frame, rest = Frame.from_data(self.framebuffer)
253 | except struct.error:
254 | return # We don't have enough data yet
255 | else:
256 | self.framebuffer = bytearray()
257 |
258 | if frame.type not in Frame.Types:
259 | raise Exception("Unknown Frame Type")
260 |
261 | self.current_frame = frame
262 | self.to_read = self.current_frame.length
263 | await self.consume(rest)
264 |
265 | async def consume(self, data):
266 | data, rest = data[: self.to_read], data[self.to_read :]
267 | self.to_read -= self.bb.write(data)
268 | if self.to_read == 0:
269 | await self.finish()
270 | if rest:
271 | await self.handle_frame(rest)
272 |
273 | async def finish(self):
274 | if self.current_frame.type == Frame.Types.HEADER:
275 | self.bb.seek(0)
276 | self.header = json.load(self.bb)
277 | elif self.current_frame.type == Frame.Types.PAYLOAD:
278 | await self.q.put(
279 | FramedMessage(self.current_frame.msg_id, header=self.header, payload=self.bb)
280 | )
281 | self.header = None
282 | elif self.current_frame.type == Frame.Types.COMMAND:
283 | self.bb.seek(0)
284 | await self.q.put(
285 | FramedMessage(msg_id=self.current_frame.msg_id, header=json.load(self.bb))
286 | )
287 | else:
288 | raise Exception("Unknown Frame Type")
289 | self.to_read = 0
290 | self.bb = FileBackedBuffer.from_temp()
291 |
292 | async def get(self, timeout=None):
293 | return await asyncio.wait_for(self.q.get(), timeout)
294 |
295 | def get_nowait(self):
296 | return self.q.get_nowait()
297 |
--------------------------------------------------------------------------------
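A round-trip sketch of the framing layer described above: each Frame serializes to a fixed 26-byte header (struct format ``>ccIIQQ``: type, version, frame id, length, and the 128-bit msg_id split into two 64-bit words), and a FramedMessage iterates as header frame, header bytes, then optionally a payload frame plus payload chunks::

    import uuid

    from receptor.messages.framed import FileBackedBuffer, Frame, FramedMessage

    msg_id = uuid.uuid4().int
    frame = Frame.wrap(b"hello", type_=Frame.Types.PAYLOAD, msg_id=msg_id)
    assert len(frame.serialize()) == Frame.fmt.size == 26
    assert Frame.deserialize(frame.serialize()).msg_id == msg_id

    message = FramedMessage(
        header={"recipient": "node-b", "directive": "demo:execute"},
        payload=FileBackedBuffer.from_data(b"payload bytes"),
    )
    wire = message.serialize()   # what actually crosses the network
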
/receptor/plugin_utils.py:
--------------------------------------------------------------------------------
1 | BYTES_PAYLOAD = "bytes"
2 | """
3 | Inform Receptor that the given plugin expects BYTES for the message data
4 | """
5 |
6 | BUFFER_PAYLOAD = "buffer"
7 | """
8 | Inform Receptor that the given plugin expects a buffered reader for the message data
9 | """
10 |
11 | FILE_PAYLOAD = "file"
12 | """
13 | Inform Receptor that the given plugin expects a file path for the message data
14 | """
15 |
16 |
17 | def plugin_export(payload_type):
18 | """
19 | A decorator intended to be used by Receptor plugins in conjunction with
20 | entrypoints typically defined in your setup.py file::
21 |
22 | entry_points={
23 | 'receptor.worker':
24 | 'your_package_name = your_package_name.your_module',
25 | }
26 |
27 | ``your_package_name.your_module`` should then contain a function decorated with
28 | ``plugin_export`` as such::
29 |
30 |         @receptor.plugin_export(payload_type=receptor.BYTES_PAYLOAD)
31 | def execute(message, config, result_queue):
32 | result_queue.put("My plugin ran!")
33 |
34 | You can then send messages to this plugin across the Receptor mesh with the directive
35 | ``your_package_name:execute``
36 |
37 | Depending on what kind of data you expect to receive you can select from one
38 | of 3 different incoming payload types. This determines the incoming type of the
39 | ``message`` data type:
40 |
41 | * BYTES_PAYLOAD: This will give you literal python bytes that you can then read
42 | and interpret.
43 | * BUFFER_PAYLOAD: This will send you a buffer that you can read(). This buffer
44 | will be automatically closed and its contents discarded when your plugin returns.
45 | * FILE_PAYLOAD: This will return you a file path that you can open() or manage
46 | in any way you see fit. It will be automatically removed after your plugin returns.
47 |
48 | For more information about developing plugins see :ref:`plugins`.
49 | """
50 |
51 | def decorator(func):
52 | func.receptor_export = True
53 | func.payload_type = payload_type
54 | return func
55 |
56 | return decorator
57 |
--------------------------------------------------------------------------------
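A complete (hypothetical) plugin module following the decorator contract described above; with the matching ``receptor.worker`` entry point registered, it would be reachable via the directive ``your_package_name:execute``::

    # your_package_name/your_module.py
    import receptor

    @receptor.plugin_export(payload_type=receptor.BYTES_PAYLOAD)
    def execute(message, config, result_queue):
        # BYTES_PAYLOAD means `message` arrives as raw bytes.
        result_queue.put(b"echo: " + message)
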
/receptor/receptor.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import collections
3 | import json
4 | import logging
5 | import os
6 | import time
7 | import uuid
8 |
9 | import pkg_resources
10 |
11 | from . import exceptions, fileio, stats
12 | from .buffers.file import FileBufferManager
13 | from .exceptions import ReceptorMessageError
14 | from .messages import directive, framed
15 | from .router import MeshRouter
16 | from .work import WorkManager
17 |
18 | RECEPTOR_DIRECTIVE_NAMESPACE = "receptor"
19 | logger = logging.getLogger(__name__)
20 |
21 |
22 | class Manifest:
23 | def __init__(self, path):
24 | self.path = path
25 | self.lock = asyncio.Lock()
26 |
27 | async def watch_expire(self, buffer_mgr):
28 | while True:
29 | async with self.lock:
30 | current_manifest = await self.get()
31 | to_remove = set()
32 | for connection in current_manifest:
33 | buffer = buffer_mgr[connection["id"]]
34 | await buffer.expire_all()
35 | if connection["last"] + 86400 < time.time():
36 | to_remove.add(connection["id"])
37 | if to_remove:
38 | await self.write([c for c in current_manifest if c["id"] not in to_remove])
39 | await asyncio.sleep(600)
40 |
41 | async def get(self):
42 | if not os.path.exists(self.path):
43 | return []
44 | try:
45 | data = await fileio.read(self.path, "r")
46 | return json.loads(data)
47 | except Exception as e:
48 |             logger.warning("Failed to read connection manifest: %s", e)
49 |             logger.exception("Failed to read connection manifest")
50 | return []
51 |
52 | async def write(self, manifest):
53 | await fileio.write(self.path, json.dumps(manifest), mode="w")
54 |
55 | async def update(self, connection):
56 | async with self.lock:
57 | manifest = await self.get()
58 | found = False
59 | for node in manifest:
60 | if node["id"] == connection:
61 | node["last"] = time.time()
62 | found = True
63 | break
64 | if not found:
65 | manifest.append(dict(id=connection, last=time.time()))
66 | await self.write(manifest)
67 |
68 | async def remove(self, connection):
69 | async with self.lock:
70 | logger.info("Expiring connection %s", connection)
71 | current = await self.get()
72 | manifest = [m for m in current if m["id"] != connection]
73 | await self.write(manifest)
74 |
75 |
76 | class Receptor:
77 |     """Owns all connections and manages adding and removing them."""
78 |
79 | def __init__(
80 | self, config, node_id=None, router_cls=None, work_manager_cls=None, response_queue=None
81 | ):
82 | self.config = config
83 | self.node_id = node_id or self.config.default_node_id or self._find_node_id()
84 | self.router = (router_cls or MeshRouter)(self)
85 | self.route_sender_task = None
86 | self.route_send_time = time.time()
87 | self.last_sent_seq = None
88 | self.route_adv_seen = dict()
89 | self.work_manager = (work_manager_cls or WorkManager)(self)
90 | self.connections = dict()
91 | self.response_queue = response_queue
92 | self.base_path = os.path.join(self.config.default_data_dir, self.node_id)
93 | if not os.path.exists(self.base_path):
94 |             os.makedirs(self.base_path)
95 | self.connection_manifest = Manifest(os.path.join(self.base_path, "connection_manifest"))
96 |         path = os.path.expanduser(self.base_path)
97 | self.buffer_mgr = FileBufferManager(path)
98 | self.stop = False
99 | self.known_nodes = collections.defaultdict(
100 | lambda: dict(capabilities=dict(), sequence=0, seq_epoch=0.0, connections=dict())
101 | )
102 | self.known_nodes[self.node_id]["seq_epoch"] = time.time()
103 | self.known_nodes[self.node_id]["capabilities"] = self.work_manager.get_capabilities()
104 | try:
105 | receptor_dist = pkg_resources.get_distribution("receptor")
106 | receptor_version = receptor_dist.version
107 | except pkg_resources.DistributionNotFound:
108 | receptor_version = "unknown"
109 | stats.receptor_info.info(dict(node_id=self.node_id, receptor_version=receptor_version))
110 |
111 | def _find_node_id(self):
112 | if "RECEPTOR_NODE_ID" in os.environ:
113 | return os.environ["RECEPTOR_NODE_ID"]
114 |
115 | node_id = uuid.uuid4()
116 | if os.path.exists(os.path.join(os.getcwd(), "Pipfile")):
117 | with open(os.path.join(os.getcwd(), ".env"), "w+") as ofs:
118 | ofs.write(f"\nRECEPTOR_NODE_ID={node_id}\n")
119 | return str(node_id)
120 |
121 | async def message_handler(self, buf):
122 | logger.debug("spawning message_handler")
123 | while True:
124 | try:
125 | data = await buf.get()
126 | except asyncio.CancelledError:
127 | logger.debug("message_handler: cancel request received")
128 | break
129 | except Exception:
130 | logger.exception("message_handler")
131 | break
132 | else:
133 | if "cmd" in data.header and data.header["cmd"].startswith("ROUTE"):
134 | await self.handle_route_advertisement(data.header)
135 | else:
136 | asyncio.ensure_future(self.handle_message(data))
137 |
138 | async def update_connections(self, protocol_obj, id_=None):
139 | if id_ is None:
140 | id_ = protocol_obj.id
141 |
142 | routing_changed = False
143 | if id_ in self.connections:
144 | if protocol_obj not in self.connections[id_]:
145 | self.connections[id_].append(protocol_obj)
146 | routing_changed = True
147 | else:
148 | self.connections[id_] = [protocol_obj]
149 | routing_changed = True
150 | await self.connection_manifest.update(id_)
151 |
152 | if routing_changed:
153 | await self.recalculate_and_send_routes_soon()
154 |
155 | stats.connected_peers_gauge.inc()
156 |
157 | async def remove_ephemeral(self, node):
158 | logger.debug(f"Removing ephemeral node {node}")
159 | changed = False
160 | if node in self.connections:
161 | await self.connection_manifest.remove(node)
162 | changed = True
163 | if node in self.known_nodes:
164 | del self.known_nodes[node]
165 | changed = True
166 | if changed:
167 | await self.recalculate_and_send_routes_soon()
168 |
169 | async def remove_connection(self, protocol_obj, id_=None):
170 | routing_changed = False
171 | for connection_node in self.connections:
172 | if protocol_obj in self.connections[connection_node]:
173 | routing_changed = True
174 | logger.info(f"Removing connection for node {connection_node}")
175 | if self.is_ephemeral(connection_node):
176 | self.connections[connection_node].remove(protocol_obj)
177 | await self.remove_ephemeral(connection_node)
178 | else:
179 | self.connections[connection_node].remove(protocol_obj)
180 | await self.connection_manifest.update(connection_node)
181 | if routing_changed:
182 | await self.recalculate_and_send_routes_soon()
183 | stats.connected_peers_gauge.dec()
184 |
185 | def is_ephemeral(self, id_):
186 | return (
187 | id_ in self.known_nodes
188 | and "ephemeral" in self.known_nodes[id_]["capabilities"]
189 | and self.known_nodes[id_]["capabilities"]["ephemeral"]
190 | )
191 |
192 | async def remove_connection_by_id(self, id_, loop=None):
193 | if id_ in self.connections:
194 | for protocol_obj in self.connections[id_]:
195 | await self.remove_connection(protocol_obj, id_)
196 |
197 | async def shutdown_handler(self):
198 | while True:
199 | if self.stop:
200 | return
201 | await asyncio.sleep(1)
202 |
203 | def _say_hi(self):
204 | return framed.FramedMessage(
205 | header={
206 | "cmd": "HI",
207 | "id": self.node_id,
208 | "expire_time": time.time() + 10,
209 | "meta": dict(
210 | capabilities=self.work_manager.get_capabilities(),
211 | groups=self.config.node_groups,
212 | work=self.work_manager.get_work(),
213 | ),
214 | }
215 | )
216 |
217 | async def recalculate_routes(self):
218 | """Construct local routing table from source data"""
219 | edge_costs = dict()
220 | logger.debug("Constructing routing table")
221 | for node in self.connections:
222 | if self.connections[node]:
223 | edge_costs[tuple(sorted([self.node_id, node]))] = 1
224 | manifest = await self.connection_manifest.get()
225 | for node in manifest:
226 | node_key = tuple(sorted([self.node_id, node["id"]]))
227 | if node_key not in edge_costs:
228 | edge_costs[node_key] = 100
229 | for node in self.known_nodes:
230 | if node == self.node_id:
231 | continue
232 | for conn, cost in self.known_nodes[node]["connections"].items():
233 | node_key = tuple(sorted([node, conn]))
234 | if node_key not in edge_costs:
235 | edge_costs[node_key] = cost
236 |
237 | new_edges = [(key[0], key[1], value) for key, value in edge_costs.items()]
238 | if new_edges == self.router.get_edges():
239 | logger.debug(f" Routing not changed. Existing table: {self.router.get_edges()}")
240 | return False
241 | else:
242 | self.router.add_or_update_edges(new_edges, replace_all=True)
243 | logger.debug(f" Routing updated. New table: {self.router.get_edges()}")
244 | return True
245 |
246 | async def send_routes(self):
247 | """Send routing update to connected peers"""
248 | route_adv_id = str(uuid.uuid4())
249 | seq = self.known_nodes[self.node_id]["sequence"] + 1
250 | self.known_nodes[self.node_id]["sequence"] = seq
251 | logger.debug(f"Sending route advertisement {route_adv_id} seq {seq}")
252 | self.last_sent_seq = seq
253 |
254 | advertised_connections = dict()
255 | for node1, node2, cost in self.router.get_edges():
256 | other_node = (
257 | node1 if node2 == self.node_id else node2 if node1 == self.node_id else None
258 | )
259 | if other_node:
260 | advertised_connections[other_node] = cost
261 | logger.debug(f" Advertised connections: {advertised_connections}")
262 |
263 | for node_id in self.connections:
264 | if not self.connections[node_id]:
265 | continue
266 | buf = self.buffer_mgr[node_id]
267 | try:
268 | msg = framed.FramedMessage(
269 | header={
270 | "cmd": "ROUTE2",
271 | "recipient": node_id,
272 | "id": self.node_id,
273 | "origin": self.node_id,
274 | "route_adv_id": route_adv_id,
275 | "connections": advertised_connections,
276 | "seq_epoch": self.known_nodes[self.node_id]["seq_epoch"],
277 | "sequence": seq,
278 | "node_capabilities": {
279 | node: value["capabilities"]
280 | for (node, value) in self.known_nodes.items()
281 | },
282 | }
283 | )
284 | await buf.put(msg.serialize())
285 | logger.debug(f" Sent to {node_id}")
286 | except Exception as e:
287 | logger.exception("Error trying to send route update: {}".format(e))
288 |
289 | async def route_send_check(self, force_send=False):
290 | while time.time() < self.route_send_time:
291 | await asyncio.sleep(self.route_send_time - time.time())
292 | self.route_sender_task = None
293 | routes_changed = await self.recalculate_routes()
294 | if (
295 | force_send
296 | or routes_changed
297 | or self.known_nodes[self.node_id]["sequence"] != self.last_sent_seq
298 | ):
299 | await self.send_routes()
300 |
301 | async def recalculate_and_send_routes_soon(self, force_send=False):
302 | self.route_send_time = time.time() + 0.1
303 | if not self.route_sender_task:
304 | self.route_sender_task = asyncio.ensure_future(self.route_send_check(force_send))
305 |
306 | async def handle_route_advertisement(self, data):
307 |
308 | # Sanity checks of the message
309 | if "origin" in data:
310 | origin = data["origin"]
311 | else:
312 | raise exceptions.UnknownMessageType("Malformed route advertisement: No origin")
313 | if (
314 | "cmd" not in data
315 | or "route_adv_id" not in data
316 | or "seq_epoch" not in data
317 | or "sequence" not in data
318 | or data["cmd"] != "ROUTE2"
319 | ):
320 | raise exceptions.UnknownMessageType(
321 | f"Unknown route advertisement protocol received from {origin}"
322 | )
323 | logger.debug(
324 | f"Route advertisement {data['route_adv_id']} seq {data['sequence']} "
325 |             + f"received from {origin} via {data['id']}"
326 | )
327 |
328 | # Check if we received an update about ourselves
329 | if origin == self.node_id:
330 | logger.debug(f"Ignoring route advertisement {data['sequence']} from ourselves")
331 | return
332 |
333 | # Check that we have not seen this exact update before
334 | expire_time = time.time() - 600
335 | self.route_adv_seen = {
336 | raid: exp for (raid, exp) in self.route_adv_seen.items() if exp > expire_time
337 | }
338 | if data["route_adv_id"] in self.route_adv_seen:
339 | logger.debug(f"Ignoring already-seen route advertisement {data['route_adv_id']}")
340 | return
341 | self.route_adv_seen[data["route_adv_id"]] = time.time()
342 |
343 | # If this is the first time we've seen this node, advertise ourselves to it
344 | if origin not in self.known_nodes:
345 | await self.recalculate_and_send_routes_soon(force_send=True)
346 |
347 |         # Check that the seq_epoch and sequence are not older than what we already have
348 | if origin in self.known_nodes and (
349 | self.known_nodes[origin]["seq_epoch"] > data["seq_epoch"]
350 | or self.known_nodes[origin]["sequence"] >= data["sequence"]
351 | ):
352 |             logger.warning(
353 |                 f"Ignoring routing update {data['route_adv_id']} from {origin} "
354 |                 + f"epoch {data['seq_epoch']} seq {data['sequence']} because we already have "
355 |                 + f"epoch {self.known_nodes[origin]['seq_epoch']} "
356 |                 + f"seq {self.known_nodes[origin]['sequence']}"
357 | )
358 | return
359 |
360 | # TODO: don't just assume this is all correct
361 | if "node_capabilities" in data:
362 | for node, caps in data["node_capabilities"].items():
363 | self.known_nodes[node]["capabilities"] = caps
364 |
365 | # Remove any orphaned leaf nodes
366 | unreachable = set()
367 | for node in self.known_nodes:
368 | if node == self.node_id:
369 | continue
370 | if (
371 | len(self.known_nodes[node]["connections"]) == 1
372 | and origin in self.known_nodes[node]["connections"]
373 | and node not in data["connections"]
374 | ):
375 | unreachable.add(node)
376 | for node in unreachable:
377 | logger.debug(f"Removing orphaned node {node}")
378 | del self.known_nodes[node]
379 |
380 | # Update our own routing table based on the data we just received
381 | self.known_nodes[origin]["connections"] = data["connections"]
382 | self.known_nodes[origin]["seq_epoch"] = data["seq_epoch"]
383 | self.known_nodes[origin]["sequence"] = data["sequence"]
384 | await self.recalculate_routes()
385 |
386 | # Re-send the routing update to all our connections except the one it came in on
387 | for conn in self.connections:
388 | if conn == data["id"]:
389 | continue
390 | send_data = dict(data)
391 | buf = self.buffer_mgr[conn]
392 | try:
393 | send_data["id"] = self.node_id
394 | send_data["recipient"] = conn
395 | msg = framed.FramedMessage(header=send_data)
396 | await buf.put(msg.serialize())
397 | except Exception as e:
398 | logger.exception("Error trying to forward route broadcast: {}".format(e))
399 |
400 | async def handle_directive(self, msg):
401 | try:
402 | namespace, _ = msg.header["directive"].split(":", 1)
403 | logger.debug(f"directive namespace is {namespace}")
404 | if namespace == RECEPTOR_DIRECTIVE_NAMESPACE:
405 | await directive.control(self.router, msg)
406 | else:
407 | # TODO: other namespace/work directives
408 | await self.work_manager.handle(msg)
409 | except ReceptorMessageError as e:
410 |             logger.error(f"Receptor Message Error '{e}'")
411 | except ValueError:
412 | logger.error(
413 |                 f"error in handle_message: Invalid directive -> '{msg}'. "
414 |                 "Sending failure response back."
415 | )
416 | err_resp = framed.FramedMessage(
417 | header=dict(
418 | recipient=msg.header["sender"],
419 |                     in_response_to=msg.msg_id,
420 | serial=msg.header["serial"] + 1,
421 | code=1,
422 | ),
423 | payload="An invalid directive ('{}') was specified.".format(
424 | msg.header["directive"]
425 | ),
426 | )
427 | await self.router.send(err_resp)
428 | except Exception as e:
429 | logger.error("error in handle_message: '%s'. Sending failure response back.", str(e))
430 | err_resp = framed.FramedMessage(
431 | header=dict(
432 | recipient=msg.header["sender"],
433 | in_response_to=msg.msg_id,
434 | serial=msg.header["serial"] + 1,
435 | code=1,
436 | ),
437 | payload=f"{e}",
438 | )
439 | await self.router.send(err_resp)
440 |
441 | async def handle_response(self, msg):
442 | logger.debug("handle_response: %s", msg)
443 | in_response_to = msg.header["in_response_to"]
444 | if in_response_to in self.router.response_registry:
445 | logger.info(f"Handling response to {in_response_to} with callback.")
446 | await self.response_queue.put(msg)
447 | else:
448 | logger.warning(f"Received response to {in_response_to} but no record of sent message.")
449 |
450 | async def handle_message(self, msg):
451 | try:
452 | stats.messages_received_counter.inc()
453 |
454 | if msg.header["recipient"] != self.node_id:
455 | next_hop = self.router.next_hop(msg.header["recipient"])
456 | return await self.router.forward(msg, next_hop)
457 |
458 | if "in_response_to" in msg.header:
459 | await self.handle_response(msg)
460 | elif "directive" in msg.header:
461 | await self.handle_directive(msg)
462 | else:
463 | raise exceptions.UnknownMessageType(
464 | f"Failed to determine message type for data: {msg}"
465 | )
466 | except Exception:
467 | logger.exception("handle_message")
468 |
--------------------------------------------------------------------------------
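`recalculate_routes` above and MeshRouter below share one convention: an undirected edge is keyed by the sorted pair of node ids, so the same link is never stored twice regardless of which side reports it. In isolation::

    def edge_key(left, right):
        # Order-independent key: ("b", "a") and ("a", "b") are the same edge.
        return tuple(sorted([left, right]))

    assert edge_key("node-b", "node-a") == ("node-a", "node-b")
    assert edge_key("node-a", "node-b") == ("node-a", "node-b")
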
/receptor/router.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import datetime
3 | import heapq
4 | import itertools
5 | import logging
6 | import sys
7 | from collections import defaultdict
8 |
9 | from .exceptions import ReceptorBufferError, UnrouteableError
10 | from .messages.framed import FramedMessage
11 | from .stats import route_counter, route_info
12 |
13 | logger = logging.getLogger(__name__)
14 |
15 |
16 | class PriorityQueue:
17 |
18 | REMOVED = "$$$%%%%%%$$$"
19 |
20 | def __init__(self):
21 | self.heap = list()
22 | self.entry_finder = dict()
23 | self.counter = itertools.count()
24 |
25 | def add_with_priority(self, item, priority):
26 | """Adds an item to the queue, or changes the priority of an existing item."""
27 | if item in self.entry_finder:
28 | self.remove_item(item)
29 | count = next(self.counter)
30 | entry = [priority, count, item]
31 | self.entry_finder[item] = entry
32 | heapq.heappush(self.heap, entry)
33 |
34 | def remove_item(self, item):
35 | """Removes an item from the queue."""
36 | entry = self.entry_finder.pop(item)
37 | entry[-1] = self.REMOVED
38 |
39 | def pop_item(self):
40 | """Returns the item from the queue with the lowest sort order."""
41 | while self.heap:
42 | priority, count, item = heapq.heappop(self.heap)
43 | if item is not self.REMOVED:
44 | del self.entry_finder[item]
45 | return item
46 | raise KeyError("Pop from empty PriorityQueue")
47 |
48 | def is_empty(self):
49 | """Returns True if the queue is empty."""
50 | for entry in self.heap:
51 | if entry[-1] is not self.REMOVED:
52 | return False
53 | return True
54 |
55 |
56 | class MeshRouter:
57 | def __init__(self, receptor=None, node_id=None):
58 | self._nodes = set()
59 | self._edges = dict()
60 | self._neighbors = defaultdict(set)
61 | self.response_registry = dict()
62 | self.receptor = receptor
63 | if node_id:
64 | self.node_id = node_id
65 | elif receptor:
66 | self.node_id = receptor.node_id
67 | else:
68 | raise RuntimeError("Unknown node_id")
69 | self.routing_table = dict()
70 | route_info.info(dict(edges="()"))
71 |
72 | def node_is_known(self, node_id):
73 | return node_id in self._nodes or node_id == self.node_id
74 |
75 | def add_or_update_edges(self, edges, replace_all=False):
76 | """
77 | Adds a list of edges supplied as (node1, node2, cost) tuples.
78 | Already-existing edges have their cost updated.
79 | Supplying a cost of None removes the edge.
80 | """
81 | if replace_all:
82 | self._nodes = set()
83 | self._edges = dict()
84 | self._neighbors = defaultdict(set)
85 | for left, right, cost in edges:
86 | edge_key = tuple(sorted([left, right]))
87 | if edge_key not in self._edges:
88 | self._neighbors[left].add(right)
89 | self._neighbors[right].add(left)
90 | for node in edge_key:
91 | if node != self.node_id:
92 | self._nodes.add(node)
93 | self._edges[edge_key] = cost
94 | elif cost is None:
95 | del self._edges[edge_key]
96 | else:
97 | self._edges[edge_key] = cost
98 | self.update_routing_table()
99 | route_info.info(dict(edges=str(set(self.get_edges()))))
100 |
101 | def remove_node(self, node):
102 | """Removes a node and its associated edges."""
103 | edge_keys = [ek for ek in self._edges.keys() if ek[0] == node or ek[1] == node]
104 | for ek in edge_keys:
105 | del self._edges[ek]
106 | if node in self._neighbors:
107 | for neighbor in self._neighbors[node]:
108 | if node in self._neighbors[neighbor]:
109 | self._neighbors[neighbor].remove(node)
110 | del self._neighbors[node]
111 | if node in self._nodes:
112 | self._nodes.remove(node)
113 | route_info.info(dict(edges=str(set(self.get_edges()))))
114 |
115 | def get_edge_keys(self):
116 |         """Returns the set of edge keys as sorted node-pair tuples."""
117 | return set(self._edges.keys())
118 |
119 | def get_edges(self):
120 | """Returns set of edges as a list of (node1, node2, cost) tuples."""
121 | return [(ek[0], ek[1], cost) for ek, cost in self._edges.items()]
122 |
123 | def get_nodes(self):
124 | """Returns the list of nodes known to the router."""
125 | return [node for node in self._nodes]
126 |
127 | def get_neighbors(self, node):
128 | """Returns the set of nodes which are neighbors of the given node."""
129 | return self._neighbors[node]
130 |
131 | def get_edge_cost(self, node1, node2):
132 | """Returns the cost of the edge between node1 and node2."""
133 | if node1 == node2:
134 | return 0
135 | node_key = tuple(sorted([node1, node2]))
136 | if node_key in self._edges:
137 | return self._edges[node_key]
138 | else:
139 | return None
140 |
141 | def update_routing_table(self):
142 | """Dijkstra's algorithm"""
143 | Q = PriorityQueue()
144 | Q.add_with_priority(self.node_id, 0)
145 | cost = {self.node_id: 0}
146 | prev = dict()
147 |
148 | for node in self._nodes:
149 | cost[node] = sys.maxsize # poor man's infinity
150 | prev[node] = None
151 | Q.add_with_priority(node, cost[node])
152 |
153 | while not Q.is_empty():
154 | node = Q.pop_item()
155 | for neighbor in self.get_neighbors(node):
156 |                 edge_cost = self.get_edge_cost(node, neighbor)  # None if edge was removed
157 |                 if edge_cost is not None and cost[node] + edge_cost < cost[neighbor]:
158 |                     cost[neighbor] = cost[node] + edge_cost
159 |                     prev[neighbor] = node
160 |                     Q.add_with_priority(neighbor, cost[neighbor])
161 |
162 |         new_routing_table = dict()
163 |         for dest in self._nodes:
164 |             p = dest
165 |             while p is not None and prev[p] != self.node_id:
166 |                 p = prev[p]  # p becomes None when dest is unreachable
167 |             new_routing_table[dest] = (p, cost[dest])
168 |         self.routing_table = new_routing_table
169 |
170 | def next_hop(self, recipient):
171 |         """
172 |         Return the node ID of the next hop for routing a message to the
173 |         given recipient. Returns the current node's ID if it is itself
174 |         the recipient, and None if there is no known path.
175 |         """
176 | if recipient == self.node_id:
177 | return self.node_id
178 | elif recipient in self.routing_table:
179 | return self.routing_table[recipient][0]
180 | else:
181 | return None
182 |
183 | async def ping_node(self, node_id, expected_response=True):
184 | now = datetime.datetime.utcnow()
185 | logger.info(f"Sending ping to node {node_id}, timestamp={now}")
186 | message = FramedMessage(
187 | header=dict(
188 | sender=self.node_id, recipient=node_id, timestamp=now, directive="receptor:ping"
189 | )
190 | )
191 | return await self.send(message, expected_response)
192 |
193 | async def forward(self, msg, next_hop):
194 | """
195 | Forward a message on to the next hop closer to its destination
196 | """
197 | buffer_obj = self.receptor.buffer_mgr[next_hop]
198 |         if msg.header.get("route_list", [])[-1:] != [self.node_id]:  # handle missing route_list
199 |             msg.header.setdefault("route_list", []).append(self.node_id)
200 | logger.debug(f"Forwarding frame {msg.msg_id} to {next_hop}")
201 | try:
202 | route_counter.inc()
203 | await buffer_obj.put(msg)
204 | except ReceptorBufferError as e:
205 | logger.exception(
206 | "Receptor Buffer Write Error forwarding message to {}: {}".format(next_hop, e)
207 | )
208 | # TODO: Possible to find another route? This might be a hard failure
209 | except Exception as e:
210 | logger.exception("Error trying to forward message to {}: {}".format(next_hop, e))
211 |
212 | async def send(self, message, expected_response=False):
213 | """
214 | Send a new message with the given outer envelope.
215 | """
216 | recipient = message.header["recipient"]
217 | next_node_id = self.next_hop(recipient)
218 | if not next_node_id:
219 | # TODO: This probably needs to emit an error response
220 | raise UnrouteableError(f"No route found to {recipient}")
221 |
222 | # TODO: Not signing/serializing in order to finish buffered output work
223 |
224 | message.header.update({"sender": self.node_id, "route_list": [self.node_id]})
225 | logger.debug(f"Sending {message.msg_id} to {recipient} via {next_node_id}")
226 | if expected_response and "directive" in message.header:
227 | self.response_registry[message.msg_id] = dict(
228 | message_sent_time=message.header["timestamp"]
229 | )
230 | if next_node_id == self.node_id:
231 | asyncio.ensure_future(self.receptor.handle_message(message))
232 | else:
233 | await self.forward(message, next_node_id)
234 | return message.msg_id
235 |
--------------------------------------------------------------------------------
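
A usage sketch of MeshRouter, consistent with the unit tests later in this
listing: edges are (node1, node2, cost) tuples and next_hop names the neighbor
a message should be forwarded through:

    from receptor.router import MeshRouter

    router = MeshRouter(node_id="a")
    router.add_or_update_edges([("a", "b", 1), ("b", "c", 1)])

    assert router.next_hop("c") == "b"       # two hops away, reached via b
    assert router.next_hop("a") == "a"       # this node is the recipient
    assert router.next_hop("nope") is None   # unknown node: no route
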
/receptor/serde.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import json
3 | from functools import partial, singledispatch
4 |
5 | decoders = {}
6 |
7 |
8 | def decoder(typename):
9 | def _inner(func):
10 | decoders[typename] = func
11 | return func
12 |
13 | return _inner
14 |
15 |
16 | def decode(o):
17 | try:
18 | return decoders[o["_type"]](o["value"])
19 | except Exception:
20 | return o
21 |
22 |
23 | @singledispatch
24 | def encode(o):
25 | return json.JSONEncoder().default(o)
26 |
27 |
28 | @encode.register(datetime.datetime)
29 | def encode_date(obj):
30 | return {"_type": "datetime.datetime", "value": obj.timestamp()}
31 |
32 |
33 | @decoder("datetime.datetime")
34 | def decode_date(value):
35 | return datetime.datetime.fromtimestamp(value)
36 |
37 |
38 | load = partial(json.load, object_hook=decode)
39 | loads = partial(json.loads, object_hook=decode)
40 | dump = partial(json.dump, default=encode)
41 | dumps = partial(json.dumps, default=encode)
42 |
--------------------------------------------------------------------------------
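
These helpers mirror the json module's load/loads/dump/dumps API while
round-tripping datetime values through the registered encoder/decoder pair:

    import datetime

    from receptor.serde import dumps, loads

    original = {"now": datetime.datetime.utcnow()}
    assert loads(dumps(original)) == original  # the datetime survives the round trip
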
/receptor/stats.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import Counter, Gauge, Info
2 |
3 | bytes_recv = Counter("bytes_recv", "Number of bytes received")
4 | messages_received_counter = Counter("incoming_messages", "Messages received from Receptor Peers")
5 | connected_peers_gauge = Gauge("connected_peers", "Number of active peer connections")
6 | work_counter = Counter(
7 | "work_events", "A count of the number of work events that have been received"
8 | )
9 | active_work_gauge = Gauge("active_work", "Amount of work currently being performed")
10 | route_counter = Counter(
11 | "route_events", "A count of the number of messages that have been routed elsewhere in the mesh"
12 | )
13 | route_info = Info("routing_table", "This node's view of the mesh routing table")
14 | receptor_info = Info("receptor_info", "Version and Node information of the current node")
15 | work_info = Info("worker_info", "Plugin information and versions")
16 |
--------------------------------------------------------------------------------
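
These are ordinary prometheus_client metrics, so a process can expose them with
the client library's built-in HTTP endpoint; a sketch (the port is illustrative,
and this wiring is not part of stats.py itself):

    from prometheus_client import start_http_server

    from receptor.stats import messages_received_counter

    start_http_server(9100)          # serve /metrics for scraping
    messages_received_counter.inc()  # incremented by handle_message in receptor.py
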
/receptor/work.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import concurrent.futures
3 | import datetime
4 | import logging
5 |
6 | import pkg_resources
7 |
8 | from . import exceptions
9 | from .bridgequeue import BridgeQueue
10 | from .messages.framed import FileBackedBuffer, FramedMessage
11 | from .plugin_utils import BUFFER_PAYLOAD, BYTES_PAYLOAD, FILE_PAYLOAD
12 | from .stats import active_work_gauge, work_counter, work_info
13 |
14 | logger = logging.getLogger(__name__)
15 |
16 |
17 | class WorkManager:
18 | def __init__(self, receptor):
19 | self.receptor = receptor
20 | work_info.info(dict(plugins=str(self.get_capabilities())))
21 | self.active_work = []
22 | self.thread_pool = concurrent.futures.ThreadPoolExecutor(
23 | max_workers=self.receptor.config.default_max_workers
24 | )
25 |
26 | def load_receptor_worker(self, name):
27 | entry_points = [
28 | x
29 | for x in filter(
30 | lambda x: x.name == name, pkg_resources.iter_entry_points("receptor.worker")
31 | )
32 | ]
33 | if not entry_points:
34 | raise exceptions.UnknownDirective(f"Error loading directive handlers for {name}")
35 | return entry_points[0].load()
36 |
37 | def get_capabilities(self):
38 | caps = {
39 | "worker_versions": {
40 | x.name: pkg_resources.get_distribution(x.resolve().__package__).version
41 | for x in pkg_resources.iter_entry_points("receptor.worker")
42 | },
43 | "max_work_threads": self.receptor.config.default_max_workers,
44 | }
45 | if self.receptor.config._is_ephemeral:
46 | caps["ephemeral"] = True
47 | return caps
48 |
49 | def get_work(self):
50 | return self.active_work
51 |
52 | def add_work(self, message):
53 | work_counter.inc()
54 | active_work_gauge.inc()
55 | self.active_work.append(
56 | dict(
57 | id=message.msg_id,
58 | directive=message.header["directive"],
59 | sender=message.header["sender"],
60 | )
61 | )
62 |
63 | def remove_work(self, message):
64 | for work in self.active_work:
65 | if message.msg_id == work["id"]:
66 | active_work_gauge.dec()
67 | self.active_work.remove(work)
68 |
69 | def resolve_payload_input(self, payload_type, payload):
70 | if payload_type == BUFFER_PAYLOAD:
71 | payload.seek(0)
72 | return payload
73 | elif payload_type == FILE_PAYLOAD:
74 | payload.flush()
75 | return payload.name
76 | return payload.readall()
77 |
78 | def get_action_method(self, directive):
79 | namespace, action = directive.split(":", 1)
80 | worker_module = self.load_receptor_worker(namespace)
81 | try:
82 | action_method = getattr(worker_module, action)
83 | except AttributeError:
84 | logger.exception(f"Could not load action {action} from {namespace}")
85 | raise exceptions.InvalidDirectiveAction(f"Invalid action {action} for {namespace}")
86 | if not getattr(action_method, "receptor_export", False):
87 |             logger.error(
88 |                 f"Not allowed to call {action} from {namespace} "
89 |                 "because it is not marked for export"
90 |             )
91 | raise exceptions.InvalidDirectiveAction(
92 | f"Access denied calling {action} for {namespace}"
93 | )
94 | return action_method, namespace
95 |
96 | async def handle(self, message):
97 | directive = message.header["directive"]
98 | logger.info(f"Handling work for {message.msg_id} as {directive}")
99 | try:
100 | serial = 0
101 | eof_response = None
102 | action_method, namespace = self.get_action_method(directive)
103 | payload_input_type = getattr(action_method, "payload_type", BYTES_PAYLOAD)
104 |
105 | self.add_work(message)
106 | response_queue = BridgeQueue()
107 | asyncio.wrap_future(
108 | self.thread_pool.submit(
109 | action_method,
110 | self.resolve_payload_input(payload_input_type, message.payload),
111 | self.receptor.config.plugins.get(namespace, {}),
112 | response_queue,
113 | )
114 | ).add_done_callback(lambda fut: response_queue.close())
115 |
116 | async for response in response_queue:
117 | serial += 1
118 | logger.debug(f"Response emitted for {message.msg_id}, serial {serial}")
119 | response_message = FramedMessage(
120 | header=dict(
121 | recipient=message.header["sender"],
122 | in_response_to=message.msg_id,
123 | serial=serial,
124 | timestamp=datetime.datetime.utcnow(),
125 | ),
126 | payload=FileBackedBuffer.from_data(response),
127 | )
128 | await self.receptor.router.send(response_message)
129 |
130 | except Exception as e:
131 |             logger.exception(
132 |                 "Error encountered while handling the response, replying with an error message"
133 |             )
134 | eof_response = FramedMessage(
135 | header=dict(
136 | recipient=message.header["sender"],
137 | in_response_to=message.msg_id,
138 | serial=serial + 1,
139 | code=1,
140 | timestamp=datetime.datetime.utcnow(),
141 | eof=True,
142 | ),
143 | payload=FileBackedBuffer.from_data(str(e)),
144 | )
145 | self.remove_work(message)
146 |
147 | if eof_response is None:
148 | eof_response = FramedMessage(
149 | header=dict(
150 | recipient=message.header["sender"],
151 | in_response_to=message.msg_id,
152 | serial=serial + 1,
153 | code=0,
154 | timestamp=datetime.datetime.utcnow(),
155 | eof=True,
156 | )
157 | )
158 | await self.receptor.router.send(eof_response)
159 |
--------------------------------------------------------------------------------
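
WorkManager.handle bridges a synchronous plugin to the event loop with a
BridgeQueue: the plugin runs in the thread pool, its output is consumed with
"async for", and closing the queue when the thread finishes ends the iteration.
A condensed sketch of that same pattern (the producer and its put() call are
assumptions mirroring how plugins feed the queue):

    import asyncio
    from concurrent.futures import ThreadPoolExecutor

    from receptor.bridgequeue import BridgeQueue

    def produce(queue):
        # Runs on a worker thread, exactly like a plugin action.
        for chunk in (b"one", b"two"):
            queue.put(chunk)

    async def consume():
        queue = BridgeQueue()
        pool = ThreadPoolExecutor(max_workers=1)
        # Same shape as WorkManager.handle: close the queue on completion so
        # the async iteration below terminates.
        asyncio.wrap_future(pool.submit(produce, queue)).add_done_callback(
            lambda fut: queue.close()
        )
        async for item in queue:
            print(item)

    asyncio.get_event_loop().run_until_complete(consume())
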
/receptor/worker/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/receptor/worker/__init__.py
--------------------------------------------------------------------------------
/receptor/worker/demo.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import subprocess
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 |
7 | def do_uptime(payload, config, queue):
8 |     # WorkManager runs this on a thread as action(payload, config, queue);
9 |     # queue.put() here assumes BridgeQueue's synchronous producer API.
10 |     queue.put(subprocess.check_output("uptime").decode("utf8"))
11 |
12 |
13 | # get_action_method refuses callables not marked for export (see work.py).
14 | do_uptime.receptor_export = True
15 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Copyright (c) 2019 Red Hat, Inc.
4 | # All Rights Reserved.
5 |
6 | from setuptools import setup, find_packages
7 |
8 | with open("README.md", "r") as f:
9 | long_description = f.read()
10 |
11 | setup(
12 | name="receptor",
13 | version="1.0.0",
14 | author="Red Hat",
15 | url="https://github.com/project-receptor/receptor",
16 | license="Apache",
17 | packages=find_packages(),
18 | long_description=long_description,
19 | long_description_content_type="text/markdown",
20 | python_requires=">=3.6",
21 |     install_requires=[
22 |         "prometheus_client>=0.7.1,<0.9",
23 |         "aiohttp>=3.6.2,<4.0",
24 |         "python-dateutil>=2.8.1",
25 |     ],
26 |     zip_safe=False,
27 |     entry_points={"console_scripts": ["receptor = receptor.__main__:main"]},
28 |     classifiers=["Programming Language :: Python :: 3"],
29 | )
30 |
--------------------------------------------------------------------------------
/test.ini:
--------------------------------------------------------------------------------
1 | [server]
2 | port=8889
3 |
4 | [peers]
5 | 127.0.0.1:8888
6 |
--------------------------------------------------------------------------------
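
test.ini pairs a listening port with a peer; the integration fixtures below
build an equivalent node from CLI-style arguments instead, roughly (paths and
ports illustrative):

    from receptor.config import ReceptorConfig
    from receptor.receptor import Receptor

    config = ReceptorConfig(
        ["--data-dir", "/tmp/receptor", "node",
         "--listen", "127.0.0.1:8889", "--peer", "127.0.0.1:8888"]
    )
    node = Receptor(config=config, node_id="node-b")
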
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/test/__init__.py
--------------------------------------------------------------------------------
/test/integration/test_main.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import socket
3 | from unittest.mock import patch
4 |
5 | import pytest
6 |
7 | import receptor
8 | from receptor.config import ReceptorConfig
9 | from receptor.receptor import Receptor
10 |
11 |
12 | @pytest.fixture
13 | def receptor_config(unused_tcp_port, tmpdir, type="node"):
14 | return ReceptorConfig(
15 | ["--data-dir", tmpdir.strpath, type, "--listen", "127.0.0.1:" + str(unused_tcp_port)]
16 | )
17 |
18 |
19 | @pytest.fixture
20 | def receptor_service(receptor_config):
21 | return Receptor(config=receptor_config, node_id="A")
22 |
23 |
24 | @pytest.fixture
25 | def receptor_service_factory(unused_tcp_port_factory, tmpdir):
26 | def _receptor_service(node_name, peer_ports=None, type="node"):
27 | if peer_ports is None:
28 | peer_ports = []
29 | peers = {"127.0.0.1:{}".format(p): "" for p in peer_ports}
30 | peer_config = []
31 | for peer in peers:
32 | peer_config.extend(["--peer", peer])
33 | base_config = [
34 | "--node-id",
35 | node_name,
36 | "--data-dir",
37 | tmpdir.strpath,
38 | type,
39 | "--listen",
40 |             "127.0.0.1:" + str(unused_tcp_port_factory()),
41 | ]
42 | base_config.extend(peer_config)
43 | receptor_config = ReceptorConfig(base_config)
44 | return Receptor(receptor_config)
45 |
46 | return _receptor_service
47 |
48 |
49 | async def connect_port(receptor_obj):
50 | n = 5
51 | while n:
52 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
53 | node, port = receptor_obj.config.node_listen[0].split(":")
54 | result = sock.connect_ex((node, int(port)))
55 | if result != 0:
56 | await asyncio.sleep(1)
57 | n = n - 1
58 | continue
59 | break
60 | receptor_obj.stop = True
61 |
62 |
63 | async def wait_for_time(seconds):
64 | await asyncio.sleep(seconds)
65 |
66 |
67 | @patch("receptor.connection.sock.serve")
68 | def test_main_node(mock_sock, event_loop, receptor_config):
69 | c = receptor.Controller(receptor_config, loop=event_loop)
70 | event_loop.call_soon(event_loop.create_task, connect_port(c.receptor))
71 | c.enable_server(receptor_config.node_listen)
72 | c.run()
73 | mock_sock.assert_called_once()
74 |
--------------------------------------------------------------------------------
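
Reduced to its essentials, the Controller pattern exercised by this test looks
as follows (a sketch; the test above also passes an explicit event loop):

    import receptor
    from receptor.config import ReceptorConfig

    config = ReceptorConfig(
        ["--data-dir", "/tmp/receptor", "node", "--listen", "127.0.0.1:8889"]
    )
    controller = receptor.Controller(config)
    controller.enable_server(config.node_listen)
    controller.run()
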
/test/perf/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/test/perf/__init__.py
--------------------------------------------------------------------------------
/test/perf/flat-mesh.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | nodes:
3 | controller:
4 | connections: []
5 | listen: receptor://127.0.0.1:8889
6 | name: controller
7 | stats_enable: true
8 | stats_port: null
9 | node1:
10 | connections:
11 | - controller
12 | listen: receptor://127.0.0.1:11111
13 | name: node1
14 | stats_enable: true
15 | stats_port: null
16 | node10:
17 | connections:
18 | - controller
19 | listen: null
20 | name: node10
21 | stats_enable: true
22 | stats_port: null
23 | node11:
24 | connections:
25 | - controller
26 | listen: null
27 | name: node11
28 | stats_enable: true
29 | stats_port: null
30 | node12:
31 | connections:
32 | - controller
33 | listen: null
34 | name: node12
35 | stats_enable: true
36 | stats_port: null
37 | node2:
38 | connections:
39 | - controller
40 | listen: null
41 | name: node2
42 | stats_enable: true
43 | stats_port: null
44 | node3:
45 | connections:
46 | - controller
47 | listen: null
48 | name: node3
49 | stats_enable: true
50 | stats_port: null
51 | node4:
52 | connections:
53 | - controller
54 | listen: null
55 | name: node4
56 | stats_enable: true
57 | stats_port: null
58 | node5:
59 | connections:
60 | - controller
61 | listen: null
62 | name: node5
63 | stats_enable: true
64 | stats_port: null
65 | node6:
66 | connections:
67 | - controller
68 | listen: null
69 | name: node6
70 | stats_enable: true
71 | stats_port: null
72 | node7:
73 | connections:
74 | - controller
75 | listen: null
76 | name: node7
77 | stats_enable: true
78 | stats_port: null
79 | node8:
80 | connections:
81 | - controller
82 | listen: null
83 | name: node8
84 | stats_enable: true
85 | stats_port: null
86 | node9:
87 | connections:
88 | - controller
89 | listen: null
90 | name: node9
91 | stats_enable: true
92 | stats_port: null
93 |
--------------------------------------------------------------------------------
/test/perf/random-mesh.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | nodes:
3 | controller:
4 | connections: []
5 | listen: receptor://127.0.0.1:8889
6 | name: controller
7 | stats_enable: true
8 | stats_port: null
9 | node1:
10 | connections:
11 | - controller
12 | listen: receptor://127.0.0.1:11111
13 | name: node1
14 | stats_enable: true
15 | stats_port: null
16 | node10:
17 | connections:
18 | - node12
19 | - node4
20 | listen: null
21 | name: node10
22 | stats_enable: true
23 | stats_port: null
24 | node11:
25 | connections:
26 | - node1
27 | listen: null
28 | name: node11
29 | stats_enable: true
30 | stats_port: null
31 | node12:
32 | connections:
33 | - node11
34 | - controller
35 | listen: null
36 | name: node12
37 | stats_enable: true
38 | stats_port: null
39 | node2:
40 | connections:
41 | - node1
42 | listen: null
43 | name: node2
44 | stats_enable: true
45 | stats_port: null
46 | node3:
47 | connections:
48 | - node6
49 | - node4
50 | listen: null
51 | name: node3
52 | stats_enable: true
53 | stats_port: null
54 | node4:
55 | connections:
56 | - node7
57 | - node2
58 | listen: null
59 | name: node4
60 | stats_enable: true
61 | stats_port: null
62 | node5:
63 | connections:
64 | - node8
65 | - node12
66 | listen: null
67 | name: node5
68 | stats_enable: true
69 | stats_port: null
70 | node6:
71 | connections:
72 | - node10
73 | - node3
74 | listen: null
75 | name: node6
76 | stats_enable: true
77 | stats_port: null
78 | node7:
79 | connections:
80 | - node3
81 | - node1
82 | listen: null
83 | name: node7
84 | stats_enable: true
85 | stats_port: null
86 | node8:
87 | connections:
88 | - node1
89 | listen: null
90 | name: node8
91 | stats_enable: true
92 | stats_port: null
93 | node9:
94 | connections:
95 | - node10
96 | - node5
97 | listen: null
98 | name: node9
99 | stats_enable: true
100 | stats_port: null
101 |
--------------------------------------------------------------------------------
/test/perf/test_ping.py:
--------------------------------------------------------------------------------
1 | from receptor_affinity.mesh import Mesh
2 | from wait_for import TimedOutError
3 | import time
4 | import pytest
5 |
6 |
7 | @pytest.fixture(
8 | scope="function",
9 | params=["test/perf/flat-mesh.yaml", "test/perf/tree-mesh.yaml", "test/perf/random-mesh.yaml"],
10 | ids=["flat", "tree", "random"],
11 | )
12 | def mesh(request):
13 | mesh = Mesh.load_from_file(request.param, use_diag_node=True)
14 | try:
15 | mesh.start(wait=True)
16 | yield mesh
17 |     except TimedOutError:
18 |         raise  # let a mesh start-up timeout fail the test loudly
19 | finally:
20 | print(f"{time.time()} - Stopping current mesh")
21 | print(mesh.nodes["controller"])
22 | mesh.stop()
23 |
24 |
25 | def test_pings_perf(mesh):
26 | results = mesh.ping()
27 | mesh.validate_ping_results(results)
28 |
--------------------------------------------------------------------------------
/test/perf/test_ports.py:
--------------------------------------------------------------------------------
1 | """Test port binding logic."""
2 | from uuid import uuid4
3 |
4 | import psutil
5 | import pytest
6 | from receptor_affinity import utils
7 | from receptor_affinity.exceptions import NodeUnavailableError
8 | from receptor_affinity.mesh import Node
9 |
10 |
11 | def test_invalid_listen_scheme():
12 | """Start a node, and give it a listen address with an invalid scheme.
13 |
14 | The node should fail to start. See `receptor #93`_.
15 |
16 | .. NOTE:: This test should be extended to check that a traceback isn't printed to stdout or
17 | stderr.
18 |
19 | .. _receptor #93: https://github.com/project-receptor/receptor/issues/93
20 | """
21 | node = Node(str(uuid4()), listen=f'harglebargle://127.0.0.1:{utils.random_port()}')
22 | with pytest.raises(NodeUnavailableError):
23 | node.start()
24 |
25 |
26 | def test_listen_fragment():
27 | """Start a node, and give it a listen address with a fragment.
28 |
29 | The node should fail to start. See `receptor #153`_.
30 |
31 | .. _receptor #153: https://github.com/project-receptor/receptor/issues/153
32 | """
33 | node = Node(str(uuid4()), listen=f'receptor://127.0.0.1:{utils.random_port()}#frag')
34 | with pytest.raises(NodeUnavailableError):
35 | node.start()
36 |
37 |
38 | def test_listen_path():
39 | """Start a node, and give it a listen address with a path.
40 |
41 | The node should fail to start. See `receptor #153`_.
42 |
43 | .. _receptor #153: https://github.com/project-receptor/receptor/issues/153
44 | """
45 | node = Node(str(uuid4()), listen=f'receptor://127.0.0.1:{utils.random_port()}/path')
46 | with pytest.raises(NodeUnavailableError):
47 | node.start()
48 |
49 |
50 | def test_listen_query():
51 | """Start a node, and give it a listen address with a query.
52 |
53 | The node should fail to start. See `receptor #153`_.
54 |
55 | .. _receptor #153: https://github.com/project-receptor/receptor/issues/153
56 | """
57 | node = Node(str(uuid4()), listen=f'receptor://127.0.0.1:{utils.random_port()}?key=val')
58 | with pytest.raises(NodeUnavailableError):
59 | node.start()
60 |
61 |
62 | def test_no_port_given():
63 | """Start a node, and don't specify a port on which to listen.
64 |
65 | Assert that it listens on port 8888. Older versions of receptor would listen on a random port.
66 | See: `receptor #138`_.
67 |
68 | .. _receptor #138: https://github.com/project-receptor/receptor/issues/138
69 | """
70 |     node = Node(str(uuid4()), listen='receptor://127.0.0.1')
71 | node.start()
72 | try:
73 | conns = psutil.Process(node.pid).connections()
74 | assert len(conns) == 1
75 | assert conns[0].laddr.port == 8888
76 | finally:
77 | node.stop()
78 |
--------------------------------------------------------------------------------
/test/perf/test_route.py:
--------------------------------------------------------------------------------
1 | from receptor_affinity.mesh import Node
2 | from receptor_affinity.mesh import Mesh
3 | from receptor_affinity.utils import random_port
4 | import time
5 |
6 | import pytest
7 |
8 |
9 | @pytest.fixture(scope="function")
10 | def random_mesh():
11 | mesh = Mesh.load_from_file("test/perf/random-mesh.yaml", use_diag_node=True)
12 | try:
13 | mesh.start(wait=True)
14 | yield mesh
15 | finally:
16 | mesh.stop()
17 |
18 |
19 | @pytest.fixture(scope="function")
20 | def tree_mesh():
21 | mesh = Mesh.load_from_file("test/perf/tree-mesh.yaml", use_diag_node=True)
22 | try:
23 | mesh.start(wait=True)
24 | yield mesh
25 | finally:
26 | mesh.stop()
27 |
28 |
29 | def test_default_routes_validate(random_mesh):
30 | random_mesh.validate_routes()
31 |
32 |
33 | def test_add_remove_node(random_mesh):
34 | nodeX = Node("nodeX", connections=["controller"], stats_enable=True, stats_port=random_port())
35 | random_mesh.add_node(nodeX)
36 | nodeX.start()
37 | random_mesh.settle()
38 | assert nodeX.ping(1) != "Failed"
39 | random_mesh.settle()
40 | assert "nodeX" in str(random_mesh.nodes["controller"].get_routes())
41 | random_mesh.validate_routes()
42 | nodeX.stop()
43 |
44 |
45 | def test_alternative_route(tree_mesh):
46 | nodeX = Node(
47 | "nodeX", connections=["node4", "node3"], stats_enable=True, stats_port=random_port()
48 | )
49 | tree_mesh.add_node(nodeX)
50 | nodeX.start()
51 | tree_mesh.settle()
52 | assert nodeX.ping(1) != "Failed"
53 | tree_mesh.settle()
54 | assert "nodeX" in str(tree_mesh.nodes["controller"].get_routes())
55 | tree_mesh.validate_routes()
56 | tree_mesh.nodes["node3"].stop()
57 | time.sleep(7)
58 | tree_mesh.settle()
59 |     # TODO: make ping fail faster for unreachable nodes, then re-enable this check
60 |     # that node3 is really dead: assert tree_mesh.nodes["node3"].ping() == "Failed"
61 | assert nodeX.ping(1) != "Failed"
62 | tree_mesh.nodes["node3"].start()
63 | tree_mesh.settle()
64 | tree_mesh.nodes["node4"].stop()
65 | time.sleep(7)
66 | assert nodeX.ping(1) != "Failed"
67 | nodeX.stop()
68 |
--------------------------------------------------------------------------------
/test/perf/test_websockets.py:
--------------------------------------------------------------------------------
1 | from receptor_affinity.mesh import Node
2 | from receptor_affinity.mesh import Mesh
3 | from receptor_affinity.utils import random_port
4 | import time
5 |
6 | import pytest
7 |
8 |
9 | @pytest.fixture(scope="function")
10 | def random_mesh():
11 | mesh = Mesh.load_from_file("test/perf/random-mesh.yaml", use_diag_node=True)
12 | try:
13 | mesh.start(wait=True)
14 | yield mesh
15 | finally:
16 | mesh.stop()
17 |
18 |
19 | def test_websocket_reconnect(random_mesh):
20 | nodeX = Node(
21 | "nodeX",
22 | connections=["node1"],
23 | stats_enable=True,
24 | stats_port=random_port(),
25 | listen=f"ws://127.0.0.1:{random_port()}",
26 | )
27 | nodeY = Node(
28 | "nodeY",
29 | connections=["nodeX"],
30 | stats_enable=True,
31 | stats_port=random_port(),
32 | listen=f"ws://127.0.0.1:{random_port()}",
33 | )
34 | random_mesh.add_node(nodeX)
35 | random_mesh.add_node(nodeY)
36 | nodeX.start()
37 | nodeY.start()
38 | random_mesh.settle()
39 | assert nodeY.ping(1) != "Failed"
40 | nodeX.stop()
41 | time.sleep(7)
42 | assert nodeY.ping(1) == "Failed"
43 | nodeX.start()
44 | time.sleep(7)
45 | random_mesh.settle()
46 | assert nodeY.ping(1) != "Failed"
47 | nodeY.stop()
48 | nodeX.stop()
49 |
--------------------------------------------------------------------------------
/test/perf/tree-mesh.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | nodes:
3 | controller:
4 | connections: []
5 | listen: receptor://127.0.0.1:8889
6 | name: controller
7 | stats_enable: true
8 | stats_port: null
9 | node1:
10 | connections:
11 | - controller
12 | listen: receptor://127.0.0.1:11111
13 | name: node1
14 | stats_enable: true
15 | stats_port: null
16 | node10:
17 | connections:
18 | - node9
19 | listen: null
20 | name: node10
21 | stats_enable: true
22 | stats_port: null
23 | node11:
24 | connections:
25 | - node9
26 | listen: null
27 | name: node11
28 | stats_enable: true
29 | stats_port: null
30 | node12:
31 | connections:
32 | - node9
33 | listen: null
34 | name: node12
35 | stats_enable: true
36 | stats_port: null
37 | node2:
38 | connections:
39 | - node1
40 | listen: null
41 | name: node2
42 | stats_enable: true
43 | stats_port: null
44 | node3:
45 | connections:
46 | - node1
47 | listen: null
48 | name: node3
49 | stats_enable: true
50 | stats_port: null
51 | node4:
52 | connections:
53 | - node1
54 | listen: null
55 | name: node4
56 | stats_enable: true
57 | stats_port: null
58 | node5:
59 | connections:
60 | - controller
61 | listen: null
62 | name: node5
63 | stats_enable: true
64 | stats_port: null
65 | node6:
66 | connections:
67 | - node5
68 | listen: null
69 | name: node6
70 | stats_enable: true
71 | stats_port: null
72 | node7:
73 | connections:
74 | - node5
75 | listen: null
76 | name: node7
77 | stats_enable: true
78 | stats_port: null
79 | node8:
80 | connections:
81 | - node5
82 | listen: null
83 | name: node8
84 | stats_enable: true
85 | stats_port: null
86 | node9:
87 | connections:
88 | - controller
89 | listen: null
90 | name: node9
91 | stats_enable: true
92 | stats_port: null
93 |
--------------------------------------------------------------------------------
/test/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/project-receptor/python-receptor/0eb9f0e3bd3b25bce948f7a2f43562f181a630a1/test/unit/__init__.py
--------------------------------------------------------------------------------
/test/unit/test_bridge_queue.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from receptor.connection.base import BridgeQueue
4 |
5 |
6 | async def read_all(q):
7 | buf = []
8 | async for item in q:
9 | buf.append(item)
10 | return buf
11 |
12 |
13 | @pytest.mark.asyncio
14 | async def test_one(event_loop):
15 | bq = BridgeQueue.one(b"this is a test")
16 | buf = await read_all(bq)
17 | assert buf[0] == b"this is a test"
18 |
--------------------------------------------------------------------------------
/test/unit/test_durable_buffer.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import os
3 | import shutil
4 | import tempfile
5 |
6 | import pytest
7 |
8 | from receptor import fileio
9 | from receptor.buffers.file import DurableBuffer
10 |
11 |
12 | @pytest.fixture
13 | def tempdir():
14 | dir_ = tempfile.mkdtemp()
15 | yield dir_
16 | shutil.rmtree(dir_)
17 |
18 |
19 | @pytest.mark.asyncio
20 | async def test_read(event_loop, tempdir):
21 | with tempfile.NamedTemporaryFile() as fp:
22 | fp.write(b"hello")
23 | fp.flush()
24 |
25 | data = await fileio.read(fp.name)
26 | assert data == b"hello"
27 |
28 |
29 | @pytest.mark.asyncio
30 | async def test_create(event_loop, tempdir):
31 | b = DurableBuffer(tempdir, "test_create", asyncio.get_event_loop())
32 | await b.put(b"some data")
33 | item = await b.get()
34 | data = await fileio.read(item["path"])
35 | assert data == b"some data"
36 |
37 |
38 | @pytest.mark.asyncio
39 | async def test_manifest(event_loop, tempdir):
40 | b = DurableBuffer(tempdir, "test_manifest", event_loop, write_time=0.0)
41 | await b.put(b"one")
42 | await b.put(b"two")
43 | await b.put(b"three")
44 |
45 | item = await b.get()
46 | data = await fileio.read(item["path"])
47 | assert data == b"one"
48 |
49 |
50 | @pytest.mark.asyncio
51 | async def test_chunks(event_loop, tempdir):
52 | b = DurableBuffer(tempdir, "test_chunks", event_loop, write_time=0.0)
53 | await b.put((b"one", b"two", b"three"))
54 |
55 | item = await b.get()
56 | data = await fileio.read(item["path"])
57 | assert data == b"onetwothree"
58 |
59 |
60 | @pytest.mark.asyncio
61 | async def test_unreadable_file(event_loop, tempdir):
62 | b = DurableBuffer(tempdir, "test_unreadable_file", event_loop)
63 | b.q._queue.appendleft("junk")
64 | await b.put(b"valid data")
65 | item = await b.get()
66 | data = await fileio.read(item["path"])
67 | assert data == b"valid data"
68 | assert b.q.empty()
69 |
70 |
71 | @pytest.mark.asyncio
72 | async def test_does_not_delete_messages(event_loop, tempdir):
73 | b = DurableBuffer(tempdir, "test_deletes_messages", event_loop, write_time=0.0)
74 | await b.put(b"some data")
75 | item = await b.get()
76 | data = await fileio.read(item["path"])
77 | assert data == b"some data"
78 | await b._manifest_clean.wait()
79 | assert os.path.exists(item["path"])
80 |
--------------------------------------------------------------------------------
/test/unit/test_framedbuffer.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import json
3 | import uuid
4 |
5 | import pytest
6 |
7 | from receptor.messages.framed import FileBackedBuffer, Frame, FramedBuffer, FramedMessage
8 |
9 |
10 | @pytest.fixture
11 | def msg_id():
12 | return uuid.uuid4().int
13 |
14 |
15 | @pytest.fixture
16 | def framed_buffer(event_loop):
17 | return FramedBuffer(loop=event_loop)
18 |
19 |
20 | @pytest.mark.asyncio
21 | async def test_framedbuffer(framed_buffer, msg_id):
22 | header = {"sender": "node1", "recipient": "node2", "route_list": []}
23 | header_bytes = json.dumps(header).encode("utf-8")
24 | f1 = Frame(Frame.Types.HEADER, 1, len(header_bytes), msg_id, 1)
25 |
26 | await framed_buffer.put(f1.serialize() + header_bytes)
27 |
28 | payload = b"payload one is very boring"
29 | payload2 = b"payload two is also very boring"
30 | f2 = Frame(Frame.Types.PAYLOAD, 1, len(payload) + len(payload2), msg_id, 2)
31 |
32 | await framed_buffer.put(f2.serialize() + payload)
33 | await framed_buffer.put(payload2)
34 |
35 | m = await framed_buffer.get()
36 |
37 | assert m.header == header
38 | assert m.payload.readall() == payload + payload2
39 |
40 |
41 | @pytest.mark.asyncio
42 | async def test_hi(msg_id, framed_buffer):
43 | hi = json.dumps({"cmd": "hi"}).encode("utf-8")
44 | f1 = Frame(Frame.Types.PAYLOAD, 1, len(hi), msg_id, 1)
45 |
46 | await framed_buffer.put(f1.serialize())
47 | await framed_buffer.put(hi)
48 |
49 | m = await framed_buffer.get()
50 |
51 | assert m.header is None
52 | assert m.payload.readall() == hi
53 |
54 |
55 | @pytest.mark.asyncio
56 | async def test_extra_header(framed_buffer, msg_id):
57 | h1 = {"sender": "node1", "recipient": "node2", "route_list": []}
58 | payload = json.dumps(h1).encode("utf-8")
59 | f1 = Frame(Frame.Types.HEADER, 1, len(payload), msg_id, 1)
60 | await framed_buffer.put(f1.serialize())
61 | await framed_buffer.put(payload)
62 |
63 | h2 = {"sender": "node3", "recipient": "node4", "route_list": []}
64 | payload = json.dumps(h2).encode("utf-8")
65 | f2 = Frame(Frame.Types.HEADER, 1, len(payload), msg_id, 2)
66 | await framed_buffer.put(f2.serialize())
67 | await framed_buffer.put(payload)
68 |
69 | assert framed_buffer.header == h2
70 |
71 |
72 | @pytest.mark.asyncio
73 | async def test_command(framed_buffer, msg_id):
74 | cmd = {"cmd": "hi"}
75 | payload = json.dumps(cmd).encode("utf-8")
76 | f1 = Frame(Frame.Types.COMMAND, 1, len(payload), msg_id, 1)
77 | await framed_buffer.put(f1.serialize())
78 | await framed_buffer.put(payload)
79 |
80 | m = await framed_buffer.get()
81 | assert m.header == cmd
82 | assert m.payload is None
83 |
84 |
85 | @pytest.mark.asyncio
86 | async def test_overfull(framed_buffer, msg_id):
87 | header = {"foo": "bar"}
88 | payload = b"this is a test"
89 | fbb = FileBackedBuffer.from_data(payload)
90 | msg = FramedMessage(header=header, payload=fbb)
91 |
92 | await framed_buffer.put(b"".join(msg))
93 |
94 | m = await framed_buffer.get()
95 |
96 | assert m.header == header
97 | assert m.payload.readall() == payload
98 |
99 |
100 | @pytest.mark.asyncio
101 | async def test_underfull(framed_buffer, msg_id):
102 | header = {"foo": "bar"}
103 | payload = b"this is a test"
104 | fbb = FileBackedBuffer.from_data(payload)
105 | msg = FramedMessage(header=header, payload=fbb)
106 | b = b"".join(msg)
107 |
108 | await framed_buffer.put(b[:10])
109 | await framed_buffer.put(b[10:])
110 |
111 | m = await framed_buffer.get()
112 |
113 | assert m.header == header
114 | assert m.payload.readall() == payload
115 |
116 |
117 | @pytest.mark.asyncio
118 | async def test_malformed_frame(framed_buffer, msg_id):
119 | with pytest.raises(ValueError):
120 | await framed_buffer.put(b"this is total garbage and should break things very nicely")
121 |
122 |
123 | @pytest.mark.asyncio
124 | async def test_too_short(framed_buffer, msg_id):
125 | f1 = Frame(Frame.Types.HEADER, 1, 100, 1, 1)
126 | too_short_header = b"this is not long enough"
127 | f2 = Frame(Frame.Types.PAYLOAD, 1, 100, 1, 2)
128 | too_short_payload = b"this is also not long enough"
129 |
130 | await framed_buffer.put(f1.serialize() + too_short_header)
131 | await framed_buffer.put(f2.serialize() + too_short_payload)
132 |
133 | with pytest.raises(asyncio.QueueEmpty):
134 | framed_buffer.get_nowait()
135 |
--------------------------------------------------------------------------------
/test/unit/test_router.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from receptor.router import MeshRouter
3 |
4 | test_networks = [
5 | (
6 | [
7 | ("a", "b", 1),
8 | ("a", "d", 1),
9 | ("a", "f", 1),
10 | ("b", "d", 1),
11 | ("b", "c", 1),
12 | ("c", "e", 1),
13 | ("c", "h", 1),
14 | ("c", "j", 1),
15 | ("e", "f", 1),
16 | ("e", "g", 1),
17 | ("e", "h", 1),
18 | ("f", "g", 1),
19 | ("g", "h", 1),
20 | ("h", "j", 1),
21 | ("h", "k", 1),
22 | ("j", "k", 1),
23 | ("j", "m", 1),
24 | ("l", "m", 1),
25 | ],
26 | [("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
27 | [("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
28 | ),
29 | (
30 | [("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
31 | [("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
32 | [("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
33 | ),
34 | ]
35 |
36 |
37 | @pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
38 | def test_next_hop(edges, expected_next_hops, expected_neighbors):
39 | for node_id, remote, enh in expected_next_hops:
40 | r = MeshRouter(node_id=node_id)
41 | r.add_or_update_edges(edges)
42 | assert r.next_hop(remote) == enh
43 |
44 |
45 | @pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
46 | def test_neighbors(edges, expected_next_hops, expected_neighbors):
47 | r = MeshRouter(node_id=edges[0][0])
48 | r.add_or_update_edges(edges)
49 | for node_id, neighbors in expected_neighbors:
50 | assert r.get_neighbors(node_id) == neighbors
51 |
--------------------------------------------------------------------------------
/test/unit/test_serde.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from receptor.serde import dumps, loads
4 |
5 |
6 | def test_date_serde():
7 |
8 | o = {"now": datetime.datetime.utcnow()}
9 |
10 | serialized = dumps(o)
11 | deserialized = loads(serialized)
12 |
13 | assert deserialized == o
14 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | isolated_build = true
3 | envlist = linters,py36,py37,integration
4 |
5 | [testenv]
6 | whitelist_externals = poetry
7 | deps = poetry >= 1.0.5
8 | commands =
9 | poetry install -v
10 | poetry env info
11 |
12 | [testenv:linters]
13 | commands=
14 | {[testenv]commands}
15 | poetry run flake8
16 | poetry run yamllint -s .
17 |
18 | [testenv:py36]
19 | commands =
20 | {[testenv]commands}
21 | poetry run pytest test/integration test/unit
22 |
23 | [testenv:py37]
24 | commands =
25 | {[testenv]commands}
26 | poetry run pytest test/integration test/unit
27 |
28 | [testenv:integration]
29 | commands =
30 | {[testenv]commands}
31 | poetry run pytest ./test/perf/ -s
32 |
--------------------------------------------------------------------------------