├── .gitignore ├── .travis.yml ├── CHANGES.rst ├── COPYING ├── MANIFEST.in ├── README.rst ├── docs ├── Makefile ├── changelog.rst ├── conf.py ├── contributors.rst ├── design.rst ├── getting-started.rst ├── index.rst ├── installation.rst ├── make.bat ├── onionbalance-config.rst ├── running-onionbalance.rst ├── tutorial.rst └── use-cases.rst ├── onionbalance.png ├── onionbalance.py ├── onionbalance ├── __init__.py ├── __main__.py ├── config.py ├── consensus.py ├── data │ ├── config.example.yaml │ ├── torrc-instance │ └── torrc-server ├── descriptor.py ├── eventhandler.py ├── instance.py ├── log.py ├── manager.py ├── scheduler.py ├── service.py ├── settings.py ├── status.py └── util.py ├── requirements.txt ├── scripts └── rend-connection-stats.py ├── setup.cfg ├── setup.py ├── test-requirements.txt ├── test ├── __init__.py ├── functional │ ├── test_onionbalance_config.py │ └── test_publish_master_descriptor.py ├── scripts │ ├── install-chutney.sh │ └── install-tor.sh ├── test_consensus.py ├── test_descriptor.py ├── test_settings.py ├── test_util.py └── util.py └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # PyInstaller 26 | # Usually these files are written by a python script from a template 27 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .coverage 39 | .cache 40 | nosetests.xml 41 | coverage.xml 42 | 43 | # Translations 44 | *.mo 45 | *.pot 46 | 47 | # Django stuff: 48 | *.log 49 | 50 | # Sphinx documentation 51 | docs/_build/ 52 | 53 | # PyBuilder 54 | target/ 55 | 56 | # Config files and keys 57 | *.yaml 58 | *.key 59 | 60 | # Coverage files 61 | htmlcov 62 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: required 3 | dist: trusty 4 | env: 5 | - TOXENV=py27 TEST=functional 6 | - TOXENV=py35 TEST=functional 7 | - TOXENV=style 8 | - TOXENV=docs 9 | before_install: 10 | # Install tor and chutney if doing functional tests 11 | - if [[ $TEST == 'functional' ]]; then ./test/scripts/install-tor.sh; fi 12 | - if [[ $TEST == 'functional' ]]; then source test/scripts/install-chutney.sh; fi 13 | install: 14 | - pip install tox coveralls 15 | script: 16 | - tox 17 | after_success: 18 | - coveralls 19 | -------------------------------------------------------------------------------- /CHANGES.rst: -------------------------------------------------------------------------------- 1 | 0.1.8 2 | ----- 3 | 4 | - Fix a bug which could cause descriptor fetching to crash and stall if an 5 | old instance descriptor was retrieved from a HSDir. #64 6 | - Minors fixes to documentation and addition of a tutorial. 7 | 8 | 0.1.7 9 | ----- 10 | 11 | - Add functionality to reconnect to the Tor control port while OnionBalance is 12 | running. Thank you to Ceysun Sucu for the patch. #45 13 | - Fix bug where instance descriptors were not updated correctly when an 14 | instance address was listed under multiple master service. 
#49 15 | - Improve performance by only requesting each unique instance descriptor 16 | once per round, rather once for each time it was listed in the config 17 | file. #51 18 | - Fix bug where an exception was raised when the status socket location did 19 | not exist. 20 | - Improve the installation documentation for Debian and Fedora/EPEL 21 | installations. 22 | 23 | 0.1.6 24 | ----- 25 | 26 | - Remove unicode tags from the yaml files generated by onionbalance-config. 27 | - Fix bug resulting in invalid instance onion addresses when attempting to 28 | remove the ".onion" TLD. #44 29 | 30 | 0.1.5 31 | ----- 32 | 33 | - Log error when OnionBalance does not have permission to read a private key. #34 34 | - Fix bug loading descriptors when an address with .onion extension is listed 35 | in the configuration file. #37 36 | - Add support for connecting to the Tor control port over a unix domain socket. #3 37 | 38 | 0.1.4 39 | ----- 40 | 41 | - Use setproctitle to set a cleaner process title 42 | - Replace the python-schedule dependency with a custom scheduler. 43 | - Add a Unix domain socket which outputs the status of the OnionBalance 44 | service when a client connects. By default this socket is created at 45 | `/var/run/onionbalance/control`. Thank you to Federico Ceratto for the 46 | original socket implementation. 47 | - Add support for handling the `SIGINT` and `SIGTERM` signals. Thank you to 48 | Federico Ceratto for this feature. 49 | - Upgrade tests to use the stable Tor 0.2.7.x release. 50 | - Fix bug when validating the modulus length of a provided RSA private key. 51 | - Upload distinct service descriptors to each hidden service directory by 52 | default. The distinct descriptors allows up to 60 introduction points or 53 | backend instances to be reachable by external clients. Thank you to Ceysun 54 | Sucu for describing this technique in his Masters thesis. 55 | - Add `INITIAL_DELAY` option to wait longer before initial descriptor 56 | publication. 
This is useful when there are many backend instance descriptors 57 | which need to be downloaded. 58 | - Add configuration option to allow connecting to a Tor control port on a 59 | different host. 60 | - Remove external image assets when documentation is generated locally 61 | instead of on ReadTheDocs. 62 | 63 | 0.1.3 64 | ----- 65 | 66 | - Streamline the integration tests by using Tor and Chutney from the 67 | upstream repositories. 68 | - Fix bug when HSFETCH is called with a HSDir argument (3d225fd). 69 | - Remove the 'schedule' package from the source code and re-add it as a 70 | dependency. This Python package is now packaged for Debian. 71 | - Extensively restructure the documentation to make it more comprehensible. 72 | - Add --version argument to the command line 73 | - Add configuration options to output log entries to a log file. 74 | 75 | 0.1.2 76 | ----- 77 | 78 | - Remove dependency on the schedule package to prepare for packaging 79 | OnionBalance in Debian. The schedule code is now included directly in 80 | onionbalance/schedule.py. 81 | - Fix the executable path in the help messages for onionbalance and 82 | onionbalance-config. 83 | 84 | 0.1.1 85 | ----- 86 | 87 | - Patch to resolve issue when saving generated torrc files from 88 | onionbalance-config in Python 2. 
89 | 90 | 91 | 0.1.0 92 | ----- 93 | 94 | - Initial release 95 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include COPYING 3 | include requirements.txt 4 | include tox.ini 5 | recursive-include docs *.rst 6 | recursive-include onionbalance/data * 7 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | **This repository is out-of-date and no longer maintained.** 2 | 3 | *Onionbalance is now official maintained by The Tor Project at* https://gitlab.torproject.org/asn/onionbalance ! 4 | 5 | *If you prefer Github, please use the following repo for outside contributions:* https://github.com/asn-d6/onionbalance 6 | 7 | Thank you! 8 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | clean: 52 | rm -rf $(BUILDDIR)/* 53 | 54 | html: 55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 56 | @echo 57 | @echo "Build finished. 
The HTML pages are in $(BUILDDIR)/html." 58 | 59 | dirhtml: 60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 61 | @echo 62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 63 | 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | pickle: 70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 71 | @echo 72 | @echo "Build finished; now you can process the pickle files." 73 | 74 | json: 75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 76 | @echo 77 | @echo "Build finished; now you can process the JSON files." 78 | 79 | htmlhelp: 80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 81 | @echo 82 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 83 | ".hhp project file in $(BUILDDIR)/htmlhelp." 84 | 85 | qthelp: 86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 87 | @echo 88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/onionbalance.qhcp" 91 | @echo "To view the help file:" 92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/onionbalance.qhc" 93 | 94 | applehelp: 95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 96 | @echo 97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 98 | @echo "N.B. You won't be able to view it unless you put it in" \ 99 | "~/Library/Documentation/Help or install it in your application" \ 100 | "bundle." 101 | 102 | devhelp: 103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 104 | @echo 105 | @echo "Build finished." 
106 | @echo "To view the help file:" 107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/onionbalance" 108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/onionbalance" 109 | @echo "# devhelp" 110 | 111 | epub: 112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 113 | @echo 114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 115 | 116 | latex: 117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 118 | @echo 119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 121 | "(use \`make latexpdf' here to do that automatically)." 122 | 123 | latexpdf: 124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 125 | @echo "Running LaTeX files through pdflatex..." 126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 128 | 129 | latexpdfja: 130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 131 | @echo "Running LaTeX files through platex and dvipdfmx..." 132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 134 | 135 | text: 136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 137 | @echo 138 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 139 | 140 | man: 141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 142 | @echo 143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 144 | 145 | texinfo: 146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 147 | @echo 148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 149 | @echo "Run \`make' in that directory to run these through makeinfo" \ 150 | "(use \`make info' here to do that automatically)." 151 | 152 | info: 153 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 154 | @echo "Running Texinfo files through makeinfo..." 
155 | make -C $(BUILDDIR)/texinfo info 156 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 157 | 158 | gettext: 159 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 160 | @echo 161 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 162 | 163 | changes: 164 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 165 | @echo 166 | @echo "The overview file is in $(BUILDDIR)/changes." 167 | 168 | linkcheck: 169 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 170 | @echo 171 | @echo "Link check complete; look for any errors in the above output " \ 172 | "or in $(BUILDDIR)/linkcheck/output.txt." 173 | 174 | doctest: 175 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 176 | @echo "Testing of doctests in the sources finished, look at the " \ 177 | "results in $(BUILDDIR)/doctest/output.txt." 178 | 179 | coverage: 180 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 181 | @echo "Testing of coverage in the sources finished, look at the " \ 182 | "results in $(BUILDDIR)/coverage/python.txt." 183 | 184 | xml: 185 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 186 | @echo 187 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 188 | 189 | pseudoxml: 190 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 191 | @echo 192 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 193 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. _changelog: 2 | 3 | Change Log 4 | ========== 5 | 6 | .. 
include:: ../CHANGES.rst 7 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # onionbalance documentation build configuration file, created by 5 | # sphinx-quickstart on Wed Jun 10 13:54:42 2015. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | import datetime 19 | 20 | import sphinx.environment 21 | from docutils.utils import get_source_line 22 | 23 | # Documentation configuration 24 | __version__ = '0.1.8' 25 | __author__ = "Donncha O'Cearbhaill" 26 | __contact__ = "donncha@donncha.is" 27 | 28 | # Ignore the 'dev' version suffix. 29 | if __version__.endswith('dev'): 30 | __version__ = __version__[:-4] 31 | 32 | 33 | # If extensions (or modules to document with autodoc) are in another directory, 34 | # add these directories to sys.path here. If the directory is relative to the 35 | # documentation root, use os.path.abspath to make it absolute, like shown here. 36 | sys.path.insert(0, os.path.abspath('..')) 37 | 38 | on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 39 | 40 | # -- General configuration ------------------------------------------------ 41 | 42 | 43 | # Don't give warning for external images 44 | def _warn_node(self, msg, node): 45 | if not msg.startswith('nonlocal image URI found:'): 46 | self._warnfunc(msg, '%s:%s' % get_source_line(node)) 47 | sphinx.environment.BuildEnvironment.warn_node = _warn_node 48 | 49 | # If your documentation needs a minimal Sphinx version, state it here. 
50 | needs_sphinx = '1.1' 51 | 52 | # Add any Sphinx extension module names here, as strings. They can be 53 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 54 | # ones. 55 | extensions = [ 56 | 'alabaster', 57 | 'sphinx.ext.autodoc', 58 | 'sphinx.ext.todo', 59 | 'sphinx.ext.viewcode', 60 | 'sphinxcontrib.autoprogram', 61 | ] 62 | 63 | # Add any paths that contain templates here, relative to this directory. 64 | templates_path = ['_templates'] 65 | 66 | # The suffix(es) of source filenames. 67 | # You can specify multiple suffix as a list of string: 68 | # source_suffix = ['.rst', '.md'] 69 | source_suffix = '.rst' 70 | 71 | # The encoding of source files. 72 | source_encoding = 'utf-8-sig' 73 | 74 | # The master toctree document. 75 | master_doc = 'index' 76 | 77 | # General information about the project. 78 | project = 'OnionBalance' 79 | 80 | # Remove copyright notice for man page 81 | copyright = '' 82 | author = __author__ 83 | 84 | # The version info for the project you're documenting, acts as replacement for 85 | # |version| and |release|, also used in various other places throughout the 86 | # built documents. 87 | # 88 | # The short X.Y version. 89 | version = __version__ 90 | # The full version, including alpha/beta/rc tags. 91 | release = __version__ 92 | 93 | # The language for content autogenerated by Sphinx. Refer to documentation 94 | # for a list of supported languages. 95 | # 96 | # This is also used if you do content translation via gettext catalogs. 97 | # Usually you set "language" from the command line for these cases. 98 | language = 'en' 99 | 100 | # There are two options for replacing |today|: either, you set today to some 101 | # non-false value, then it is used: 102 | #today = '' 103 | # Else, today_fmt is used as the format for a strftime call. 
104 | #today_fmt = '%B %d, %Y' 105 | 106 | # List of patterns, relative to source directory, that match files and 107 | # directories to ignore when looking for source files. 108 | exclude_patterns = ['_build', 'modules.rst'] 109 | 110 | # The name of the Pygments (syntax highlighting) style to use. 111 | pygments_style = 'sphinx' 112 | 113 | # A list of ignored prefixes for module index sorting. 114 | #modindex_common_prefix = [] 115 | 116 | # If true, keep warnings as "system message" paragraphs in the built documents. 117 | #keep_warnings = False 118 | 119 | # If true, `todo` and `todoList` produce output, else they produce nothing. 120 | todo_include_todos = True 121 | 122 | 123 | # -- Options for HTML output ---------------------------------------------- 124 | 125 | # The theme to use for HTML and HTML Help pages. See the documentation for 126 | # a list of builtin themes. 127 | html_theme = 'alabaster' 128 | 129 | # Theme options are theme-specific and customize the look and feel of a theme 130 | # further. For a list of options available for each theme, see the 131 | # documentation. 132 | html_theme_options = { 133 | "description": "Load balancing and redundancy for Tor hidden services.", 134 | 'github_user': 'DonnchaC', 135 | 'github_repo': 'onionbalance', 136 | 'github_button': False, 137 | 'travis_button': False, 138 | } 139 | 140 | # Enable external resources on the RTD hosted documentation only 141 | if on_rtd: 142 | html_theme_options['github_button'] = True 143 | html_theme_options['travis_button'] = True 144 | 145 | # Add any paths that contain custom themes here, relative to this directory. 146 | #html_theme_path = [] 147 | 148 | # The name for this set of Sphinx documents. If None, it defaults to 149 | # " v documentation". 150 | #html_title = None 151 | 152 | # A shorter title for the navigation bar. Default is the same as html_title. 
153 | html_short_title = "OnionBalance Docs" 154 | 155 | # The name of an image file (relative to this directory) to place at the top 156 | # of the sidebar. 157 | #html_logo = None 158 | 159 | # Add any paths that contain custom static files (such as style sheets) here, 160 | # relative to this directory. They are copied after the builtin static files, 161 | # so a file named "default.css" will overwrite the builtin "default.css". 162 | html_static_path = [] 163 | 164 | # Custom sidebar templates, maps document names to template names. 165 | html_sidebars = { 166 | '**': [ 167 | 'about.html', 168 | 'navigation.html', 169 | 'relations.html', 170 | ] 171 | } 172 | 173 | # If false, no module index is generated. 174 | html_domain_indices = False 175 | 176 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 177 | html_show_sphinx = False 178 | 179 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 180 | html_show_copyright = False 181 | 182 | # Output file base name for HTML help builder. 183 | htmlhelp_basename = 'onionbalancedoc' 184 | 185 | # -- Options for manual page output --------------------------------------- 186 | 187 | # One entry per manual page. List of tuples 188 | # (source start file, name, description, authors, manual section). 189 | man_pages = [ 190 | ('running-onionbalance', 'onionbalance', 191 | 'a Tor hidden service load balancer', 192 | ['%s <%s>' % (__author__, __contact__)], 1), 193 | ('onionbalance-config', 'onionbalance-config', 194 | 'tool for generating onionbalance config files and keys', 195 | ['%s <%s>' % (__author__, __contact__)], 1), 196 | ] 197 | 198 | # If true, show URL addresses after external links. 199 | #man_show_urls = False 200 | -------------------------------------------------------------------------------- /docs/contributors.rst: -------------------------------------------------------------------------------- 1 | .. 
_contributors: 2 | 3 | Contributors 4 | ============ 5 | 6 | Thank you to the following contributors and others for their invaluble help 7 | and advice in developing OnionBalance. Contributions of any kind (code, 8 | documentation, testing) are very welcome. 9 | 10 | * `Federico Ceratto `_ 11 | 12 | - Tireless assistance with Debian packaging and OnionBalance improvements. 13 | - Replaced and reimplemented the job scheduler. 14 | - Implemented support for Unix signals and added a status socket to 15 | retrieve information about the running service. 16 | 17 | * `Michael Scherer `_ 18 | 19 | - Improving the Debian installation documentation. 20 | 21 | * `s7r `_ 22 | 23 | - Assisted in testing and load testing OnionBalance from an early stage. 24 | - Many useful suggestions for performance and usability improvements. 25 | 26 | * `Ceysun Sucu `_ 27 | 28 | - Added code to reconnect to the Tor control port while OnionBalance is 29 | running. 30 | 31 | * `Alec Muffett `_ 32 | 33 | - Extensively tested OnionBalance, found many bugs and made many 34 | suggestions to improve the software. 35 | 36 | * `duritong `_ 37 | 38 | - Packaged OnionBalance for Fedora, CentOS, and Redhat 7 (EPEL repository). 39 | -------------------------------------------------------------------------------- /docs/design.rst: -------------------------------------------------------------------------------- 1 | Design Document 2 | =============== 3 | 4 | This tool is designed to allow requests to Tor onion service to be 5 | directed to multiple back-end Tor instances, thereby increasing 6 | availability and reliability. The design involves collating the set of 7 | introduction points created by one or more independent Tor onion service 8 | instances into a single 'master' descriptor. 9 | 10 | Overview 11 | -------- 12 | 13 | This tool is designed to allow requests to Tor onion service to be 14 | directed to multiple back-end Tor instances, thereby increasing 15 | availability and reliability. 
The design involves collating the set of 16 | introduction points created by one or more independent Tor onion service 17 | instances into a single 'master' onion service descriptor. 18 | 19 | The master descriptor is signed by the onion service permanent key and 20 | published to the HSDir system as normal. 21 | 22 | Clients who wish to access the onion service would then retrieve the 23 | *master* service descriptor and try to connect to introduction points 24 | from the descriptor in a random order. If a client successfully 25 | establishes an introduction circuit, they can begin communicating with 26 | one of the onion services instances with the normal onion service 27 | protocol defined in rend-spec.txt 28 | 29 | Instance 30 | A load-balancing node running an individual onion service. 31 | Introduction Point 32 | A Tor relay chosen by an onion service instance as a medium-term 33 | *meeting-place* for initial client connections. 34 | Master Descriptor 35 | An onion service descriptor published with the desired onion address 36 | containing introduction points for each instance. 37 | Management Server 38 | Server running OnionBalance which collates introduction points and 39 | publishes a master descriptor. 40 | Metadata Channel 41 | A direct connection from an instance to a management server which can 42 | be used for instance descriptor upload and transfer of other data. 43 | 44 | Retrieving Introduction Point Data 45 | ---------------------------------- 46 | 47 | The core functionality of the OnionBalance service is the collation of 48 | introduction point data from multiple onion service instances by the 49 | management server. 50 | 51 | Basic Mode 52 | ~~~~~~~~~~ 53 | 54 | In the 'Basic mode` of operation, the introduction point information is 55 | transferred from the onion service instances to the management server 56 | via the HSDir system. Each instance runs an onion service with an 57 | instance specific permanent key. 
The instance publishes a descriptor to 58 | the DHT at regularly intervals or when its introduction point set 59 | changes. 60 | 61 | On initial startup the management server will load the previously 62 | published master descriptor from the DHT if it exists. The master 63 | descriptor is used to prepopulate the introduction point set. The 64 | management server regularly polls the HSDir system for a descriptor for 65 | each of its instances. Currently polling occurs every 10 minutes. This 66 | polling period can be tuned for hidden services with shorter or longer 67 | lasting introduction points. 68 | 69 | When the management server receives a new descriptor from the HSDir 70 | system, it should before a number of checks to ensure that it is valid: 71 | 72 | - Confirm that the descriptor has a valid signature and that the public 73 | key matches the instance that was requested. 74 | - Confirm that the descriptor timestamp is equal or newer than the 75 | previously received descriptor for that hidden service instance. This 76 | reduces the ability of a HSDir to replay older descriptors for an 77 | instance which may contain expired introduction points. 78 | - Confirm that the descriptor timestamp is not more than 4 hours in the 79 | past. An older descriptor indicates that the instance may no longer 80 | be online and publishing descriptors. The instance should not be 81 | included in the master descriptor. 82 | 83 | It should be possible for two or more independent management servers to 84 | publish descriptors for a single onion service. The servers would 85 | publish independent descriptors which will replace each other on the 86 | HSDir system.. Any difference in introduction point selection between 87 | descriptors should not impact the end user. 88 | 89 | Limitations 90 | ''''''''''' 91 | 92 | - A malicious HSDir could replay old instance descriptors in an attempt 93 | to include expired introduction points in the master descriptor. 
94 | When an attacker does not control all of the responsible HSDirs this 95 | attack can be mitigated by not accepting descriptors with a timestamp 96 | older than the most recently retrieved descriptor. 97 | 98 | - The management server may also retrieve an old instance descriptor as 99 | a result of churn in the DHT. The management server may attempt to 100 | fetch the instance descriptor from a different set of HSDirs than the 101 | instance published to. 102 | 103 | - An onion service instance may rapidly rotate its introduction point 104 | circuits when subjected to a Denial of Service attack. An 105 | introduction point circuit is closed by the onion service when it has 106 | received ``max_introductions`` for that circuit. During DoS this 107 | circuit rotating may occur faster than the management server polls 108 | the HSDir system for new descriptors. As a result clients may 109 | retrieve master descriptors which contain no currently valid 110 | introduction points. 111 | 112 | - It is trivial for a HSDir to determine that a onion service is using 113 | OnionBalance when in Basic mode. OnionBalance will try poll for 114 | instance descriptors on a regular basis. A HSDir which connects to 115 | onion services published to it would find that a backend instance is 116 | serving the same content as the master service. This allows a HSDir 117 | to trivially determine the onion addresses for a service's backend 118 | instances. 119 | 120 | 121 | Basic mode allows for scaling across multiple onion service 122 | instances with no additional software or Tor modifications necessary 123 | on the onion service instance. Basic mode does not hide that a 124 | service is using OnionBalance. It also does not significantly 125 | protect a service from introduction point denial of service or 126 | actively malicious HSDirs. 
127 | 128 | Complex Mode 129 | ~~~~~~~~~~~~ 130 | 131 | In Complex mode, introduction point information is uploaded directly from 132 | each instance to the management server via an onion service. The onion 133 | service instance does not publishing its onion service descriptor to the 134 | HSDir system. 135 | 136 | A descriptor is uploaded from an instance to its management servers 137 | each time Tor generates a new onion service descriptor. A simple daemon 138 | running on the onion service instance listens for the event emitted on 139 | the Tor control port when a onion service descriptor is generated. The 140 | daemon should retrieve the descriptor from the service's local 141 | descriptor cache and upload it to one or more management servers 142 | configured for that onion service. The protocol for the metadata channel 143 | is not yet defined. 144 | 145 | The metadata channel should authorize connecting instance clients using 146 | ``basic`` or ``stealth`` authorization. 147 | 148 | Multiple management servers for the same onion service may communicate 149 | with each other via a hidden service channel. This extra channel can be 150 | used to signal when any of the management servers becomes unavailable. A 151 | slave management server may begin publishing service descriptors if its 152 | master management server is no longer available. 153 | 154 | Complex mode requires additional software to be run on the service 155 | instances. It also requires more complicated communication via a 156 | metadata channel. In practice, this metadata channel may be less 157 | reliable than the HSDir system. 158 | 159 | .. note :: 160 | The management server communication channel is not implemented yet. The 161 | Complex Mode design may be revised significantly before implementation. 162 | 163 | Complex mode minimizes the information transmitted via the HSDir 164 | system and may make it more difficult for a HSDir to determine that 165 | a service is using OnionBalance. 
It also makes it more difficult for 166 | an active malicious HSDir to carry out descriptor replay attacks or 167 | otherwise interfere with the transfer of introduction point 168 | information. The management server is notified about new 169 | introduction points shortly after they are created which will result 170 | in more recent descriptor data during very high load or 171 | denial-of-service situations. 172 | 173 | Choice of Introduction Points 174 | ----------------------------- 175 | 176 | Tor onion service descriptors can include a maximum of 10 introduction 177 | points. OnionBalance should select introduction points so as to 178 | uniformly distribute load across the available backend instances. 179 | 180 | Onionbalance will upload multiple distinct descriptors if you have configured 181 | more than 10 instances. 182 | 183 | - **1 instance** - 3 IPs 184 | - **2 instance** - 6 IPs (3 IPs from each instance) 185 | - **3 instance** - 9 IPs (3 IPs from each instance) 186 | - **4 instance** - 10 IPs (3 IPs from one instance, 2 from each other 187 | instance) 188 | - **5 instance** - 10 IPs (2 IPs from each instance) 189 | - **6-10 instances** - 10 IPs (selection from all instances) 190 | - **11 or more instances** - 10 IPs (distinct descriptors - selection from all instances) 191 | 192 | If running in Complex mode, introduction points can be selected so as to 193 | obscure that a service is using OnionBalance. Always attempting to 194 | choose 3 introduction points per descriptor may make it more difficult 195 | for a passive observer to confirm that a service is running 196 | OnionBalance. However behavioral characteristics such as the rate of 197 | introduction point rotation may still allow a passive observer to 198 | distinguish an OnionBalance service from a standard Tor onion service. 199 | Selecting a smaller set of introduction points may impact on performance 200 | or reliability of the service. 
201 | 202 | - **1 instance** - 3 IPs 203 | - **2 instances** - 3 IPs (2 IPs from one instance, 1 IP from the other 204 | instance) 205 | - **3 instances** - 3 IPs (1 IP from each instance) 206 | - **more than 3 instances** - Select the maximum set of introduction 207 | points as outlined previously. 208 | 209 | It may be advantageous to select introduction points in a non-random 210 | manner. The longest-lived introduction points published by a backend 211 | instance are likely to be stable. Conversely selecting more recently 212 | created introduction points may more evenly distribute client 213 | introductions across an instance's introduction point circuits. Further 214 | investigation of these options should indicate if there are significant 215 | advantages to any of these approaches. 216 | 217 | Generation and Publication of Master Descriptor 218 | ----------------------------------------------- 219 | 220 | The management server should generate an onion service descriptor 221 | containing the selected introduction points. This master descriptor is 222 | then signed by the actual onion service permanent key. The signed master 223 | descriptor should be published to the responsible HSDirs as normal. 224 | 225 | Clients who wish to access the onion service would then retrieve the 226 | 'master' service descriptor and begin connecting to introduction points at 227 | random from the introduction point list. After successful introduction 228 | the client will have created an onion service circuit to one of the 229 | available onion service instances and can then begin communicating as 230 | normal along that circuit. 231 | 232 | Next-Generation Onion Services (Prop 224) Compatibility 233 | ------------------------------------------------------- 234 | 235 | In the next-generation onion service proposal (Prop224), introduction 236 | point keys will no longer be independent of the instance/descriptor 237 | permanent key. 
The proposal specifies that each introduction point 238 | authentication key cross-certifies the descriptor's blinded public key. 239 | Each instance must know the master descriptor blinded public key during 240 | descriptor generation. 241 | 242 | One solution is to operate in the Complex mode described previously. 243 | Each instance is provided with the descriptor signing key derived from 244 | the same master identity key. Each introduction point authentication key 245 | will then cross-certify the same blinded public key. The generated 246 | service descriptors are not uploaded to the HSDir system. Instead the 247 | descriptors are passed to the management server where introduction 248 | points are selected and a master descriptor is published. 249 | 250 | Alternatively a Tor control port command could be implemented to allow a 251 | controller to request a onion service descriptor which has each 252 | introduction point authentication key cross-certify a blinded public key 253 | provided in the control port command. This would remove the need to 254 | provide any master service private keys to backend instances. 255 | 256 | The descriptor signing keys specified in Prop224 are valid for a limited 257 | period of time. As a result the compromise of a descriptor signing key 258 | does not lead to permanent compromise of the onion service 259 | 260 | .. TODO: Tidy up this section 261 | 262 | Implementation 263 | ------------------------------------------------------- 264 | 265 | **TODO** 266 | -------------------------------------------------------------------------------- /docs/getting-started.rst: -------------------------------------------------------------------------------- 1 | .. _getting_started: 2 | 3 | Getting Started 4 | =============== 5 | 6 | OnionBalance implements `round-robin` like load balancing on top of Tor 7 | onion services. A typical OnionBalance deployment will incorporate one management 8 | servers and multiple backend application servers. 
9 | 10 | Architecture 11 | ------------ 12 | 13 | The management server runs the OnionBalance daemon. OnionBalance combines the routing information (the introduction points) for multiple backend onion service instances and publishes this information in a master descriptor. 14 | 15 | .. image:: ../onionbalance.png 16 | 17 | The backend application servers run a standard Tor onion service. When a client connects to the public onion service they select one of the introduction points at random. When the introduction circuit completes the user is connected to the corresponding backend instance. 18 | 19 | **Management Server** 20 | is the machine running the OnionBalance daemon. It needs to have access to the onion 21 | service private key corresponding to the desired onion address. This is the public onion address that users will request. 22 | 23 | This machine can be located geographically isolated from the machines 24 | hosting the onion service content. It does not need to serve any content. 25 | 26 | **Backend Instance** 27 | Each backend application server runs a Tor onion service with a unique onion service key. 28 | 29 | .. note:: 30 | The :ref:`onionbalance-config ` tool can be used to 31 | quickly generate keys and config files for your OnionBalance deployment. 32 | 33 | 34 | The OnionBalance tool provides two command line tools: 35 | 36 | **onionbalance** acts as a long running daemon. 37 | 38 | **onionbalance-config** is a helper utility which eases the process of 39 | creating keys and configuration files for onionbalance and the backend 40 | Tor instances. 41 | 42 | CLI Documentation 43 | ~~~~~~~~~~~~~~~~~ 44 | 45 | .. toctree:: 46 | :maxdepth: 1 47 | 48 | onionbalance 49 | onionbalance-config 50 | 51 | 52 | Installing and Configuring Tor 53 | ------------------------------ 54 | 55 | Tor is needed on the management server and every backend onion service 56 | instance. 
57 | 58 | Management Server 59 | ~~~~~~~~~~~~~~~~~ 60 | 61 | OnionBalance requires that a recent version of Tor (``>= 0.2.7.1-alpha``) is 62 | installed on the management server system. This version might not be available 63 | in your operating system's repositories yet. 64 | 65 | It is recommended that you install Tor from the 66 | `Tor Project repositories `_ 67 | to ensure you stay up to date with the latest Tor releases. 68 | 69 | The management server needs to have its control port enabled to allow 70 | the OnionBalance daemon to talk to the Tor process. This can be done by 71 | uncommenting the ``ControlPort`` option in your ``torrc`` configuration file. 72 | 73 | Alternatively you can replace your ``torrc`` file with the following, which 74 | is suitable for the Tor instance running on the management server: 75 | 76 | .. literalinclude:: ../onionbalance/data/torrc-server 77 | :name: torrc-server 78 | :lines: 6- 79 | 80 | After configuring Tor you should restart your Tor process 81 | 82 | .. code-block:: console 83 | 84 | $ sudo service tor reload 85 | 86 | Backend Instances 87 | ~~~~~~~~~~~~~~~~~ 88 | 89 | Each backend instance should run a standard onion service which serves your 90 | website or other content. More information about configuring onion services is 91 | available in the Tor Project's 92 | `hidden service configuration guide `_. 93 | 94 | If you have used the ``onionbalance-config`` tool you should transfer the 95 | generated instance config files and keys to the Tor configuration directory 96 | on the backend servers. 97 | 98 | .. literalinclude:: ../onionbalance/data/torrc-instance 99 | :name: torrc-instance 100 | :lines: 6- 101 | 102 | After configuring Tor you should restart your Tor process 103 | 104 | .. 
code-block:: console 105 | 106 | $ sudo service tor reload 107 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. onionbalance documentation master file, created by 2 | sphinx-quickstart on Wed Jun 10 13:54:42 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Overview 7 | ======== 8 | 9 | The OnionBalance software allows for Tor hidden service requests to be 10 | distributed across multiple backend Tor instances. OnionBalance provides 11 | load-balancing while also making onion services more resilient and reliable 12 | by eliminating single points-of-failure. 13 | 14 | - Latest release: |version| (:ref:`changelog`) 15 | - GitHub: https://github.com/DonnchaC/onionbalance/ 16 | - Issue tracker: https://github.com/DonnchaC/onionbalance/issues 17 | - PyPI: https://pypi.python.org/pypi/OnionBalance 18 | - IRC: #onionbalance @ OFTC 19 | 20 | Features 21 | ======== 22 | 23 | OnionBalance is under active development and new features are being added 24 | regularly: 25 | 26 | - Load balancing between up to 60 backend hidden services 27 | - Storage of the hidden service private key separate to the hidden service 28 | hosts 29 | 30 | 31 | Quickstart 32 | ========== 33 | 34 | The OnionBalance ::ref::`tutorial` describes the deployment of an onion service with 35 | multiple backed Tor instances and web servers. The following is a minimal 36 | quickstart guide for a new installation. 37 | 38 | Assuming there is no previous configuration in ``/etc/onionbalance``: 39 | 40 | .. 
code-block:: console 41 | 42 | $ sudo apt-get install onionbalance 43 | $ onionbalance-config 44 | $ sudo cp ./config/master/*.key /etc/onionbalance/ 45 | $ sudo cp ./config/master/config.yaml /etc/onionbalance/ 46 | $ sudo chown onionbalance:onionbalance /etc/onionbalance/*.key 47 | 48 | Restart OnionBalance to reload the configuration files. 49 | 50 | .. code-block:: console 51 | 52 | $ sudo service onionbalance restart 53 | 54 | Check the logs. The following warnings are expected: 55 | "Error generating descriptor: No introduction points for service ..." 56 | 57 | .. code-block:: console 58 | 59 | $ sudo tail -f /var/log/onionbalance/log 60 | 61 | Copy the ``instance_torrc`` and ``private_key`` files from each of the directories named ``./config/``, ``srv1``, ``srv2``, ... to each of the Tor servers providing the Onion Services. 62 | 63 | Configure and start the instance services. The onion service managed by OnionBalance should be ready within 10 minutes. 64 | 65 | User Guide 66 | ========== 67 | 68 | OnionBalance consists of a long-running daemon and a command-line 69 | configuration tool. Please see the :ref:`getting_started` page for usage 70 | instructions. 71 | 72 | 73 | .. toctree:: 74 | :maxdepth: 2 75 | 76 | installation 77 | getting-started 78 | running-onionbalance 79 | use-cases 80 | tutorial 81 | 82 | .. toctree:: 83 | :maxdepth: 2 84 | 85 | design 86 | contributors 87 | changelog 88 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | OnionBalance requires at least one system that is running the OnionBalance 5 | management server. 6 | 7 | The OnionBalance software does not need to be installed on the 8 | backend servers which provide the hidden service content (i.e. web site, 9 | IRC server etc.). 10 | 11 | OnionBalance is not yet packaged for most Linux and BSD. 
The tool can be 12 | installed from PyPI or directly from the Git repository: 13 | 14 | .. code-block:: console 15 | 16 | # pip install onionbalance 17 | 18 | or 19 | 20 | .. code-block:: console 21 | 22 | $ git clone https://github.com/DonnchaC/onionbalance.git 23 | $ cd onionbalance 24 | # python setup.py install 25 | 26 | If you are running Debian Jessie (with backports enabled) or later you 27 | can install OnionBalance with the following command: 28 | 29 | .. code-block:: console 30 | 31 | # apt-get install onionbalance 32 | 33 | There is also a python 3 based package available in Fedora >= 25: 34 | 35 | .. code-block:: console 36 | 37 | # yum install python3-onionbalance 38 | 39 | For CentOS or RedHat 7 there is a python 2 based package available in 40 | the EPEL Repository: 41 | 42 | .. code-block:: console 43 | 44 | # yum install python2-onionbalance 45 | 46 | All tagged releases on Github or PyPi are signed with my GPG key: 47 | 48 | :: 49 | 50 | pub 4096R/0x3B0D706A7FBFED86 2013-06-27 [expires: 2016-07-11] 51 | Key fingerprint = 7EFB DDE8 FD21 11AE A7BE 1AA6 3B0D 706A 7FBF ED86 52 | uid [ultimate] Donncha O'Cearbhaill 53 | sub 3072R/0xD60D64E73458F285 2013-06-27 [expires: 2016-07-11] 54 | sub 3072R/0x7D49FC2C759AA659 2013-06-27 [expires: 2016-07-11] 55 | sub 3072R/0x2C9C6F4ABBFCF7DD 2013-06-27 [expires: 2016-07-11] 56 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 
11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | echo. coverage to run coverage check of the documentation if enabled 41 | goto end 42 | ) 43 | 44 | if "%1" == "clean" ( 45 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 46 | del /q /s %BUILDDIR%\* 47 | goto end 48 | ) 49 | 50 | 51 | REM Check if sphinx-build is available and fallback to Python version if any 52 | %SPHINXBUILD% 2> nul 53 | if errorlevel 9009 goto sphinx_python 54 | goto sphinx_ok 55 | 56 | :sphinx_python 57 | 58 | set SPHINXBUILD=python -m sphinx.__init__ 59 | %SPHINXBUILD% 2> nul 60 | if errorlevel 9009 ( 61 | echo. 62 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx 63 | echo.installed, then set the SPHINXBUILD environment variable to point 64 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 65 | echo.may add the Sphinx directory to PATH. 66 | echo. 67 | echo.If you don't have Sphinx installed, grab it from 68 | echo.http://sphinx-doc.org/ 69 | exit /b 1 70 | ) 71 | 72 | :sphinx_ok 73 | 74 | 75 | if "%1" == "html" ( 76 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 77 | if errorlevel 1 exit /b 1 78 | echo. 79 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 80 | goto end 81 | ) 82 | 83 | if "%1" == "dirhtml" ( 84 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 85 | if errorlevel 1 exit /b 1 86 | echo. 87 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 88 | goto end 89 | ) 90 | 91 | if "%1" == "singlehtml" ( 92 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 93 | if errorlevel 1 exit /b 1 94 | echo. 95 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 96 | goto end 97 | ) 98 | 99 | if "%1" == "pickle" ( 100 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 101 | if errorlevel 1 exit /b 1 102 | echo. 103 | echo.Build finished; now you can process the pickle files. 104 | goto end 105 | ) 106 | 107 | if "%1" == "json" ( 108 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 109 | if errorlevel 1 exit /b 1 110 | echo. 111 | echo.Build finished; now you can process the JSON files. 112 | goto end 113 | ) 114 | 115 | if "%1" == "htmlhelp" ( 116 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 117 | if errorlevel 1 exit /b 1 118 | echo. 119 | echo.Build finished; now you can run HTML Help Workshop with the ^ 120 | .hhp project file in %BUILDDIR%/htmlhelp. 121 | goto end 122 | ) 123 | 124 | if "%1" == "qthelp" ( 125 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 126 | if errorlevel 1 exit /b 1 127 | echo. 
128 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 129 | .qhcp project file in %BUILDDIR%/qthelp, like this: 130 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\onionbalance.qhcp 131 | echo.To view the help file: 132 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\onionbalance.ghc 133 | goto end 134 | ) 135 | 136 | if "%1" == "devhelp" ( 137 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 138 | if errorlevel 1 exit /b 1 139 | echo. 140 | echo.Build finished. 141 | goto end 142 | ) 143 | 144 | if "%1" == "epub" ( 145 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 146 | if errorlevel 1 exit /b 1 147 | echo. 148 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 149 | goto end 150 | ) 151 | 152 | if "%1" == "latex" ( 153 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 154 | if errorlevel 1 exit /b 1 155 | echo. 156 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 157 | goto end 158 | ) 159 | 160 | if "%1" == "latexpdf" ( 161 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 162 | cd %BUILDDIR%/latex 163 | make all-pdf 164 | cd %~dp0 165 | echo. 166 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdfja" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf-ja 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "text" ( 181 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 182 | if errorlevel 1 exit /b 1 183 | echo. 184 | echo.Build finished. The text files are in %BUILDDIR%/text. 185 | goto end 186 | ) 187 | 188 | if "%1" == "man" ( 189 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 190 | if errorlevel 1 exit /b 1 191 | echo. 192 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 
193 | goto end 194 | ) 195 | 196 | if "%1" == "texinfo" ( 197 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 198 | if errorlevel 1 exit /b 1 199 | echo. 200 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 201 | goto end 202 | ) 203 | 204 | if "%1" == "gettext" ( 205 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 206 | if errorlevel 1 exit /b 1 207 | echo. 208 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 209 | goto end 210 | ) 211 | 212 | if "%1" == "changes" ( 213 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 214 | if errorlevel 1 exit /b 1 215 | echo. 216 | echo.The overview file is in %BUILDDIR%/changes. 217 | goto end 218 | ) 219 | 220 | if "%1" == "linkcheck" ( 221 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 222 | if errorlevel 1 exit /b 1 223 | echo. 224 | echo.Link check complete; look for any errors in the above output ^ 225 | or in %BUILDDIR%/linkcheck/output.txt. 226 | goto end 227 | ) 228 | 229 | if "%1" == "doctest" ( 230 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 231 | if errorlevel 1 exit /b 1 232 | echo. 233 | echo.Testing of doctests in the sources finished, look at the ^ 234 | results in %BUILDDIR%/doctest/output.txt. 235 | goto end 236 | ) 237 | 238 | if "%1" == "coverage" ( 239 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 240 | if errorlevel 1 exit /b 1 241 | echo. 242 | echo.Testing of coverage in the sources finished, look at the ^ 243 | results in %BUILDDIR%/coverage/python.txt. 244 | goto end 245 | ) 246 | 247 | if "%1" == "xml" ( 248 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 249 | if errorlevel 1 exit /b 1 250 | echo. 251 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 252 | goto end 253 | ) 254 | 255 | if "%1" == "pseudoxml" ( 256 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 257 | if errorlevel 1 exit /b 1 258 | echo. 259 | echo.Build finished. 
The pseudo-XML files are in %BUILDDIR%/pseudoxml. 260 | goto end 261 | ) 262 | 263 | :end 264 | -------------------------------------------------------------------------------- /docs/onionbalance-config.rst: -------------------------------------------------------------------------------- 1 | .. _onionbalance_config: 2 | 3 | onionbalance-config Tool 4 | ======================== 5 | 6 | Description 7 | ----------- 8 | 9 | The ``onionbalance-config`` tool is the fastest way to generate the necessary 10 | keys and config files to get your onion service up and running. 11 | 12 | .. code-block:: console 13 | 14 | $ onionbalance-config 15 | 16 | When called without any arguments, the config generator will run in an 17 | interactive mode and prompt for user input. 18 | 19 | The ``master`` directory should be stored on the management server while 20 | the other ``instance`` directories should be transferred to the respective 21 | backend servers. 22 | 23 | 24 | Command-Line Options 25 | -------------------- 26 | 27 | .. autoprogram:: onionbalance.manager:parse_cmd_args() 28 | :prog: onionbalance-config 29 | 30 | 31 | Files 32 | ----- 33 | 34 | master/config.yaml 35 | This is the configuration file that is used my the OnionBalance management 36 | server. 37 | 38 | master/.key 39 | The private key which will become the public address and identity for your 40 | hidden service. It is essential that you keep this key secure. 41 | 42 | master/torrc-server 43 | A sample Tor configuration file which can be used with the Tor instance 44 | running on the management server. 45 | 46 | srv/torrc-instance 47 | A sample Tor config file which contains the Tor ``HiddenService*`` options 48 | needed for your backend Tor instance. 49 | 50 | srv//private_key 51 | Directory containing the private key for you backend hidden service instance. 52 | This key is less critical as it can be rotated if lost or compromised. 
53 | 54 | 55 | See Also 56 | -------- 57 | 58 | Full documentation for the **OnionBalance** software is available at 59 | https://onionbalance.readthedocs.org/ 60 | -------------------------------------------------------------------------------- /docs/running-onionbalance.rst: -------------------------------------------------------------------------------- 1 | Running OnionBalance 2 | ==================== 3 | 4 | Description 5 | ----------- 6 | 7 | You can start the OnionBalance management server once all of your backend 8 | onion service instances are running. 9 | 10 | You will need to create a :ref:`configuration file ` 11 | which list the backend hidden services and the location of your hidden 12 | service keys. 13 | 14 | .. code-block:: console 15 | 16 | $ onionbalance -c config.yaml 17 | 18 | or 19 | 20 | .. code-block:: console 21 | 22 | $ sudo service onionbalance start 23 | 24 | The management server must be left running to publish new descriptors for 25 | your onion service. 26 | 27 | .. note:: 28 | 29 | Multiple OnionBalance management servers can be run simultaneously with 30 | the same master private key and configuration file to provide redundancy. 31 | 32 | Command-Line Options 33 | -------------------- 34 | 35 | .. autoprogram:: onionbalance.manager:parse_cmd_args() 36 | :prog: onionbalance 37 | 38 | 39 | .. _configuration_file_format: 40 | 41 | Configuration File Format 42 | ------------------------- 43 | 44 | The OnionBalance management server is primarily configured using a YAML 45 | configuration file. 46 | 47 | .. literalinclude:: ../onionbalance/data/config.example.yaml 48 | :name: example-config.yaml 49 | :language: yaml 50 | 51 | 52 | The ``services`` section of the configuration file contains a list of 53 | master onion services that OnionBalance is responsible for. 54 | 55 | Each ``key`` option specifies the location of the 1024 bit private RSA key 56 | for the hidden service. 
This master private key determines the address 57 | that users will use to access your onion service. This private key **must** 58 | be kept secure. 59 | 60 | The location of the private key is evaluated as an absolute path, or 61 | relative to the configuration file location. 62 | 63 | You can use existing Tor hidden service private key with OnionBalance 64 | to keep your onion address. 65 | 66 | Each backend Tor onion service instance is listed by its unique onion 67 | address in the ``instances`` list. 68 | 69 | .. note:: 70 | 71 | You can replace backend instance keys if they get lost or compromised. 72 | Simply start a new backend hidden service under a new key and replace 73 | the ``address`` in the config file. 74 | 75 | If you have used the :ref:`onionbalance-config ` tool 76 | you can simply use the generated config file from ``master/config.yaml``. 77 | 78 | .. note:: 79 | 80 | By default onionbalance will search for a ``config.yaml`` file in 81 | the current working directory. 82 | 83 | 84 | Configuration Options 85 | ~~~~~~~~~~~~~~~~~~~~~ 86 | 87 | The OnionBalance command line options can also be specified in the 88 | OnionBalance configuration file. Options specified on the command line 89 | take precedence over the related configuration file options: 90 | 91 | TOR_CONTROL_SOCKET: 92 | The location of the Tor unix domain control socket. OnionBalance will 93 | attempt to connect to this control socket first before falling back to 94 | using a control port connection. 95 | (default: /var/run/tor/control) 96 | 97 | TOR_ADDRESS: 98 | The address where the Tor control port is listening. (default: 127.0.0.1) 99 | 100 | TOR_PORT: 101 | The Tor control port. (default: 9051) 102 | 103 | TOR_CONTROL_PASSWORD: 104 | The password for authenticating to a Tor control port which is using the 105 | HashedControlPassword authentication method. This is not needed when the 106 | Tor control port is using the more common CookieAuthentication method. 
107 | (default: None) 108 | 109 | Other options: 110 | 111 | LOG_LOCATION 112 | The path where OnionBalance should write its log file. 113 | 114 | LOG_LEVEL 115 | Specify the minimum verbosity of log messages to output. All log messages 116 | equal to or higher than the specified log level are output. The available 117 | log levels are the same as the --verbosity command line option. 118 | 119 | REFRESH_INTERVAL 120 | How often to check for updated backend hidden service descriptors. This 121 | value can be decreased if your backend instances are under heavy load 122 | causing them to rotate introduction points quickly. 123 | (default: 600 seconds). 124 | 125 | PUBLISH_CHECK_INTERVAL 126 | How often to check if new descriptors need to be published for 127 | the master hidden service (default: 360 seconds). 128 | 129 | INITIAL_DELAY 130 | How long to wait between starting OnionBalance and publishing the master 131 | descriptor. If you have more than 20 backend instances you may need to wait 132 | longer for all instance descriptors to download before starting 133 | (default: 45 seconds). 134 | 135 | DISTINCT_DESCRIPTORS 136 | Distinct descriptors are used if you have more than 10 backend instances. 137 | At the cost of scalability, this can be disabled to appear more like a 138 | standard onion service. (default: True) 139 | 140 | STATUS_SOCKET_LOCATION 141 | The OnionBalance service creates a Unix domain socket which provides 142 | real-time information about the currently loaded service and descriptors. 143 | This option can be used to change the location of this domain socket. 144 | (default: /var/run/onionbalance/control) 145 | 146 | The following options typically do not need to be modified by the end user: 147 | 148 | REPLICAS 149 | How many sets of HSDirs to upload to (default: 2). 
150 | 151 | MAX_INTRO_POINTS 152 | How many introduction points to include in a descriptor (default: 10) 153 | 154 | DESCRIPTOR_VALIDITY_PERIOD 155 | How long a hidden service descriptor remains valid (default: 156 | 86400 seconds) 157 | 158 | DESCRIPTOR_OVERLAP_PERIOD 159 | How long to overlap hidden service descriptors when changing 160 | descriptor IDs (default: 3600 seconds) 161 | 162 | DESCRIPTOR_UPLOAD_PERIOD 163 | How often to publish a descriptor, even when the introduction points 164 | don't change (default: 3600 seconds) 165 | 166 | 167 | Environment Variables 168 | ~~~~~~~~~~~~~~~~~~~~~ 169 | 170 | ONIONBALANCE_CONFIG 171 | Override the location for the OnionBalance configuration file. 172 | 173 | The loaded configuration file takes precedence over environment variables. 174 | Configuration file options will override environment variable which have the 175 | same name. 176 | 177 | ONIONBALANCE_LOG_LOCATION 178 | See the config file option. 179 | 180 | ONIONBALANCE_LOG_LEVEL 181 | See the config file option 182 | 183 | ONIONBALANCE_STATUS_SOCKET_LOCATION 184 | See the config file option 185 | 186 | ONIONBALANCE_TOR_CONTROL_SOCKET 187 | See the config file option 188 | 189 | 190 | Files 191 | ----- 192 | 193 | /etc/onionbalance/config.yaml 194 | The configuration file, which contains ``services`` entries. 195 | 196 | config.yaml 197 | Fallback location for torrc, if /etc/onionbalance/config.yaml is 198 | not found. 199 | 200 | See Also 201 | -------- 202 | 203 | Full documentation for the **OnionBalance** software is available at 204 | https://onionbalance.readthedocs.org/ 205 | -------------------------------------------------------------------------------- /docs/tutorial.rst: -------------------------------------------------------------------------------- 1 | .. _tutorial: 2 | 3 | Tutorial 4 | ======== 5 | 6 | This is a step-by-step tutorial to help you configure OnionBalance. 
7 | 8 | OnionBalance implements `round-robin` like load balancing on top of Tor 9 | onion services. A typical OnionBalance deployment will incorporate one management 10 | servers and multiple backend application servers. 11 | 12 | Assumptions 13 | ----------- 14 | 15 | You want to run: 16 | 17 | - one or more OnionBalance processes, to perform load balancing, on hosts 18 | named ``obhost1``, ``obhost2``. 19 | - two or more Tor processes, to run the Onion Services, on hosts named 20 | ``torhost1``, ``torhost2``. 21 | - two or more servers (e.g. web servers) or traditional load balancers on 22 | hosts named ``webserver1``, ``webserver2``. 23 | 24 | Scaling up: 25 | 26 | - the number of ``obhostX`` can be increased but this will not help handling 27 | more traffic. 28 | - the number of ``torhostX`` can be increased up to 60 instances to handle 29 | more traffic. 30 | - the number of ``webserverX`` can be increased to handle more traffic until 31 | the Tor daemons in front of them become the bottleneck. 32 | 33 | Scaling down: 34 | 35 | - the three type of services can be run on the same hosts. The number of hosts 36 | can scale down to one. 37 | 38 | Reliability: 39 | 40 | Contrarily to traditional load balancers, the OnionBalance daemon does not 41 | receive and forward traffic. As such, ``obhostX`` does not need to be in 42 | proximity to ``torhostX`` and can be run from any location on the Internet. 43 | Failure of ``obhostX`` will not affect the service as long as either one 44 | ``obhost`` is still up or or the failure is shorter than 30 minutes. 45 | 46 | Other assumptions: 47 | 48 | - the hosts run Debian or Ubuntu 49 | - there is no previous configuration 50 | 51 | Configuring the OnionBalance host 52 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 53 | 54 | On ``obhost1``: 55 | 56 | .. 
code-block:: bash 57 | 58 | sudo apt-get install onionbalance tor 59 | mkdir -p /var/run/onionbalance 60 | chown onionbalance:onionbalance /var/run/onionbalance 61 | /usr/sbin/onionbalance-config -n --service-virtual-port \ 62 | --service-target --output ~/onionbalance_master_conf 63 | sudo cp ~/onionbalance_master_conf/master/*.key /etc/onionbalance/ 64 | sudo cp ~/onionbalance_master_conf/master/config.yaml /etc/onionbalance/ 65 | sudo chown onionbalance:onionbalance /etc/onionbalance/*.key 66 | sudo service onionbalance restart 67 | sudo tail -f /var/log/onionbalance/log 68 | 69 | Back up the files in ``~/onionbalance_master_conf``. 70 | 71 | If you have other ``obhostX``: 72 | 73 | .. code-block:: bash 74 | 75 | sudo apt-get install onionbalance 76 | mkdir -p /var/run/onionbalance 77 | chown onionbalance:onionbalance /var/run/onionbalance 78 | 79 | Copy ``/etc/onionbalance/\*.key`` and ``/etc/onionbalance/config.yml`` 80 | from ``obhost1`` to all hosts in ``obhostX``. 81 | 82 | Check the logs. The following warnings are expected: 83 | `"Error generating descriptor: No introduction points for service ..."`. 84 | 85 | Configuring the Tor services 86 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 87 | 88 | Copy the ``instance_torrc`` and ``private_key`` files from each of the 89 | directories named ``./config/srv1``, ``./config/srv2``,.. on ``obhost1`` 90 | to ``torhostX`` - the contents of one directory for each ``torhostX``. 91 | 92 | Configure and start the services - the onion service on OnionBalance should 93 | be ready within 10 minutes. 94 | 95 | Monitoring 96 | ~~~~~~~~~~ 97 | 98 | On each ``obhostX``, run: 99 | 100 | .. 
code-block:: bash 101 | 102 | sudo watch 'socat - unix-connect:/var/run/onionbalance/control' 103 | -------------------------------------------------------------------------------- /docs/use-cases.rst: -------------------------------------------------------------------------------- 1 | Use Cases 2 | ========= 3 | 4 | There a many ways to use OnionBalance to increase the scalability, reliability and security of your onion service. The following are some examples of what is 5 | possible. 6 | 7 | 8 | Current Deployments 9 | ------------------- 10 | 11 | **SKS Keyserver Pool** 12 | Kristian Fiskerstrand has set up a hidden service 13 | `keyserver pool `_ 14 | which connects users to one of the available hidden service key servers. 15 | 16 | 17 | 18 | Other Examples 19 | -------------- 20 | 21 | - A popular onion service with an overloaded web server or Tor process 22 | 23 | A service such as Facebook which gets a large number of users would like 24 | to distribute client requests across multiple servers as the load is too 25 | much for a single Tor instance to handle. They would also like to balance 26 | between instances when the 'encrypted services' proposal is implemented [2555]. 27 | 28 | - Redundancy and automatic failover 29 | 30 | A political activist would like to keep their web service accessible and 31 | secure in the event that the secret police seize some of their servers. 32 | Clients should ideally automatically fail-over to another online instances 33 | with minimal service disruption. 34 | 35 | - Secure Onion Service Key storage 36 | 37 | An onion service operator would like to compartmentalize their permanent 38 | onion key in a secure location separate to their Tor process and other 39 | services. With this proposal permanent keys could be stored on an 40 | independent, isolated system. 
# -*- coding: utf-8 -*-
import os

"""
Define default config options for the management server
"""

# Set default configuration options for the management server

# Number of descriptor ID replicas created for each service.
REPLICAS = 2
HSDIR_SET = 3  # Publish each descriptor to 3 consecutive HSDirs
MAX_INTRO_POINTS = 10  # Maximum introduction points per descriptor
DESCRIPTOR_VALIDITY_PERIOD = 24 * 60 * 60  # Descriptor lifetime (seconds)
DESCRIPTOR_OVERLAP_PERIOD = 60 * 60  # Overlap window when IDs rotate (seconds)
DESCRIPTOR_UPLOAD_PERIOD = 60 * 60  # Re-upload descriptor every hour
REFRESH_INTERVAL = 10 * 60  # How often to fetch instance descriptors (seconds)
PUBLISH_CHECK_INTERVAL = 5 * 60  # How often to check if a publish is needed
INITIAL_DELAY = 45  # Wait for instance descriptors before publishing

# Log destination and verbosity; both overridable from the environment.
LOG_LOCATION = os.environ.get('ONIONBALANCE_LOG_LOCATION')
LOG_LEVEL = os.environ.get('ONIONBALANCE_LOG_LEVEL', 'info')

# Unix domain socket which reports real-time daemon status.
STATUS_SOCKET_LOCATION = os.environ.get('ONIONBALANCE_STATUS_SOCKET_LOCATION',
                                        '/var/run/onionbalance/control')

# Tor control port connection settings.
TOR_ADDRESS = '127.0.0.1'
TOR_PORT = 9051
TOR_CONTROL_PASSWORD = None
TOR_CONTROL_SOCKET = os.environ.get('ONIONBALANCE_TOR_CONTROL_SOCKET',
                                    '/var/run/tor/control')

# Upload multiple distinct descriptors containing different subsets of
# the available introduction points
DISTINCT_DESCRIPTORS = True

# Store global data about onion services and their instance nodes.
services = []

# Shared stem Controller connection; set once the daemon connects to Tor.
controller = None
4 | """ 5 | from bisect import bisect_left 6 | import base64 7 | import binascii 8 | 9 | import stem 10 | import stem.descriptor 11 | 12 | import onionbalance.log as log 13 | import onionbalance.config as config 14 | 15 | logger = log.get_logger() 16 | 17 | HSDIR_LIST = [] 18 | 19 | 20 | def refresh_consensus(): 21 | """ 22 | Update consensus state when Tor receives a new network status 23 | 24 | Retrieve the current set of hidden service directories 25 | """ 26 | global HSDIR_LIST 27 | 28 | if not config.controller: 29 | logger.warning("Controller connection not found in the configuration. " 30 | "Cannot update the Tor state.") 31 | return None 32 | else: 33 | controller = config.controller 34 | 35 | # pylint: disable=no-member 36 | # Retrieve the current set of hidden service directories 37 | hsdirs = [] 38 | try: 39 | for desc in controller.get_network_statuses(): 40 | if stem.Flag.HSDIR in desc.flags: 41 | hsdirs.append(desc.fingerprint) 42 | except IOError as err: 43 | logger.error("Could not load consensus from Tor: %s" % err) 44 | else: 45 | HSDIR_LIST = hsdirs 46 | logger.debug("Updated the list of Tor hidden service directories.") 47 | 48 | 49 | def get_hsdirs(descriptor_id): 50 | """ 51 | Get the responsible HSDirs for a given descriptor ID. 52 | """ 53 | 54 | # Try fetch a consensus if we haven't loaded one already 55 | if not HSDIR_LIST: 56 | refresh_consensus() 57 | 58 | if not HSDIR_LIST: 59 | raise ValueError('Could not determine the responsible HSDirs.') 60 | 61 | desc_id_bytes = base64.b32decode(descriptor_id, 1) 62 | descriptor_id_hex = (binascii.hexlify(desc_id_bytes). 
63 | decode('utf-8').upper()) 64 | 65 | responsible_hsdirs = [] 66 | 67 | # Find postion of descriptor ID in the HSDir list 68 | index = descriptor_position = bisect_left(HSDIR_LIST, descriptor_id_hex) 69 | 70 | # Pick HSDirs until we have enough 71 | while len(responsible_hsdirs) < config.HSDIR_SET: 72 | try: 73 | responsible_hsdirs.append(HSDIR_LIST[index]) 74 | index += 1 75 | except IndexError: 76 | # Wrap around when we reach the end of the HSDir list 77 | index = 0 78 | 79 | # Do not choose a HSDir more than once 80 | if index == descriptor_position: 81 | break 82 | 83 | return responsible_hsdirs 84 | -------------------------------------------------------------------------------- /onionbalance/data/config.example.yaml: -------------------------------------------------------------------------------- 1 | # Onion Load Balancer Config File 2 | # --- 3 | # Each hidden service key line should be followed be followed by a list of 0 4 | # or more instances which contain the onion address of the load balancing 5 | # hidden service 6 | 7 | REFRESH_INTERVAL: 600 # How often to poll for updated descriptors 8 | services: 9 | - key: /path/to/private_key # 7s4hxwwifcslrus2.onion 10 | instances: 11 | - address: o6ff73vmigi4oxka # web1 12 | - address: nkz23ai6qesuwqhc # web2 13 | - key: /path/to/private_key.enc # dpkdeys3apjtqydk.onion 14 | instances: 15 | - address: htbzowpp5cn7wj2u # irc1 16 | - address: huey7aiod8dja8a3 # irc2 17 | -------------------------------------------------------------------------------- /onionbalance/data/torrc-instance: -------------------------------------------------------------------------------- 1 | # Tor config for the onion service instance servers 2 | # --- 3 | # The instance servers run standard onion services. In Basic mode the 4 | # control port does not need to be enabled. 
class IntroductionPointSet(object):
    """
    Track the pool of introduction points available for a service.

    The pool holds one list of introduction points per backend instance.
    Points are handed out breadth-first across the instances so that any
    selection is spread as evenly as possible over the backends, and each
    point is handed out only once per pass through the pool.
    """

    def __init__(self, available_introduction_points):
        """
        Store the per-instance introduction point lists.

        Both the points within each instance and the instance order are
        shuffled up front, so later calls to .choose() are deterministic.
        """
        for ip_list in available_introduction_points:
            random.shuffle(ip_list)
        random.shuffle(available_introduction_points)

        self.available_intro_points = available_introduction_points
        self.intro_point_generator = self.get_intro_point()

    def __len__(self):
        """Total number of available introduction points."""
        return sum(map(len, self.available_intro_points))

    def get_intro_point(self):
        """
        Generator yielding introduction points indefinitely.

        Points are interleaved breadth-first across the backend instances;
        the interleaved sequence is cycled so the generator never ends.
        Padding entries from the interleave (None) are skipped.
        """
        interleaved = itertools.chain.from_iterable(
            zip_longest(*self.available_intro_points))
        for candidate in itertools.cycle(interleaved):
            if candidate:
                yield candidate

    def choose(self, count=10, shuffle=True):
        """
        Return up to `count` introduction points from the pool.

        Selection favours the widest spread across backend instances.
        Returns a list of IntroductionPoints.
        """
        # Never draw more points than exist, to avoid repeats.
        count = min(len(self), count)
        selection = list(itertools.islice(self.intro_point_generator, count))

        if shuffle:
            random.shuffle(selection)
        return selection
def generate_hs_descriptor_raw(desc_id_base32, permanent_key_block,
                               secret_id_part_base32, publication_time,
                               introduction_points_part):
    """
    Generate the unsigned hidden service descriptor string.

    Fields are emitted in the order required by the v2 rendezvous
    descriptor format, ending with a bare 'signature' token so a
    signature block can be appended later.
    """
    fields = (
        "rendezvous-service-descriptor {}".format(desc_id_base32),
        "version 2",
        "permanent-key",
        permanent_key_block,
        "secret-id-part {}".format(secret_id_part_base32),
        "publication-time {}".format(publication_time),
        "protocol-versions 2,3",
        "introduction-points",
        introduction_points_part,
        "signature\n",
    )
    return '\n'.join(fields)
def make_public_key_block(key):
    """
    Format an RSA public key as a PEM-style armored block.

    The key's ASN.1 (DER) representation is base64 encoded, wrapped at
    64 characters and surrounded with RSA PUBLIC KEY headers.
    """
    der_encoded = util.get_asn1_sequence(key)
    body = textwrap.fill(base64.b64encode(der_encoded).decode('utf-8'), 64)

    return '\n'.join([
        '-----BEGIN RSA PUBLIC KEY-----',
        body,
        '-----END RSA PUBLIC KEY-----'])
205 | """ 206 | 207 | digest = util.add_pkcs1_padding(digest) 208 | (signature_long, ) = private_key.sign(digest, None) 209 | signature_bytes = Crypto.Util.number.long_to_bytes(signature_long, 128) 210 | signature_base64 = base64.b64encode(signature_bytes).decode('utf-8') 211 | signature_base64 = textwrap.fill(signature_base64, 64) 212 | 213 | # Add the header and footer: 214 | signature_with_headers = '\n'.join([ 215 | '-----BEGIN SIGNATURE-----', 216 | signature_base64, 217 | '-----END SIGNATURE-----']) 218 | return signature_with_headers 219 | 220 | 221 | def sign_descriptor(descriptor, service_key): 222 | """ 223 | Sign or resign a provided hidden service descriptor 224 | """ 225 | token_descriptor_signature = '\nsignature\n' 226 | 227 | # Remove signature block if it exists 228 | if token_descriptor_signature in descriptor: 229 | descriptor = descriptor[:descriptor.find(token_descriptor_signature) + 230 | len(token_descriptor_signature)] 231 | else: 232 | descriptor = descriptor.strip() + token_descriptor_signature 233 | 234 | descriptor_digest = hashlib.sha1(descriptor.encode('utf-8')).digest() 235 | signature_with_headers = sign_digest(descriptor_digest, service_key) 236 | return descriptor + signature_with_headers 237 | 238 | 239 | def descriptor_received(descriptor_content): 240 | """ 241 | Process onion service descriptors retrieved from the HSDir system or 242 | received directly over the metadata channel. 
243 | """ 244 | 245 | try: 246 | parsed_descriptor = stem.descriptor.hidden_service_descriptor.\ 247 | HiddenServiceDescriptor(descriptor_content, validate=True) 248 | except ValueError: 249 | logger.exception("Received an invalid service descriptor.") 250 | return None 251 | 252 | # Ensure the received descriptor matches the requested descriptor 253 | permanent_key = Crypto.PublicKey.RSA.importKey( 254 | parsed_descriptor.permanent_key) 255 | descriptor_onion_address = util.calc_onion_address(permanent_key) 256 | 257 | known_descriptor, instance_changed = False, False 258 | for instance in [instance for service in config.services for 259 | instance in service.instances]: 260 | if instance.onion_address == descriptor_onion_address: 261 | instance_changed |= instance.update_descriptor(parsed_descriptor) 262 | known_descriptor = True 263 | 264 | if instance_changed: 265 | logger.info("The introduction point set has changed for instance " 266 | "%s.onion.", descriptor_onion_address) 267 | 268 | if not known_descriptor: 269 | # No matching service instance was found for the descriptor 270 | logger.debug("Received a descriptor for an unknown service:\n%s", 271 | descriptor_content.decode('utf-8')) 272 | logger.warning("Received a descriptor with address %s.onion that " 273 | "did not match any configured service instances.", 274 | descriptor_onion_address) 275 | 276 | return None 277 | 278 | 279 | def upload_descriptor(controller, signed_descriptor, hsdirs=None): 280 | """ 281 | Upload descriptor via the Tor control port 282 | 283 | If no HSDirs are specified, Tor will upload to what it thinks are the 284 | responsible directories 285 | """ 286 | logger.debug("Beginning service descriptor upload.") 287 | 288 | # Provide server fingerprints to control command if HSDirs are specified. 
289 | if hsdirs: 290 | server_args = ' '.join([("SERVER={}".format(hsdir)) 291 | for hsdir in hsdirs]) 292 | else: 293 | server_args = "" 294 | 295 | # Stem will insert the leading + and trailing '\r\n.\r\n' 296 | response = controller.msg("HSPOST %s\n%s" % 297 | (server_args, signed_descriptor)) 298 | 299 | (response_code, divider, response_content) = response.content()[0] 300 | if not response.is_ok(): 301 | if response_code == "552": 302 | raise stem.InvalidRequest(response_code, response_content) 303 | else: 304 | raise stem.ProtocolError("HSPOST returned unexpected response " 305 | "code: %s\n%s" % (response_code, 306 | response_content)) 307 | -------------------------------------------------------------------------------- /onionbalance/eventhandler.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from builtins import str, object 3 | import logging 4 | import signal 5 | import sys 6 | 7 | import stem 8 | 9 | from onionbalance import log 10 | from onionbalance import descriptor 11 | from onionbalance import consensus 12 | 13 | logger = log.get_logger() 14 | 15 | 16 | class EventHandler(object): 17 | 18 | """ 19 | Handles asynchronous Tor events. 
20 | """ 21 | 22 | @staticmethod 23 | def new_status(status_event): 24 | """ 25 | Parse Tor status events such as "STATUS_GENERAL" 26 | """ 27 | # pylint: disable=no-member 28 | if status_event.status_type == stem.StatusType.GENERAL: 29 | if status_event.action == "CONSENSUS_ARRIVED": 30 | # Update the local view of the consensus in OnionBalance 31 | try: 32 | consensus.refresh_consensus() 33 | except Exception: 34 | logger.exception("An unexpected exception occured in the " 35 | "when processing the consensus update " 36 | "callback.") 37 | 38 | @staticmethod 39 | def new_desc(desc_event): 40 | """ 41 | Parse HS_DESC response events 42 | """ 43 | logger.debug("Received new HS_DESC event: %s", str(desc_event)) 44 | 45 | @staticmethod 46 | def new_desc_content(desc_content_event): 47 | """ 48 | Parse HS_DESC_CONTENT response events for descriptor content 49 | 50 | Update the HS instance object with the data from the new descriptor. 51 | """ 52 | logger.debug("Received new HS_DESC_CONTENT event for %s.onion", 53 | desc_content_event.address) 54 | 55 | # Check that the HSDir returned a descriptor that is not empty 56 | descriptor_text = str(desc_content_event.descriptor).encode('utf-8') 57 | 58 | # HSDirs provide a HS_DESC_CONTENT response with either one or two 59 | # CRLF lines when they do not have a matching descriptor. Using 60 | # len() < 5 should ensure all empty HS_DESC_CONTENT events are matched. 
class SignalHandler(object):
    """
    Respond to signals sent to the OnionBalance daemon process.
    """

    def __init__(self, controller, status_socket):
        """
        Remember the resources to tear down and register the shutdown
        handler for SIGINT (Ctrl-C) and SIGTERM.
        """
        self._tor_controller = controller
        self._status_socket = status_socket

        # Both termination signals share one handler.
        for signum in (signal.SIGINT, signal.SIGTERM):
            signal.signal(signum, self._handle_sigint_sigterm)

    def _handle_sigint_sigterm(self, signum, frame):
        """
        Shut down cleanly on SIGINT/SIGTERM.

        Closes the Tor control connection, cleans up the status socket,
        flushes logging and exits the process.
        """
        logger.info("Signal %d received, exiting", signum)
        self._tor_controller.close()
        self._status_socket.close()
        logging.shutdown()
        sys.exit(0)
class Instance(object):
    """
    A single back-end load balancing hidden service instance.
    """

    def __init__(self, controller, onion_address, authentication_cookie=None):
        """
        Initialise an Instance for one backend onion service.

        Any '.onion' suffix is stripped from the supplied address.
        """
        self.controller = controller

        # Onion address for the service instance.
        if onion_address:
            onion_address = onion_address.replace('.onion', '')
        self.onion_address = onion_address
        self.authentication_cookie = authentication_cookie

        # Latest set of introduction points received for this instance.
        self.introduction_points = []

        # When a descriptor was last received for this instance, and the
        # publication timestamp of the currently loaded descriptor.
        self.received = None
        self.timestamp = None

        # Set when the introduction point set changes; a new master
        # descriptor is then published.
        self.changed_since_published = False

    def fetch_descriptor(self):
        """
        Try fetch a fresh descriptor for this service instance from the HSDirs
        """
        logger.debug("Trying to fetch a descriptor for instance %s.onion.",
                     self.onion_address)
        try:
            self.controller.get_hidden_service_descriptor(self.onion_address,
                                                          await_result=False)
        except stem.SocketClosed:
            # Tor may be restarting; let the caller handle reconnection.
            raise
        except stem.DescriptorUnavailable:
            # Could not find the descriptor on the HSDir.
            self.received = None
            logger.warning("No descriptor received for instance %s.onion, "
                           "the instance may be offline.", self.onion_address)

    def update_descriptor(self, parsed_descriptor):
        """
        Update introduction points when a new HS descriptor is received.

        Returns True if the introduction point set has changed,
        False otherwise.
        """
        self.received = datetime.datetime.utcnow()

        logger.debug("Received a descriptor for instance %s.onion.",
                     self.onion_address)

        # Ignore replayed descriptors that are older than the one already
        # loaded; HSDirs may serve stale, expired descriptors.
        if self.timestamp and parsed_descriptor.published < self.timestamp:
            logger.error("Received descriptor for instance %s.onion with "
                         "publication timestamp (%s) older than the latest "
                         "descriptor (%s). Ignoring the descriptor.",
                         self.onion_address,
                         parsed_descriptor.published,
                         self.timestamp)
            return False

        self.timestamp = parsed_descriptor.published

        # Parse the introduction point list, decrypting if necessary.
        fresh_intro_points = parsed_descriptor.introduction_points(
            authentication_cookie=self.authentication_cookie
        )

        # Compare the introduction point identifiers (fingerprints of the
        # per-IP circuit service keys) against the cached set.
        fresh_ids = set(ip.identifier for ip in fresh_intro_points)
        cached_ids = set(ip.identifier for ip in self.introduction_points)
        if fresh_ids == cached_ids:
            logger.debug("Introduction points for instance %s.onion matched "
                         "the cached set.", self.onion_address)
            return False

        self.changed_since_published = True
        self.introduction_points = fresh_intro_points
        return True

    def __eq__(self, other):
        """
        Instance objects are equal if they have the same onion address.
        """
        return (isinstance(other, Instance) and
                self.onion_address == other.onion_address)

    def __hash__(self):
        """
        Hash on the onion address, allowing set membership for instances.
        """
        return hash(self.onion_address)
def get_logger():
    """
    Return the shared "onionbalance" logger instance.
    """
    return logger


def setup_file_logger(log_file):
    """
    Add a rotating log file handler to the existing "onionbalance" logger.

    :param log_file: path to the log file (rotated at 10 MiB, 3 backups).
    """
    file_handler = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=10485760, backupCount=3)
    file_handler.setFormatter(logging.Formatter(
        fmt="%(asctime)s [%(levelname)s]: %(message)s"))
    logging.getLogger('onionbalance').addHandler(file_handler)


def get_config_generator_logger():
    """
    Simplified logger for interactive config generator CLI

    Only attach the stream handler on the first call: re-attaching on every
    call would emit each log message once per accumulated handler.
    """
    logger = logging.getLogger("onionbalance-config")
    if not logger.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(fmt="[%(levelname)s]: "
                                               "%(message)s"))
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
def parse_cmd_args():
    """
    Parses and returns command line arguments.

    Returns the configured argparse.ArgumentParser; callers invoke
    .parse_args() on the result.
    """

    parser = argparse.ArgumentParser(
        description="onionbalance distributes the requests for a Tor hidden "
        "service across multiple Tor instances.")

    parser.add_argument("-i", "--ip", type=str, default=None,
                        help="Tor controller IP address")

    parser.add_argument("-p", "--port", type=int, default=None,
                        help="Tor controller port")

    parser.add_argument("-s", "--socket", type=str, default=None,
                        help="Tor unix domain control socket location")

    # The ONIONBALANCE_CONFIG environment variable provides the default
    # config location; "config.yaml" is the fallback.
    parser.add_argument("-c", "--config", type=str,
                        default=os.environ.get('ONIONBALANCE_CONFIG',
                                               "config.yaml"),
                        help="Config file location")

    parser.add_argument("-v", "--verbosity", type=str, default=None,
                        help="Minimum verbosity level for logging. Available "
                             "in ascending order: debug, info, warning, "
                             "error, critical. The default is info.")

    parser.add_argument('--version', action='version',
                        version='onionbalance %s' % onionbalance.__version__)

    return parser
def main():
    """
    Entry point when invoked over the command line.

    Loads the config file, connects and authenticates to the Tor control
    port (or unix socket), registers event listeners and starts the
    descriptor fetch/publish scheduler. Exits the process on any
    connection or authentication failure.
    """
    setproctitle('onionbalance')
    args = parse_cmd_args().parse_args()
    config_file_options = settings.parse_config_file(args.config)

    # Update global configuration with options specified in the config file.
    # Only upper-case attributes of the config module are treated as
    # overridable settings.
    for setting in dir(config):
        if setting.isupper() and config_file_options.get(setting):
            setattr(config, setting, config_file_options.get(setting))

    # Override the log level if specified on the command line.
    if args.verbosity:
        config.LOG_LEVEL = args.verbosity.upper()

    # Write log file if configured in environment variable or config file
    if config.LOG_LOCATION:
        log.setup_file_logger(config.LOG_LOCATION)

    logger.setLevel(logging.__dict__[config.LOG_LEVEL.upper()])

    # Create a connection to the Tor unix domain control socket or control
    # port. Command-line options take precedence over config file values.
    try:
        tor_socket = (args.socket or config.TOR_CONTROL_SOCKET)
        tor_address = (args.ip or config.TOR_ADDRESS)
        tor_port = (args.port or config.TOR_PORT)
        try:
            controller = Controller.from_socket_file(path=tor_socket)
            logger.debug("Successfully connected to the Tor control socket "
                         "%s.", tor_socket)
        except stem.SocketError:
            # Fall back to the TCP control port when the socket fails.
            logger.debug("Unable to connect to the Tor control socket %s.",
                         tor_socket)
            controller = Controller.from_port(address=tor_address,
                                              port=tor_port)
            logger.debug("Successfully connected to the Tor control port.")
    except stem.SocketError as exc:
        logger.error("Unable to connect to Tor control socket or port: %s",
                     exc)
        sys.exit(1)

    try:
        controller.authenticate(password=config.TOR_CONTROL_PASSWORD)
    except stem.connection.AuthenticationFailure as exc:
        logger.error("Unable to authenticate on the Tor control connection: "
                     "%s", exc)
        sys.exit(1)
    else:
        logger.debug("Successfully authenticated on the Tor control "
                     "connection.")

    # Unix domain socket used by external tools to query runtime status.
    status_socket = status.StatusSocket(config.STATUS_SOCKET_LOCATION)
    eventhandler.SignalHandler(controller, status_socket)

    # Disable no-member due to bug with "Instance of 'Enum' has no * member"
    # pylint: disable=no-member

    # Check that the Tor client supports the HSPOST control port command,
    # which is required to upload the master descriptors.
    if not controller.get_version() >= stem.version.Requirement.HSPOST:
        logger.error("A Tor version >= %s is required. You may need to "
                     "compile Tor from source or install a package from "
                     "the experimental Tor repository.",
                     stem.version.Requirement.HSPOST)
        sys.exit(1)

    # Load the keys and config for each onion service
    settings.initialize_services(controller,
                                 config_file_options.get('services'))

    # Finished parsing all the config file.

    # Subscribe to asynchronous Tor events so new descriptors are parsed
    # as soon as they arrive.
    handler = eventhandler.EventHandler()
    controller.add_event_listener(handler.new_status,
                                  EventType.STATUS_GENERAL)
    controller.add_event_listener(handler.new_desc,
                                  EventType.HS_DESC)
    controller.add_event_listener(handler.new_desc_content,
                                  EventType.HS_DESC_CONTENT)

    # Schedule descriptor fetch and upload events
    scheduler.add_job(config.REFRESH_INTERVAL, fetch_instance_descriptors,
                      controller)
    scheduler.add_job(config.PUBLISH_CHECK_INTERVAL, publish_all_descriptors)

    # Run initial fetch of HS instance descriptors
    scheduler.run_all(delay_seconds=config.INITIAL_DELAY)

    # Begin main loop to poll for HS descriptors
    scheduler.run_forever()

    return 0
class Job(object):
    """
    A single scheduled task with a fixed run interval.
    """

    def __init__(self, interval, job_func, *job_args, **job_kwargs):
        self.interval = interval
        self.planned_run_time = time.time()

        # Bind the callable together with its arguments; copy metadata so
        # that __repr__ can show the wrapped function's name.
        self.job_func = functools.partial(job_func, *job_args, **job_kwargs)
        functools.update_wrapper(self.job_func, job_func)

    def __lt__(self, other):
        """
        Order jobs by their next scheduled run time.
        """
        return self.planned_run_time < other.planned_run_time

    @property
    def should_run(self):
        """
        True when the job's scheduled run time has arrived.
        """
        return time.time() >= self.planned_run_time

    def run(self, override_run_time=None):
        """
        Execute the job, then reschedule it relative to its planned time.
        """
        logger.debug("Running {}".format(self))
        result = self.job_func()

        # If forced to run early (via run_all), pretend the job was
        # scheduled now before adding the interval.
        if override_run_time:
            self.planned_run_time = time.time()
        self.planned_run_time += self.interval

        return result

    def __repr__(self):
        """
        Human readable representation of the Job and its arguments.
        """
        arg_strings = [repr(a) for a in self.job_func.args]
        kwarg_strings = ["{}={}".format(key, repr(val)) for
                         key, val in self.job_func.keywords.items()]
        return "{}({})".format(self.job_func.__name__,
                               ', '.join(arg_strings + kwarg_strings))
def add_job(interval, function, *job_args, **job_kwargs):
    """
    Register a job to run every `interval` seconds, starting from now.
    """
    jobs.append(Job(interval, function, *job_args, **job_kwargs))


def _run_job(job, override_run_time=False):
    """
    Execute a single job; it stays in the job list with an updated
    scheduled time.
    """
    return job.run(override_run_time)


def run_all(delay_seconds=0):
    """
    Run every registered job now, regardless of schedule, pausing
    `delay_seconds` between jobs.
    """
    for job in jobs:
        _run_job(job, override_run_time=True)
        time.sleep(delay_seconds)


def run_forever(check_interval=1):
    """
    Poll the job list forever, running due jobs in schedule order.
    """
    while True:
        if not jobs:
            logger.error("No scheduled jobs found, scheduler exiting.")
            return None

        due_jobs = sorted(job for job in jobs if job.should_run)
        for job in due_jobs:
            _run_job(job)

        time.sleep(check_interval)
def publish_all_descriptors():
    """
    Called periodically to upload new super-descriptors if needed

    .. todo:: Publishing descriptors for different services at the same time
              will leak that they are related. Descriptors should
              be published individually at a random interval to avoid
              correlation.
    """
    logger.debug("Checking if any master descriptors should be published.")
    for service in config.services:
        service.descriptor_publish()


class Service(object):
    """
    Service represents a front-facing hidden service which should
    be load-balanced.
    """

    def __init__(self, controller, service_key=None, instances=None):
        """
        Initialise a HiddenService object.

        :param controller: stem Controller connected to the local Tor.
        :param service_key: PyCrypto RSA key for the master service.
        :param instances: list of Instance objects backing this service.
        :raises ValueError: if service_key is not a PyCrypto RSA key.
        """
        self.controller = controller

        # Service key must be a valid PyCrypto RSA key object
        if isinstance(service_key, Crypto.PublicKey.RSA._RSAobj):
            self.service_key = service_key
        else:
            raise ValueError("Service key is not a valid RSA object.")

        # List of instances for this onion service
        if not instances:
            instances = []
        self.instances = instances

        # Calculate the onion address for this service
        self.onion_address = util.calc_onion_address(self.service_key)

        # Timestamp when descriptor upload was last attempted
        self.uploaded = None

    def _intro_points_modified(self):
        """
        Check if the introduction point set has changed since last publish.
        """
        return any(instance.changed_since_published
                   for instance in self.instances)

    def _descriptor_not_uploaded_recently(self):
        """
        Check if the master descriptor hasn't been uploaded recently.
        """
        if not self.uploaded:
            # Descriptor never uploaded
            return True

        descriptor_age = (datetime.datetime.utcnow() - self.uploaded)
        return descriptor_age.total_seconds() > config.DESCRIPTOR_UPLOAD_PERIOD

    def _descriptor_id_changing_soon(self):
        """
        Check whether the descriptor ID will change within the overlap
        period; if so, descriptors must be uploaded under both IDs.
        """
        permanent_id = base64.b32decode(self.onion_address, 1)
        seconds_valid = util.get_seconds_valid(time.time(), permanent_id)

        return seconds_valid < config.DESCRIPTOR_OVERLAP_PERIOD

    def _select_introduction_points(self):
        """
        Choose set of introduction points from all fresh descriptors

        Returns a descriptor.IntroductionPointSet() which can be used to
        choose introduction points.
        """
        available_intro_points = []

        # Loop through each instance and determine fresh intro points
        for instance in self.instances:
            if not instance.received:
                logger.info("No descriptor received for instance %s.onion "
                            "yet.", instance.onion_address)
                continue

            # The instance may be offline if no descriptor has been received
            # for it recently or if the received descriptor's timestamp is
            # too old
            received_age = datetime.datetime.utcnow() - instance.received
            timestamp_age = datetime.datetime.utcnow() - instance.timestamp
            received_age = received_age.total_seconds()
            timestamp_age = timestamp_age.total_seconds()

            if received_age > config.DESCRIPTOR_UPLOAD_PERIOD:
                logger.info("Our descriptor for instance %s.onion "
                            "was received too long ago (%d). "
                            "The instance may be offline. Its introduction "
                            "points will not be included in the master "
                            "descriptor.",
                            instance.onion_address,
                            received_age)
                continue
            elif timestamp_age > (4 * 60 * 60):
                logger.info("Our descriptor for instance %s.onion "
                            "has an old timestamp (%d). "
                            "The instance may be offline. Its introduction "
                            "points will not be included in the master "
                            "descriptor.",
                            instance.onion_address,
                            timestamp_age)
                continue
            else:
                # Include this instance's introduction points
                instance.changed_since_published = False
                available_intro_points.append(instance.introduction_points)

        return descriptor.IntroductionPointSet(available_intro_points)

    def _publish_descriptor(self, deviation=0):
        """
        Create, sign and upload master descriptors for this service

        :param deviation: offset added to the descriptor ID calculation
            (1 publishes under the next time period's ID).
        """

        # Retrieve the set of available introduction points
        intro_point_set = self._select_introduction_points()
        max_intro_points = config.MAX_INTRO_POINTS

        # Upload multiple unique descriptors which contain different
        # subsets of the available introduction points.
        # (https://github.com/DonnchaC/onionbalance/issues/16)
        distinct_descriptors = config.DISTINCT_DESCRIPTORS

        # If we have <= MAX_INTRO_POINTS we should choose the introduction
        # points now and use the same set in every descriptor. Using the
        # same set of introduction points will look more like a standard
        # Tor client.
        num_intro_points = len(intro_point_set)

        if num_intro_points <= max_intro_points:
            intro_points = intro_point_set.choose(num_intro_points)
            logger.debug("We have %d IPs, not using distinct descriptors.",
                         len(intro_point_set))
            distinct_descriptors = False
        elif not distinct_descriptors:
            # BUGFIX: distinct descriptors are disabled but more IPs are
            # available than fit in one descriptor. Previously this path
            # left `intro_points` unassigned, raising a NameError below.
            # Choose a random subset of the maximum size instead.
            intro_points = intro_point_set.choose(max_intro_points)

        for replica in range(0, config.REPLICAS):
            # Using distinct descriptors, choose a new set of intro points
            # for each descriptor and upload it to individual HSDirs.
            if distinct_descriptors:
                descriptor_id = util.calc_descriptor_id_b32(
                    self.onion_address,
                    time=time.time(),
                    replica=replica,
                    deviation=deviation,
                )
                responsible_hsdirs = consensus.get_hsdirs(descriptor_id)

                for hsdir in responsible_hsdirs:
                    intro_points = intro_point_set.choose(max_intro_points)
                    try:
                        signed_descriptor = (
                            descriptor.generate_service_descriptor(
                                self.service_key,
                                introduction_point_list=intro_points,
                                replica=replica,
                                deviation=deviation
                            ))
                    except ValueError as exc:
                        logger.warning("Error generating descriptor: %s", exc)
                        continue

                    # Signed descriptor was generated successfully, upload it
                    # to the respective HSDir
                    self._upload_descriptor(signed_descriptor, replica,
                                            hsdirs=hsdir)
                logger.info("Published distinct master descriptors for "
                            "service %s.onion under replica %d.",
                            self.onion_address, replica)

            else:
                # Not using distinct descriptors, upload one descriptor
                # under each replica and let Tor pick the HSDirs.
                try:
                    signed_descriptor = descriptor.generate_service_descriptor(
                        self.service_key,
                        introduction_point_list=intro_points,
                        replica=replica,
                        deviation=deviation
                    )
                except ValueError as exc:
                    logger.warning("Error generating descriptor: %s", exc)
                    continue

                # Signed descriptor was generated successfully, upload it
                self._upload_descriptor(signed_descriptor, replica)
                logger.info("Published a descriptor for service %s.onion "
                            "under replica %d.", self.onion_address, replica)

        # It would be better to set last_uploaded when an upload succeeds and
        # not when an upload is just attempted. Unfortunately the HS_DESC
        # UPLOADED event does not provide information about the service and
        # so it can't be used to determine when descriptor upload succeeds
        self.uploaded = datetime.datetime.utcnow()

    def _upload_descriptor(self, signed_descriptor, replica, hsdirs=None):
        """
        Convenience method to upload a descriptor
        Handle some error checking and logging inside the Service class
        """
        if hsdirs and not isinstance(hsdirs, list):
            hsdirs = [hsdirs]

        while True:
            try:
                descriptor.upload_descriptor(self.controller,
                                             signed_descriptor,
                                             hsdirs=hsdirs)
                break
            except stem.SocketClosed:
                # Retry after reconnecting to the control port.
                logger.error("Error uploading descriptor for service "
                             "%s.onion, Socket is closed.",
                             self.onion_address)
                util.reauthenticate(self.controller, logger)
            except stem.ControllerError:
                logger.exception("Error uploading descriptor for service "
                                 "%s.onion.", self.onion_address)
                break

    def descriptor_publish(self, force_publish=False):
        """
        Publish descriptor if have new IPs or if descriptor has expired
        """

        # A descriptor should be published if any of the following conditions
        # are True
        if any([self._intro_points_modified(),  # If any IPs have changed
                self._descriptor_not_uploaded_recently(),
                force_publish]):

            logger.debug("Publishing a descriptor for service %s.onion.",
                         self.onion_address)
            self._publish_descriptor()

            # If the descriptor ID will change soon, need to upload under
            # the new ID too.
            if self._descriptor_id_changing_soon():
                logger.info("Publishing a descriptor for service %s.onion "
                            "under next descriptor ID.", self.onion_address)
                self._publish_descriptor(deviation=1)

        else:
            logger.debug("Not publishing a new descriptor for service "
                         "%s.onion.", self.onion_address)
def parse_config_file(config_file):
    """
    Parse config file containing service information

    Relative key paths in the config are rewritten relative to the config
    file's directory. Exits the process if the file does not exist.
    """
    config_path = os.path.abspath(config_file)
    if not os.path.exists(config_path):
        logger.error("The specified config file '%s' does not exist. The "
                     "onionbalance-config tool can generate the required "
                     "keys and config files.", config_path)
        sys.exit(1)

    with open(config_file, 'r') as handle:
        # Use safe_load: yaml.load without an explicit Loader can construct
        # arbitrary Python objects from the stream and is deprecated.
        config_data = yaml.safe_load(handle.read())
    logger.info("Loaded the config file '%s'.", config_path)

    # Rewrite relative paths in the config to be relative to the config
    # file directory
    config_directory = os.path.dirname(config_path)
    for service in config_data.get('services', []):
        if not os.path.isabs(service.get('key')):
            service['key'] = os.path.join(config_directory, service['key'])

    return config_data
def initialize_services(controller, services_config):
    """
    Load keys for services listed in the config

    Builds Service and Instance objects for every configured onion service
    and stores them in config.services. Exits the process on any key
    loading failure.
    """

    # Load the keys and config for each onion service
    for service in services_config:
        try:
            service_key = util.key_decrypt_prompt(service.get("key"))
        except (IOError, OSError) as e:
            if e.errno == errno.ENOENT:
                logger.error("Private key file %s could not be found. "
                             "Relative paths in the config file are loaded "
                             "relative to the config file directory.",
                             service.get("key"))
                sys.exit(1)
            elif e.errno == errno.EACCES:
                logger.error("Permission denied to private key %s.",
                             service.get("key"))
                sys.exit(1)
            else:
                raise
        # Key file was read but a valid private key was not found.
        if not service_key:
            # Fixed word order in the error message ("a not valid").
            logger.error("Private key %s could not be loaded. It is not a "
                         "valid 1024 bit PEM encoded RSA private key",
                         service.get("key"))
            sys.exit(1)
        else:
            # Successfully imported the private key
            onion_address = util.calc_onion_address(service_key)
            logger.debug("Loaded private key for service %s.onion.",
                         onion_address)

        # Load all instances for the current onion service
        instance_config = service.get("instances", [])
        if not instance_config:
            logger.error("Could not load any instances for service "
                         "%s.onion.", onion_address)
            sys.exit(1)
        else:
            instances = []
            for instance in instance_config:
                instances.append(onionbalance.instance.Instance(
                    controller=controller,
                    onion_address=instance.get("address"),
                    authentication_cookie=instance.get("auth")
                ))

            logger.info("Loaded %d instances for service %s.onion.",
                        len(instances), onion_address)

        # Store service configuration in config.services global
        config.services.append(onionbalance.service.Service(
            controller=controller,
            service_key=service_key,
            instances=instances
        ))

    # Store a global reference to current controller connection
    config.controller = controller
def parse_cmd_args():
    """
    Parses and returns command line arguments for config generator

    Fixes several help strings where adjacent string literals were
    concatenated without a separating space.
    """

    parser = argparse.ArgumentParser(
        description="onionbalance-config generates config files and keys for "
        "OnionBalance instances and management servers. Calling without any "
        "options will initiate an interactive mode.")

    parser.add_argument("--key", type=str, default=None,
                        help="RSA private key for the master onion service.")

    parser.add_argument("-p", "--password", type=str, default=None,
                        help="Optional password which can be used to encrypt "
                             "the master service private key.")

    parser.add_argument("-n", type=int, default=2, dest="num_instances",
                        help="Number of instances to generate (default: "
                             "%(default)s).")

    parser.add_argument("-t", "--tag", type=str, default='srv',
                        help="Prefix name for the service instances "
                             "(default: %(default)s).")

    parser.add_argument("--output", type=str, default='config/',
                        help="Directory to store generated config files. "
                             "The directory will be created if it does not "
                             "already exist.")

    parser.add_argument("--no-interactive", action='store_true',
                        help="Try to run automatically without prompting for "
                             "user input.")

    parser.add_argument("-v", type=str, default="info", dest='verbosity',
                        help="Minimum verbosity level for logging. Available "
                             "in ascending order: debug, info, warning, error, "
                             "critical. The default is info.")

    parser.add_argument("--service-virtual-port", type=str,
                        default="80",
                        help="Onion service port for external client "
                             "connections (default: %(default)s).")

    # TODO: Add validator to check if the target host:port line makes sense.
    parser.add_argument("--service-target", type=str,
                        default="127.0.0.1:80",
                        help="Target IP and port where your service is "
                             "listening (default: %(default)s).")

    # .. todo:: Add option to specify HS host and port for instance torrc

    parser.add_argument('--version', action='version',
                        version='onionbalance %s' % onionbalance.__version__)

    return parser
todo:: Add option to specify HS host and port for instance torrc 167 | 168 | parser.add_argument('--version', action='version', 169 | version='onionbalance %s' % onionbalance.__version__) 170 | 171 | return parser 172 | 173 | 174 | def generate_config(): 175 | """ 176 | Entry point for interactive config file generation. 177 | """ 178 | 179 | # Parse initial command line options 180 | args = parse_cmd_args().parse_args() 181 | 182 | # Simplify the logging output for the command line tool 183 | logger = log.get_config_generator_logger() 184 | 185 | logger.info("Beginning OnionBalance config generation.") 186 | 187 | # If CLI options have been provided, don't enter interactive mode 188 | # Crude check to see if any options beside --verbosity are set. 189 | verbose = True if '-v' in sys.argv else False 190 | 191 | if ((len(sys.argv) > 1 and not verbose) or len(sys.argv) > 3 or 192 | args.no_interactive): 193 | interactive = False 194 | logger.info("Entering non-interactive mode.") 195 | else: 196 | interactive = True 197 | logger.info("No command line arguments found, entering interactive " 198 | "mode.") 199 | 200 | logger.setLevel(logging.__dict__[args.verbosity.upper()]) 201 | 202 | # Check if output directory exists, if not try create it 203 | output_path = None 204 | if interactive: 205 | output_path = input("Enter path to store generated config " 206 | "[{}]: ".format(os.path.abspath(args.output))) 207 | output_path = output_path or args.output 208 | try: 209 | util.try_make_dir(output_path) 210 | except OSError: 211 | logger.exception("Problem encountered when trying to create the " 212 | "output directory %s.", os.path.abspath(output_path)) 213 | else: 214 | logger.debug("Created the output directory '%s'.", 215 | os.path.abspath(output_path)) 216 | 217 | # The output directory should be empty to avoid having conflict keys 218 | # or config files. 
219 | if not util.is_directory_empty(output_path): 220 | logger.error("The specified output directory is not empty. Please " 221 | "delete any files and folders or specify another output " 222 | "directory.") 223 | sys.exit(1) 224 | 225 | # Load master key if specified 226 | key_path = None 227 | if interactive: 228 | # Read key path from user 229 | key_path = input("Enter path to master service private key " 230 | "(Leave empty to generate a key): ") 231 | key_path = args.key or key_path 232 | if key_path: 233 | if not os.path.isfile(key_path): 234 | logger.error("The specified master service private key '%s' " 235 | "could not be found. Please confirm the path and " 236 | "file permissions are correct.", key_path) 237 | sys.exit(1) 238 | else: 239 | # Try load the specified private key file 240 | master_key = util.key_decrypt_prompt(key_path) 241 | if not master_key: 242 | logger.error("The specified master private key %s could not " 243 | "be loaded.", os.path.abspath(master_key)) 244 | sys.exit(1) 245 | else: 246 | master_onion_address = util.calc_onion_address(master_key) 247 | logger.info("Successfully loaded a master key for service " 248 | "%s.onion.", master_onion_address) 249 | 250 | else: 251 | # No key specified, begin generating a new one. 
252 | master_key = Crypto.PublicKey.RSA.generate(1024) 253 | master_onion_address = util.calc_onion_address(master_key) 254 | logger.debug("Created a new master key for service %s.onion.", 255 | master_onion_address) 256 | 257 | # Finished loading/generating master key, now try generate keys for 258 | # each service instance 259 | num_instances = None 260 | if interactive: 261 | num_instances = input("Number of instance services to create " 262 | "[{}]: ".format(args.num_instances)) 263 | # Cast to int if a number was specified 264 | try: 265 | num_instances = int(num_instances) 266 | except ValueError: 267 | num_instances = None 268 | num_instances = num_instances or args.num_instances 269 | logger.debug("Creating %d service instances.", num_instances) 270 | 271 | tag = None 272 | if interactive: 273 | tag = input("Provide a tag name to group these instances " 274 | "[{}]: ".format(args.tag)) 275 | tag = tag or args.tag 276 | 277 | # Create HiddenServicePort line for instance torrc file 278 | service_virtual_port = None 279 | if interactive: 280 | service_virtual_port = input("Specify the service virtual port (for " 281 | "client connections) [{}]: ".format( 282 | args.service_virtual_port)) 283 | service_virtual_port = service_virtual_port or args.service_virtual_port 284 | 285 | service_target = None 286 | if interactive: 287 | # In interactive mode, change default target to match the specified 288 | # virtual port 289 | default_service_target = u'127.0.0.1:{}'.format(service_virtual_port) 290 | service_target = input("Specify the service target IP and port (where " 291 | "your service is listening) [{}]: ".format( 292 | default_service_target)) 293 | service_target = service_target or default_service_target 294 | service_target = service_target or args.service_target 295 | torrc_port_line = u'HiddenServicePort {} {}'.format(service_virtual_port, 296 | service_target) 297 | 298 | instances = [] 299 | for i in range(0, num_instances): 300 | instance_key = 
Crypto.PublicKey.RSA.generate(1024) 301 | instance_address = util.calc_onion_address(instance_key) 302 | logger.debug("Created a key for instance %s.onion.", 303 | instance_address) 304 | instances.append((instance_address, instance_key)) 305 | 306 | # Write master service key to directory 307 | master_passphrase = None 308 | if interactive: 309 | master_passphrase = getpass.getpass( 310 | "Provide an optional password to encrypt the master private " 311 | "key (Not encrypted if no password is specified): ") 312 | master_passphrase = master_passphrase or args.password 313 | 314 | # Finished reading input, starting to write config files. 315 | master_dir = os.path.join(output_path, 'master') 316 | util.try_make_dir(master_dir) 317 | master_key_file = os.path.join(master_dir, 318 | '{}.key'.format(master_onion_address)) 319 | with open(master_key_file, "wb") as key_file: 320 | os.chmod(master_key_file, 384) # chmod 0600 in decimal 321 | key_file.write(master_key.exportKey(passphrase=master_passphrase)) 322 | logger.debug("Successfully wrote master key to file %s.", 323 | os.path.abspath(master_key_file)) 324 | 325 | # Create YAML OnionBalance settings file for these instances 326 | service_data = {'key': '{}.key'.format(master_onion_address)} 327 | service_data['instances'] = [{'address': address, 328 | 'name': '{}{}'.format(tag, i+1)} for 329 | i, (address, _) in enumerate(instances)] 330 | settings_data = {'services': [service_data]} 331 | config_yaml = yaml.safe_dump(settings_data, default_flow_style=False) 332 | 333 | config_file_path = os.path.join(master_dir, 'config.yaml') 334 | with open(config_file_path, "w") as config_file: 335 | config_file.write(u"# OnionBalance Config File\n") 336 | config_file.write(config_yaml) 337 | logger.info("Wrote master service config file '%s'.", 338 | os.path.abspath(config_file_path)) 339 | 340 | # Write master service torrc 341 | master_torrc_path = os.path.join(master_dir, 'torrc-server') 342 | master_torrc_template = 
pkg_resources.resource_string(__name__, 343 | 'data/torrc-server') 344 | with open(master_torrc_path, "w") as master_torrc_file: 345 | master_torrc_file.write(master_torrc_template.decode('utf-8')) 346 | 347 | # Try generate config files for each service instance 348 | for i, (instance_address, instance_key) in enumerate(instances): 349 | # Create a numbered directory for instance 350 | instance_dir = os.path.join(output_path, '{}{}'.format(tag, i+1)) 351 | instance_key_dir = os.path.join(instance_dir, instance_address) 352 | util.try_make_dir(instance_key_dir) 353 | os.chmod(instance_key_dir, 1472) # chmod 2700 in decimal 354 | 355 | instance_key_file = os.path.join(instance_key_dir, 'private_key') 356 | with open(instance_key_file, "wb") as key_file: 357 | os.chmod(instance_key_file, 384) # chmod 0600 in decimal 358 | key_file.write(instance_key.exportKey()) 359 | logger.debug("Successfully wrote key for instance %s.onion to " 360 | "file.", instance_address) 361 | 362 | # Write torrc file for each instance 363 | instance_torrc = os.path.join(instance_dir, 'instance_torrc') 364 | instance_torrc_template = pkg_resources.resource_string( 365 | __name__, 'data/torrc-instance') 366 | with open(instance_torrc, "w") as torrc_file: 367 | torrc_file.write(instance_torrc_template.decode('utf-8')) 368 | # The ./ relative path prevents Tor from raising relative 369 | # path warnings. The relative path may need to be edited manual 370 | # to work on Windows systems. 371 | torrc_file.write(u"HiddenServiceDir {}\n".format( 372 | instance_address)) 373 | torrc_file.write(u"{}\n".format(torrc_port_line)) 374 | 375 | # Output final status message 376 | logger.info("Done! 
class StatusSocketHandler(BaseRequestHandler):
    """
    Handler for new domain socket connections
    """

    def handle(self):
        """
        Build and send the status summary whenever a client connects.
        """
        fmt = "%Y-%m-%d %H:%M:%S"
        lines = []
        for service in config.services:
            uploaded_at = (service.uploaded.strftime(fmt)
                           if service.uploaded else "[not uploaded]")
            lines.append("{}.onion {}".format(service.onion_address,
                                              uploaded_at))

            for instance in service.instances:
                if instance.timestamp:
                    lines.append("  {}.onion {} {} IPs".format(
                        instance.onion_address,
                        instance.timestamp.strftime(fmt),
                        len(instance.introduction_points)))
                else:
                    lines.append("  {}.onion [offline]".format(
                        instance.onion_address))
            lines.append("")
        self.request.sendall('\n'.join(lines).encode('utf-8'))


class ThreadingSocketServer(ThreadingMixIn, UnixStreamServer):
    """
    Unix socket server with threading
    """
    pass
of the OnionBalance 59 | status when a client connects. 60 | """ 61 | 62 | def __init__(self, status_socket_location): 63 | """ 64 | Create the Unix domain socket status server and start in a thread 65 | 66 | Example:: 67 | socat - unix-connect:/var/run/onionbalance/control 68 | 69 | uweyln7jhkyaokka.onion 2016-05-01 11:08:56 70 | r523s7jx65ckitf4.onion [offline] 71 | v2q7ujuleky7odph.onion 2016-05-01 11:00:00 3 IPs 72 | """ 73 | self.unix_socket_filename = status_socket_location 74 | self.cleanup_socket_file() 75 | 76 | logger.debug("Creating status socket at %s", self.unix_socket_filename) 77 | try: 78 | self.server = ThreadingSocketServer(self.unix_socket_filename, 79 | StatusSocketHandler) 80 | 81 | # Start running the socket server in a another thread 82 | server_thread = threading.Thread(target=self.server.serve_forever) 83 | server_thread.daemon = True # Exit daemon when main thread stops 84 | server_thread.start() 85 | 86 | except (OSError, socket.error): 87 | logger.error("Could not start status socket at %s. Does the path " 88 | "exist? 
def add_pkcs1_padding(message):
    """
    Add PKCS#1 v1.5 type-01 padding to **message**.

    The result is always 128 bytes (the size of a 1024 bit RSA modulus):
    a 0x00 0x01 type marker, 0xFF filler bytes, a 0x00 separator, then
    the message itself.
    """
    typeinfo = b'\x00\x01'
    separator = b'\x00'
    padding = b'\xFF' * (125 - len(message))
    padded_message = typeinfo + padding + separator + message
    assert len(padded_message) == 128
    return padded_message


def get_asn1_sequence(rsa_key):
    """
    Return the DER-encoded ASN.1 sequence (n, e) for an RSA public key.
    """
    # Imported locally: the module-level `import Crypto.Util` is commented
    # out, and `import Crypto.PublicKey` does not guarantee that the
    # Crypto.Util.asn1 submodule has been loaded, so referencing
    # Crypto.Util.asn1 here could raise AttributeError.
    import Crypto.Util.asn1
    seq = Crypto.Util.asn1.DerSequence()
    seq.append(rsa_key.n)
    seq.append(rsa_key.e)
    return seq.encode()


def calc_key_digest(rsa_key):
    """Calculate the SHA1 digest of an RSA key"""
    return hashlib.sha1(get_asn1_sequence(rsa_key)).digest()


def calc_permanent_id(rsa_key):
    """
    Return the 80 bit permanent identifier for a hidden service
    (the first 10 bytes of the public key digest).
    """
    return calc_key_digest(rsa_key)[:10]
def calc_descriptor_id(permanent_id, secret_id_part):
    """descriptor-id = H(permanent-id | secret-id-part)"""
    return hashlib.sha1(permanent_id + secret_id_part).digest()


def _permanent_id_byte(permanent_id):
    """Return the integer value of the first byte of a permanent ID."""
    return int(struct.unpack('B', permanent_id[0:1])[0])


def get_time_period(time, permanent_id):
    """
    time-period = (current-time + permanent-id-byte * 86400 / 256) / 86400
    """
    offset = _permanent_id_byte(permanent_id) * 86400 / 256
    return int((int(time) + offset) / 86400)


def get_seconds_valid(time, permanent_id):
    """
    Calculate seconds until the descriptor ID changes
    """
    offset = _permanent_id_byte(permanent_id) * 86400 / 256
    return 86400 - int((int(time) + offset) % 86400)


def calc_secret_id_part(time_period, descriptor_cookie, replica):
    """
    secret-id-part = H(time-period | descriptor-cookie | replica)
    """
    digest = hashlib.sha1()
    # Big-endian 4 byte time period
    digest.update(struct.pack('>I', time_period)[:4])
    if descriptor_cookie:
        digest.update(descriptor_cookie)
    # Replica number as a single byte
    digest.update(binascii.unhexlify('{0:02X}'.format(replica)))
    return digest.digest()
def rounded_timestamp(timestamp=None):
    """
    Create timestamp rounded down to the nearest hour
    """
    moment = timestamp if timestamp else datetime.datetime.utcnow()
    # Zeroing everything below the hour rounds the timestamp down.
    rounded = moment.replace(minute=0, second=0, microsecond=0)
    return rounded.strftime('%Y-%m-%d %H:%M:%S')


def base32_encode_str(byte_str):
    """
    Encode bytes as lowercase base32 string
    """
    encoded = base64.b32encode(byte_str)
    return encoded.decode('utf-8').lower()
def is_directory_empty(path):
    """
    Check if a directory contains any files or directories.

    Returns True when *path* contains no entries, False otherwise.
    Raises OSError if *path* does not exist or cannot be read.
    """
    # os.listdir returns an empty (falsy) list for an empty directory,
    # so the truth test replaces the original if/else returning literals.
    return not os.listdir(path)
4 | """ 5 | 6 | import sys 7 | import time 8 | import argparse 9 | import logging 10 | import logging.handlers 11 | import threading 12 | 13 | import stem 14 | from stem.control import Controller 15 | import schedule 16 | 17 | handler = logging.StreamHandler() 18 | formatter = logging.Formatter(fmt="%(asctime)s [%(levelname)s]: %(message)s") 19 | handler.setFormatter(formatter) 20 | 21 | logger = logging.getLogger("onionbalance") 22 | logger.addHandler(handler) 23 | logger.setLevel(logging.DEBUG) 24 | 25 | lock = threading.RLock() 26 | 27 | # Track circuits established in current time period. 28 | new_rend_circuits_established = 0 29 | rend_circuits_closed = 0 30 | 31 | 32 | def circ_event_handler(event): 33 | """ 34 | Handle the event received when Tor emits an event related the a rendezvous 35 | circuit 36 | """ 37 | global new_rend_circuits_established, rend_circuits_closed 38 | 39 | if event.purpose == "HS_SERVICE_REND" and event.hs_state == "HSSR_JOINED": 40 | if event.type == "CIRC_MINOR": 41 | # Log when a new rendezvous circuit is successfully established. 42 | # A CIRC_MINOR event is emitted when the rendezvous circuit moves 43 | # from HS_STATE=HSSR_CONNECTING to HS_STATE=HSSR_JOINED 44 | logger.debug("New rendezvous circuit established (CircID: %s)", 45 | event.id) 46 | new_rend_circuits_established += 1 47 | 48 | elif event.type == "CIRC" and event.status == "CLOSED": 49 | logger.debug("Rendezvous circuit closed (CircID: %s)", event.id) 50 | rend_circuits_closed += 1 51 | return 52 | 53 | 54 | def output_status(controller): 55 | """ 56 | Output the current counts every tick period. 57 | """ 58 | global new_rend_circuits_established, rend_circuits_closed 59 | 60 | # Count number of currently established rendezvous circuits for this HS. 
def parse_cmd_args():
    """
    Parses and returns command line arguments.
    """

    parser = argparse.ArgumentParser(
        description="%s logs stats about Tor rendezvous circuits" %
        sys.argv[0])

    parser.add_argument("-i", "--ip", type=str, default="127.0.0.1",
                        help="Tor controller IP address")

    parser.add_argument("-p", "--port", type=int, default=9051,
                        help="Tor controller port")

    parser.add_argument("-t", "--tick", type=int, default=60,
                        help="Output total every tick seconds "
                             "(default: %(default)s)")

    # Fix: the original implicit string concatenation rendered as
    # "connectiondata." in --help output (missing separator space).
    parser.add_argument("--log-file", type=str, default="rendezvous.log",
                        help="Location to log the rendezvous connection "
                             "data.")

    # Fix: removed the stray ")" after "critical" in the help text.
    parser.add_argument("-v", "--verbosity", type=str, default="info",
                        help="Minimum verbosity level for logging. Available "
                             "in ascending order: debug, info, warning, "
                             "error, critical. The default is info.")

    return parser.parse_args()
120 | "Status output every %d seconds", args.tick) 121 | 122 | with Controller.from_port(port=args.port) as controller: 123 | # Create a connection to the Tor control port 124 | controller.authenticate() 125 | 126 | # Add event listeners for HS_DESC and HS_DESC_CONTENT 127 | controller.add_event_listener(circ_event_handler, 128 | stem.control.EventType.CIRC) 129 | controller.add_event_listener(circ_event_handler, 130 | stem.control.EventType.CIRC_MINOR) 131 | 132 | # Schedule rendezvous status output. 133 | schedule.every(args.tick).seconds.do(output_status, controller) 134 | schedule.run_all() 135 | 136 | try: 137 | while True: 138 | schedule.run_pending() 139 | time.sleep(1) 140 | except KeyboardInterrupt: 141 | logger.info("Stopping rendezvous circuit monitoring.") 142 | 143 | sys.exit(0) 144 | 145 | if __name__ == '__main__': 146 | main() 147 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [pytest] 2 | norecursedirs = .tox _build tor chutney 3 | 4 | [bdist_wheel] 5 | universal=1 6 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """setup.py: setuptools control.""" 4 | 5 | import io 6 | import os 7 | 8 | from setuptools import setup 9 | 10 | # Read version and other info from package's __init.py file 11 | module_info = {} 12 | init_path = os.path.join(os.path.dirname(__file__), 'onionbalance', 13 | '__init__.py') 14 | with open(init_path) as init_file: 15 | exec(init_file.read(), module_info) 16 | 17 | 18 | def read(*names, **kwargs): 19 | return io.open( 20 | os.path.join(os.path.dirname(__file__), *names), 21 | encoding=kwargs.get("encoding", "utf8") 22 | ).read() 23 | 24 | setup( 25 | name="OnionBalance", 26 | packages=["onionbalance"], 27 | entry_points={ 28 | 
"console_scripts": [ 29 | 'onionbalance = onionbalance.manager:main', 30 | 'onionbalance-config = onionbalance.settings:generate_config', 31 | ]}, 32 | description="OnionBalance provides load-balancing and redundancy for Tor " 33 | "hidden services by distributing requests to multiple backend " 34 | "Tor instances.", 35 | long_description=read('README.rst'), 36 | version=module_info.get('__version__'), 37 | author=module_info.get('__author__'), 38 | author_email=module_info.get('__contact__'), 39 | url=module_info.get('__url__'), 40 | license=module_info.get('__license__'), 41 | keywords='tor', 42 | install_requires=[ 43 | 'setuptools', 44 | 'stem>=1.4.0-dev', 45 | 'PyYAML>=3.11', 46 | 'pycrypto>=2.6.1', 47 | 'future>=0.14.0', 48 | 'setproctitle', 49 | ], 50 | tests_require=['tox', 'pytest-mock', 'pytest', 'mock', 'pexpect'], 51 | package_data={'onionbalance': ['data/*']}, 52 | include_package_data=True, 53 | classifiers=[ 54 | 'Development Status :: 3 - Alpha', 55 | 'License :: OSI Approved :: GNU General Public License (GPL)', 56 | 'Programming Language :: Python :: 2', 57 | 'Programming Language :: Python :: 2.7', 58 | 'Programming Language :: Python :: 3', 59 | 'Programming Language :: Python :: 3.4', 60 | ] 61 | ) 62 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | mock 3 | pytest-mock 4 | pexpect 5 | coveralls==1.1 6 | pytest-cov==2.2.1 7 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DonnchaC/onionbalance/5c781506860d88c3a1966047a2b856f8e29ce97a/test/__init__.py -------------------------------------------------------------------------------- /test/functional/test_onionbalance_config.py: 
def onionbalance_config_interact(cli, cli_input):
    """
    Send each input line to the onionbalance-config CLI interface
    """
    # (expected prompt fragment, matching cli_input key), in the order
    # that onionbalance-config asks for them. Missing keys send an
    # empty line, accepting the tool's default.
    prompts = [
        (u"store generated config", 'config_dir'),
        (u"path to master service private key", 'private_key_path'),
        (u"Number of instance services", 'num_instances'),
        (u"Provide a tag name", 'tag_name'),
        (u"service virtual port", 'virtual_port'),
        (u"service target IP and port", 'target_ip'),
        (u"optional password", 'password'),
    ]
    for prompt, key in prompts:
        cli.expect(prompt)
        cli.send(u"{}\n".format(cli_input.get(key, u'')))

    return None
46 | """ 47 | 48 | assert len(config_dir.listdir()) == 1 + 2 49 | 50 | # Find generated instance addresses 51 | instance_addresses = [] 52 | for directory in config_dir.listdir(): 53 | if directory.basename != 'master': 54 | instance_addresses.extend( 55 | [str(name.basename) for name in directory.listdir() 56 | if 'torrc' not in name.basename]) 57 | 58 | # Correct number of directories created 59 | assert len(config_dir.listdir()) == 1 + 2 60 | 61 | assert config_dir.join('master', 'torrc-server').check() 62 | assert config_dir.join('master', 'config.yaml').check() 63 | 64 | config_file = config_dir.join('master', 'config.yaml').read_text('utf-8') 65 | assert all(address in config_file for address in instance_addresses) 66 | 67 | # Test that all addresses are encoded as bytes and not unicode 68 | assert "!!python/unicode" not in config_file 69 | 70 | return True 71 | 72 | 73 | def test_onionbalance_config_interactive(tmpdir): 74 | """ 75 | Functional test to run onion-balance config in interactive mode. 76 | """ 77 | # Start onionbalance-config in interactive mode (no command line arguments) 78 | cli = pexpect.spawnu("onionbalance-config", logfile=sys.stdout) 79 | cli.expect(u"entering interactive mode") 80 | 81 | # Interact with the running onionbalance-config process 82 | onionbalance_config_interact( 83 | cli, cli_input={'config_dir': str(tmpdir.join(u"configdir"))}) 84 | cli.expect(u"Done! Successfully generated") 85 | 86 | check_basic_config_output(tmpdir.join(u"configdir")) 87 | 88 | 89 | def test_onionbalance_config_automatic(tmpdir): 90 | """ 91 | Functional test to run onion-balance config in automatic mode. 92 | """ 93 | # Start onionbalance-config in automatic mode 94 | cli = pexpect.spawnu("onionbalance-config", logfile=sys.stdout, 95 | args=[ 96 | '--output', str(tmpdir.join(u"configdir")), 97 | ]) 98 | cli.expect(u"Done! 
Successfully generated") 99 | 100 | check_basic_config_output(tmpdir.join(u"configdir")) 101 | 102 | 103 | def test_onionbalance_config_automatic_custom_ports(tmpdir): 104 | """ 105 | Run onionbalance-config in interactive mode, providing a custom port line. 106 | """ 107 | cli = pexpect.spawnu("onionbalance-config", logfile=sys.stdout, 108 | args=[ 109 | '--output', str(tmpdir.join(u"configdir")), 110 | '--service-virtual-port', u'443', 111 | '--service-target', u'127.0.0.1:8443', 112 | ]) 113 | cli.expect(u"Done! Successfully generated") 114 | 115 | # Read one of the generated torrc files 116 | for directory in tmpdir.join(u"configdir").listdir(): 117 | if directory.basename != 'master': 118 | torrc_file = [name for name in directory.listdir() 119 | if name.basename == 'instance_torrc'][0] 120 | break 121 | assert torrc_file.check() 122 | 123 | # Check torrc line contains the correct HiddenServicePort line 124 | torrc_contents = torrc_file.read_text('utf-8') 125 | assert u'HiddenServicePort 443 127.0.0.1:8443' in torrc_contents 126 | 127 | 128 | def test_onionbalance_config_automatic_key_with_password(tmpdir, mocker): 129 | """ 130 | Run onionbalance-config with an existing key, export as password protected 131 | key. 132 | """ 133 | 134 | # Create input private_key 135 | private_key = Crypto.PublicKey.RSA.generate(1024) 136 | key_path = tmpdir.join('private_key') 137 | key_path.write(private_key.exportKey()) 138 | 139 | # Start onionbalance-config in automatic mode 140 | cli = pexpect.spawnu("onionbalance-config", logfile=sys.stdout, 141 | args=[ 142 | '--output', str(tmpdir.join(u"configdir")), 143 | '--key', str(key_path), 144 | '--password', 'testpassword', 145 | ]) 146 | cli.expect(u"Done! Successfully generated") 147 | 148 | # Check master config was generated with password protected key. 
149 | master_dir = tmpdir.join('configdir', 'master') 150 | output_key_path = [fpath for fpath in master_dir.listdir() 151 | if fpath.ext == '.key'][0] 152 | assert output_key_path.check() 153 | 154 | # Check key decrypts and is valid 155 | mocker.patch('getpass.getpass', lambda *_: 'testpassword') 156 | output_key = onionbalance.util.key_decrypt_prompt(str(output_key_path)) 157 | assert isinstance(output_key, Crypto.PublicKey.RSA._RSAobj) 158 | -------------------------------------------------------------------------------- /test/functional/test_publish_master_descriptor.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import os 3 | import sys 4 | import socket 5 | import time 6 | 7 | import pytest 8 | import Crypto.PublicKey.RSA 9 | import yaml 10 | import pexpect 11 | import stem.control 12 | 13 | import onionbalance.util 14 | 15 | # Skip functional tests if Chutney environment is not running. 16 | pytestmark = pytest.mark.skipif( 17 | "os.environ.get('CHUTNEY_ONION_ADDRESS') is None", 18 | reason="Skipping functional test, no Chutney environment detected") 19 | 20 | 21 | def parse_chutney_enviroment(): 22 | """ 23 | Read environment variables and determine chutney instance and 24 | client addresses. 
25 | """ 26 | 27 | tor_client = os.environ.get('CHUTNEY_CLIENT_PORT') 28 | assert tor_client 29 | 30 | # Calculate the address and port of clients control port 31 | client_address, client_socks_port = tor_client.split(':') 32 | client_ip = socket.gethostbyname(client_address) 33 | 34 | tor_client_number = int(client_socks_port) - 9000 35 | # Control port in the 8000-8999 range, offset by Tor client number 36 | control_port = 8000 + tor_client_number 37 | assert control_port 38 | 39 | # Retrieve instance onion address exported during chutney setup 40 | instance_address = os.environ.get('CHUTNEY_ONION_ADDRESS') 41 | assert instance_address # Need at least 1 instance address for test 42 | 43 | if '.onion' in instance_address: 44 | instance_address = instance_address[:16] 45 | 46 | return { 47 | 'client_ip': client_ip, 48 | 'control_port': control_port, 49 | 'instances': [instance_address], 50 | } 51 | 52 | 53 | def create_test_config_file(tmppath, private_key=None, instances=None): 54 | """ 55 | Setup function to create a temp directory with master key and config file. 56 | Returns a path to the temporary config file. 57 | 58 | .. todo:: Refactor settings.py config creation to avoid code duplication 59 | in integration tests. 
60 | """ 61 | 62 | if not private_key: 63 | private_key = Crypto.PublicKey.RSA.generate(1024) 64 | 65 | # Write private key file 66 | key_path = tmppath.join('private_key') 67 | key_path.write(private_key.exportKey()) 68 | assert key_path.check() 69 | 70 | # Create YAML OnionBalance settings file for these instances 71 | service_data = {'key': str(key_path)} 72 | service_data['instances'] = [{'address': addr} for addr in instances] 73 | settings_data = { 74 | 'services': [service_data], 75 | 'STATUS_SOCKET_LOCATION': str(tmppath.join('control')), 76 | } 77 | config_yaml = yaml.dump(settings_data, default_flow_style=False) 78 | 79 | config_path = tmppath.join('config.yaml') 80 | config_path.write_binary(config_yaml.encode('utf-8')) 81 | assert config_path.check() 82 | 83 | return str(config_path) 84 | 85 | 86 | def test_master_descriptor_publication(tmpdir): 87 | """ 88 | Functional test to run OnionBalance, publish a master descriptor and 89 | check that it can be retrieved from the DHT. 
90 | """ 91 | 92 | chutney_config = parse_chutney_enviroment() 93 | private_key = Crypto.PublicKey.RSA.generate(1024) 94 | master_onion_address = onionbalance.util.calc_onion_address(private_key) 95 | 96 | config_file_path = create_test_config_file( 97 | tmppath=tmpdir, 98 | private_key=private_key, 99 | instances=chutney_config.get('instances', []), 100 | ) 101 | assert config_file_path 102 | 103 | # Start an OnionBalance server and monitor for correct output with pexpect 104 | server = pexpect.spawnu("onionbalance", 105 | args=[ 106 | '-i', chutney_config.get('client_ip'), 107 | '-p', str(chutney_config.get('control_port')), 108 | '-c', config_file_path, 109 | '-v', 'debug', 110 | ], logfile=sys.stdout, timeout=15) 111 | 112 | # Check for expected output from OnionBalance 113 | server.expect(u"Loaded the config file") 114 | server.expect(u"introduction point set has changed") 115 | server.expect(u"Published a descriptor", timeout=120) 116 | 117 | # Check Tor control port gave an uploaded event. 118 | 119 | server.expect(u"HS_DESC UPLOADED") 120 | # Eek, sleep to wait for descriptor upload to all replicas to finish 121 | time.sleep(10) 122 | 123 | # .. 
todo:: Also need to check and raise for any warnings or errors 124 | # that are emitted 125 | 126 | # Try fetch and validate the descriptor with stem 127 | with stem.control.Controller.from_port( 128 | address=chutney_config.get('client_ip'), 129 | port=chutney_config.get('control_port') 130 | ) as controller: 131 | controller.authenticate() 132 | 133 | # get_hidden_service_descriptor() will raise exceptions if it 134 | # cannot find the descriptors 135 | master_descriptor = controller.get_hidden_service_descriptor( 136 | master_onion_address) 137 | master_ips = master_descriptor.introduction_points() 138 | 139 | # Try retrieve a descriptor for each instance 140 | for instance_address in chutney_config.get('instances'): 141 | instance_descriptor = controller.get_hidden_service_descriptor( 142 | instance_address) 143 | instance_ips = instance_descriptor.introduction_points() 144 | 145 | # Check if all instance IPs were included in the master descriptor 146 | assert (set(ip.identifier for ip in instance_ips) == 147 | set(ip.identifier for ip in master_ips)) 148 | 149 | # Check that the control socket was created 150 | socket_path = tmpdir.join('control') 151 | assert socket_path.check() 152 | 153 | # Connect to the control socket and check the output 154 | sock_client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 155 | sock_client.connect(str(socket_path)) 156 | 157 | # Read the data from the status socket 158 | result = [] 159 | while True: 160 | data = sock_client.recv(1024) 161 | if not data: 162 | break 163 | result.append(data.decode('utf-8')) 164 | result_data = ''.join(result) 165 | 166 | # Check each instance is in the output 167 | for instance_address in chutney_config.get('instances'): 168 | assert instance_address in result_data 169 | 170 | # Check all instances were online and all master descriptors uploaded 171 | assert master_onion_address in result_data 172 | assert '[offline]' not in result_data 173 | assert '[not uploaded]' not in result_data 
#!/bin/bash
# Install Chutney, bring up a local Tor test network (the "hs" template) and
# wait until its hidden service subsystem is verified to be working.
#
# This script is intended to be *sourced* (see .travis.yml) so that on
# success the following variables are exported into the caller's shell:
#   CHUTNEY_ONION_ADDRESS - address:port of the running hidden service
#   CHUTNEY_CLIENT_PORT   - localhost SOCKS address:port of a client node
git clone https://git.torproject.org/chutney.git
cd chutney
# Stop chutney network if it is already running, then (re)start it.
./chutney stop networks/hs
./chutney configure networks/hs
./chutney start networks/hs
./chutney status networks/hs

# Retry verify until the hidden service subsystem is working: at most
# 20 attempts, sleeping 15 seconds between attempts (~5 minutes total).
n=0
until [ $n -ge 20 ]
do
  output=$(./chutney verify networks/hs)
  # Chutney prints 'Transmission: Success' once end-to-end traffic works.
  if [[ $output == *"Transmission: Success"* ]]; then
    # "$output" is quoted so grep sees the original line breaks, and the
    # '.' before 'onion' is escaped: it is a literal dot, not "any char".
    hs_address=$(echo "$output" | grep -Po "([a-z2-7]{16}\.onion:\d{2,5})" | head -n1)
    client_address=$(echo "$output" | grep -Po -m 1 "(localhost:\d{2,5})" | head -n1)
    echo "HS system running with service available at $hs_address"
    export CHUTNEY_ONION_ADDRESS="$hs_address"
    export CHUTNEY_CLIENT_PORT="$client_address"
    break
  else
    echo "HS system not running yet. Sleeping 15 seconds"
    # $((...)) is the standard arithmetic form; $[...] is deprecated.
    n=$((n+1))
    sleep 15
  fi
done
cd ..
# -*- coding: utf-8 -*-
"""
Tests for responsible HSDir selection in onionbalance.consensus.

The consensus module keeps a ring of HSDir fingerprints ordered by
fingerprint; ``get_hsdirs`` walks the ring clockwise from a descriptor ID
and returns the next ``config.HSDIR_SET`` distinct HSDirs.
"""
import pytest

from onionbalance import consensus
from onionbalance import config

# Mock hex-encoded HSDir fingerprint list
MOCK_HSDIR_LIST = [
    "1111111111111111111111111111111111111111",
    "2222222222222222222222222222222222222222",
    "3333333333333333333333333333333333333333",
    "4444444444444444444444444444444444444444",
    "5555555555555555555555555555555555555555",
    "6666666666666666666666666666666666666666",
]

config.HSDIR_SET = 3  # Always select 3 responsible HSDirs


def test_get_hsdirs_no_consensus():
    """
    `get_hsdirs` should raise an exception when we don't have a valid
    HSDir list from the consensus.
    """
    with pytest.raises(ValueError):
        consensus.get_hsdirs('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')


def test_get_hsdirs(monkeypatch):
    """Test for normal responsible HSDir selection"""
    monkeypatch.setattr(consensus, 'HSDIR_LIST', MOCK_HSDIR_LIST)

    # Descriptor ID on the ring just before '222...'
    descriptor_id_base32 = "eiqaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    responsible_hsdirs = consensus.get_hsdirs(descriptor_id_base32)

    assert (responsible_hsdirs == [
        "2222222222222222222222222222222222222222",
        "3333333333333333333333333333333333333333",
        "4444444444444444444444444444444444444444",
    ])


def test_get_hsdirs_edge_of_ring(monkeypatch):
    """Test that selection wraps around the edge of the HSDir ring"""
    monkeypatch.setattr(consensus, 'HSDIR_LIST', MOCK_HSDIR_LIST)

    # Descriptor ID on the ring just before '666...'
    descriptor_id_base32 = "mzqaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    responsible_hsdirs = consensus.get_hsdirs(descriptor_id_base32)

    assert (responsible_hsdirs == [
        "6666666666666666666666666666666666666666",
        "1111111111111111111111111111111111111111",
        "2222222222222222222222222222222222222222",
    ])


def test_get_hsdirs_no_repeat(monkeypatch):
    """
    Test that no HSDir is selected more than once when the ring contains
    fewer relays than the configured HSDIR_SET size.
    """
    SHORT_HSDIR_LIST = [
        "1111111111111111111111111111111111111111",
        "2222222222222222222222222222222222222222",
    ]
    monkeypatch.setattr(consensus, 'HSDIR_LIST', SHORT_HSDIR_LIST)

    # Descriptor ID on the ring just before '111...'
    descriptor_id_base32 = "ceiaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    responsible_hsdirs = consensus.get_hsdirs(descriptor_id_base32)

    assert (responsible_hsdirs == [
        "1111111111111111111111111111111111111111",
        "2222222222222222222222222222222222222222",
    ])
/test/test_descriptor.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import datetime 3 | import string 4 | 5 | import pytest 6 | import Crypto.PublicKey.RSA 7 | import stem.descriptor 8 | import hashlib 9 | 10 | from binascii import unhexlify 11 | 12 | from onionbalance import descriptor 13 | 14 | PEM_PRIVATE_KEY = u'\n'.join([ 15 | '-----BEGIN RSA PRIVATE KEY-----', 16 | 'MIICWwIBAAKBgQDXzP6HGtjPSy7uF9OlY7ZmefTVKcFLsq0mSEzQrW5wSiNuYc+d', 17 | 'oSV2OWxPg+1fVe19ES43AUkq/bS/gjAMLOunP6u9FbPDojyh1Vs/6TVqftS3sPkl', 18 | 'Q0ItrrZwAwhtHC0WaEyrwYJNOSCBq3wpupdQhpRyWJFqMwm9+iBCG1QcJQIDAQAB', 19 | 'AoGAegc2Sqm4vgdyozof+R8Ybnw6ISu6XRbNaJ9rqHjZwW9695khsK4GJAM2pwQf', 20 | '/0/0ukszyfDVMhVC1yREDS59lgzNecItd6nQZWbwr9TFxIoa9ouTqk8PcAoNixTb', 21 | 'wafjPcMmWGakizXeAHiOfazPBH4x2keDQCulxfYxXZxTpyECQQDqZu61kd1S3U7T', 22 | 'BT2NQBd3tHX0Hvonx+IkOKXwpHFY0Mo4d32Bi+MxRuEnd3tO44AaMvlkl13QMTF2', 23 | 'kHFSC70dAkEA669LZavGjW67+rO+f+xyDVby9pD5GJQBb78xRCf93Zcu2KW4NSp3', 24 | 'XC4p4eWfLgff1VuXL7g0VdFm4wUUHqYUqQJAZLmqpjdyBeO3tZIw6vu5meTgMvEE', 25 | 'ygdos+vr0sa3NlUyMKWYNwznqgstQYpkYHf+WkPBS2qIE6iv+qUDLSCCOQJAESSk', 26 | 'CFYxUBJQ7BBs9+Mb/Kppa9Ppuobxf85ZaAq8pYScrLeJKZzYJ8VX2I2aQX/jISLT', 27 | 'YW41qFRd9n9lEkGkWQJAcxPmNI+2r5zJG+K148LLmWCIDTVZ4nxOcxffHka/3tCJ', 28 | 'lDGUw4p2wU6pVRDpNfKrF5Nc9ZKO8NAtC17ZvDyVkQ==', 29 | '-----END RSA PRIVATE KEY-----', 30 | ]) 31 | 32 | INTRODUCTION_POINT_PART = u'\n'.join([ 33 | '-----BEGIN MESSAGE-----', 34 | 'AgEdbps604RR6lqeyoZBzOb6+HvlL2cDt63w8vBtyRaLirq5ZD5GDnr+R0ePj71C', 35 | 'nC7qmRWuwBmzSdSd0lOTaSApBvIifbJksHUeT/rq03dpnnRHdHSVqSvig6bukcWJ', 36 | 'LgJmrRd3ES13LXVHenD3C6AZMHuL9TG+MjLO2PIHu0mFO18aAHVnWY32Dmt144IY', 37 | 'c2eTVZbsKobjjwCYvDf0PBZI+B6H0PZWkDX/ykYjArpLDwydeZyp+Zwj4+k0+nRr', 38 | 'RPlzbHYoBY9pFYDUXDXWdL+vTsgFTG0EngLGlgUWSY5U1T1Db5HfOqc7hbqklgs/', 39 | 'ULG8NUY1k41Wb+dleJI28/+ZOM9zOpHcegNx4Cn8UGbw/Yv3Tj+yki+TMeOtJyhK', 40 | 
'PQP8NWq8zThiVhBrfpmVjMYkNeVNyVNoxRwS6rxCQjoLWSJit2Mpf57zY1AOvT1S', 41 | 'EqqFbsX+slD2Uk67imALh4pMtjX29VLIujpum3drLhoTHDszBRhIH61A2eAZqdJy', 42 | '7JkJd1x/8x7U0l8xNWhnj/bhUHdt3OrCvlN+n8x6BwmMNoLF8JIsskTuGHOaAKSQ', 43 | 'WK3z0rHjgIrEjkQeuQtfmptiIgRB9LnNr+YahRnRR6XIOJGaIoVLVM2Uo2RG4MS1', 44 | '2KC3DRJ87WdMv2yNWha3w+lWt/mOALahYrvuNMU8wEuNXSi5yCo1OKirv+d5viGe', 45 | 'hAgVZjRymBQF+vd30zMdOG9qXNoQFUN49JfS8z5FjWmdHRt2MHlqD2isxoeabERY', 46 | 'T4Q50fFH8XHkRRomKBEbCwy/4t2DiqcTOSLGOSbTtf7qlUACp2bRth/g0ySAW8X/', 47 | 'CaWVm53z1vdgF2+t6j1CnuIqf0dUygZ07HEAHgu3rMW0YTk04QkvR3jiKAKijvGH', 48 | '3YcMJz1aJ7psWSsgiwn8a8Cs4fAcLNJcdTrnyxhQI4PMST/QLfp8nPYrhKEeifTc', 49 | 'vYkC4CtGuEFkWyRifIGbeD7FcjkL1zqVNu31vgo3EIVbHzylERgpgTIYBRv7aV7W', 50 | 'X7XAbrrgXL0zgpI0orOyPkr2KRs6CcoEqcc2MLyB6gJ5fYAm69Ige+6gWtRT6qvZ', 51 | 'tJXagfKZivLj73dRD6sUqTCX4tmgo7Q8WFSeNscDAVm/p4dVsw6SOoFcRgaH20yX', 52 | 'MBa3oLNTUNAaGbScUPx2Ja3MQS0UITwk0TFTF7hL++NhTvTp6IdgQW4DG+/bVJ3M', 53 | 'BRR+hsvSz5BSQQj2FUIAsJ+WoVK9ImbgsBbYxSH60jCvxTIdeh2IeUzS2T1bU9AU', 54 | 'jOLzcJZmNh95Nj2Qdrc8/0gin9KpgPmuPQ6CyH3TPFy88lf19v9jHUMO4SKEr7am', 55 | 'DAjbX3D7APKgHyZ61CkuoB3gylIRb8rRJD2ote38M6A1+04yJL/jG+PCL1UnMWdL', 56 | 'yJ4f4LzI9c4ksnGyl9neq0IHnA0Nlky6dmgmE+vLi6OCbEEs2v132wc5PIxRY+TW', 57 | '8JWu+3wUA4tj5uQvQRqU9/lmoHG/Jxubx/HwdD9Ri17G+qX8re5sySmmq7rcZEGJ', 58 | 'LVrlFuvA0NdoTM4AZY23iR6trJ/Ba2Q4pQk4SfOEMSoZJmf0UbxIP0Ez6Fb+Dxzk', 59 | 'WKXfI+D0ScuVjzV0bs8iXTrCcynztRKndNbtpd39hGAR0rNqvnHyQGYV75bWm5dS', 60 | '0S0PQ6DOzicLxjNXZFicQvwfieg9VyJikWLFLu4zAbzHnuoRk6b2KbSU4UCG/BCz', 61 | 'mHqz4y6GfsncsNkmFmsD5Gn9UrloWcEWgIDL05yIikL+L9DPLnNlSYtehDfxlhvh', 62 | 'xHzY/Rad4Nzxe62yXhSxhROLTXIolllyOFJgqZ4hBlXybBqJH7sZUll6PUpDwZdu', 63 | 'BK14pzMIpfxq2eYp8jI7fh4lU9YrkuSUM0Ewa7HfrltAgxMhHyaFjfINt61P9OlO', 64 | 's3nuBY17+KokaSWjACkCimVLH13H5DRhfX8OBRT4LeRMUspX3cyKbccwpOmoBf4y', 65 | 'WPM9QXw7nQy2hwnuX6NiK5QfeCGfY64M06J2tBGcCDmjPSIcJgMcyY7jfH9yPlDt', 66 | 'SKyyXpZnFOJplS2v28A/1csPSGy9kk/uGN0hfFULH4VvyAgNDYzmeOd8FvrbfHH2', 67 | 
'8BUTI/Tq2pckxwCYBWHcjSdXRAj5moCNSxCUMtK3kWFdxLFYzoiKuiZwq171qb5L', 68 | 'yCHMwNDIWEMeC75XSMswHaBsK6ON0UUg5oedQkOK+II9L/DVyTs3UYJOsWDfM67E', 69 | '312O9/bmsoHvr+rofF7HEc74dtUAcaDGJNyNiB+O4UmWbtEpCfuLmq2vaZa9J7Y0', 70 | 'hXlD2pcibC9CWpKR58cRL+dyYHZGJ4VKg6OHlJlF+JBPeLzObNDz/zQuEt9aL9Ae', 71 | 'QByamqGDGcaVMVZ/A80fRoUUgHbh3bLoAmxLCvMbJ0YMtRujdtGm8ZD0WvLXQA/U', 72 | 'dNmQ6tsP6pyVorWVa/Ma5CR7Em5q7M6639T8WPcu7ETTO19MnWud2lPJ5A==', 73 | '-----END MESSAGE-----', 74 | ]) 75 | 76 | UNSIGNED_DESCRIPTOR = u'\n'.join([ 77 | 'rendezvous-service-descriptor 6wgohrr64y2od75psnrfdkbc74ddqx2v', 78 | 'version 2', 79 | 'permanent-key', 80 | '-----BEGIN RSA PUBLIC KEY-----', 81 | 'MIGJAoGBANfM/oca2M9LLu4X06VjtmZ59NUpwUuyrSZITNCtbnBKI25hz52hJXY5', 82 | 'bE+D7V9V7X0RLjcBSSr9tL+CMAws66c/q70Vs8OiPKHVWz/pNWp+1Lew+SVDQi2u', 83 | 'tnADCG0cLRZoTKvBgk05IIGrfCm6l1CGlHJYkWozCb36IEIbVBwlAgMBAAE=', 84 | '-----END RSA PUBLIC KEY-----', 85 | 'secret-id-part udmoj3e2ykfp73kpvauoq4t4p7kkwsjq', 86 | 'publication-time 2015-06-25 11:00:00', 87 | 'protocol-versions 2,3', 88 | 'introduction-points', 89 | '-----BEGIN MESSAGE-----', 90 | 'AgEdbps604RR6lqeyoZBzOb6+HvlL2cDt63w8vBtyRaLirq5ZD5GDnr+R0ePj71C', 91 | 'nC7qmRWuwBmzSdSd0lOTaSApBvIifbJksHUeT/rq03dpnnRHdHSVqSvig6bukcWJ', 92 | 'LgJmrRd3ES13LXVHenD3C6AZMHuL9TG+MjLO2PIHu0mFO18aAHVnWY32Dmt144IY', 93 | 'c2eTVZbsKobjjwCYvDf0PBZI+B6H0PZWkDX/ykYjArpLDwydeZyp+Zwj4+k0+nRr', 94 | 'RPlzbHYoBY9pFYDUXDXWdL+vTsgFTG0EngLGlgUWSY5U1T1Db5HfOqc7hbqklgs/', 95 | 'ULG8NUY1k41Wb+dleJI28/+ZOM9zOpHcegNx4Cn8UGbw/Yv3Tj+yki+TMeOtJyhK', 96 | 'PQP8NWq8zThiVhBrfpmVjMYkNeVNyVNoxRwS6rxCQjoLWSJit2Mpf57zY1AOvT1S', 97 | 'EqqFbsX+slD2Uk67imALh4pMtjX29VLIujpum3drLhoTHDszBRhIH61A2eAZqdJy', 98 | '7JkJd1x/8x7U0l8xNWhnj/bhUHdt3OrCvlN+n8x6BwmMNoLF8JIsskTuGHOaAKSQ', 99 | 'WK3z0rHjgIrEjkQeuQtfmptiIgRB9LnNr+YahRnRR6XIOJGaIoVLVM2Uo2RG4MS1', 100 | '2KC3DRJ87WdMv2yNWha3w+lWt/mOALahYrvuNMU8wEuNXSi5yCo1OKirv+d5viGe', 101 | 'hAgVZjRymBQF+vd30zMdOG9qXNoQFUN49JfS8z5FjWmdHRt2MHlqD2isxoeabERY', 102 | 
'T4Q50fFH8XHkRRomKBEbCwy/4t2DiqcTOSLGOSbTtf7qlUACp2bRth/g0ySAW8X/', 103 | 'CaWVm53z1vdgF2+t6j1CnuIqf0dUygZ07HEAHgu3rMW0YTk04QkvR3jiKAKijvGH', 104 | '3YcMJz1aJ7psWSsgiwn8a8Cs4fAcLNJcdTrnyxhQI4PMST/QLfp8nPYrhKEeifTc', 105 | 'vYkC4CtGuEFkWyRifIGbeD7FcjkL1zqVNu31vgo3EIVbHzylERgpgTIYBRv7aV7W', 106 | 'X7XAbrrgXL0zgpI0orOyPkr2KRs6CcoEqcc2MLyB6gJ5fYAm69Ige+6gWtRT6qvZ', 107 | 'tJXagfKZivLj73dRD6sUqTCX4tmgo7Q8WFSeNscDAVm/p4dVsw6SOoFcRgaH20yX', 108 | 'MBa3oLNTUNAaGbScUPx2Ja3MQS0UITwk0TFTF7hL++NhTvTp6IdgQW4DG+/bVJ3M', 109 | 'BRR+hsvSz5BSQQj2FUIAsJ+WoVK9ImbgsBbYxSH60jCvxTIdeh2IeUzS2T1bU9AU', 110 | 'jOLzcJZmNh95Nj2Qdrc8/0gin9KpgPmuPQ6CyH3TPFy88lf19v9jHUMO4SKEr7am', 111 | 'DAjbX3D7APKgHyZ61CkuoB3gylIRb8rRJD2ote38M6A1+04yJL/jG+PCL1UnMWdL', 112 | 'yJ4f4LzI9c4ksnGyl9neq0IHnA0Nlky6dmgmE+vLi6OCbEEs2v132wc5PIxRY+TW', 113 | '8JWu+3wUA4tj5uQvQRqU9/lmoHG/Jxubx/HwdD9Ri17G+qX8re5sySmmq7rcZEGJ', 114 | 'LVrlFuvA0NdoTM4AZY23iR6trJ/Ba2Q4pQk4SfOEMSoZJmf0UbxIP0Ez6Fb+Dxzk', 115 | 'WKXfI+D0ScuVjzV0bs8iXTrCcynztRKndNbtpd39hGAR0rNqvnHyQGYV75bWm5dS', 116 | '0S0PQ6DOzicLxjNXZFicQvwfieg9VyJikWLFLu4zAbzHnuoRk6b2KbSU4UCG/BCz', 117 | 'mHqz4y6GfsncsNkmFmsD5Gn9UrloWcEWgIDL05yIikL+L9DPLnNlSYtehDfxlhvh', 118 | 'xHzY/Rad4Nzxe62yXhSxhROLTXIolllyOFJgqZ4hBlXybBqJH7sZUll6PUpDwZdu', 119 | 'BK14pzMIpfxq2eYp8jI7fh4lU9YrkuSUM0Ewa7HfrltAgxMhHyaFjfINt61P9OlO', 120 | 's3nuBY17+KokaSWjACkCimVLH13H5DRhfX8OBRT4LeRMUspX3cyKbccwpOmoBf4y', 121 | 'WPM9QXw7nQy2hwnuX6NiK5QfeCGfY64M06J2tBGcCDmjPSIcJgMcyY7jfH9yPlDt', 122 | 'SKyyXpZnFOJplS2v28A/1csPSGy9kk/uGN0hfFULH4VvyAgNDYzmeOd8FvrbfHH2', 123 | '8BUTI/Tq2pckxwCYBWHcjSdXRAj5moCNSxCUMtK3kWFdxLFYzoiKuiZwq171qb5L', 124 | 'yCHMwNDIWEMeC75XSMswHaBsK6ON0UUg5oedQkOK+II9L/DVyTs3UYJOsWDfM67E', 125 | '312O9/bmsoHvr+rofF7HEc74dtUAcaDGJNyNiB+O4UmWbtEpCfuLmq2vaZa9J7Y0', 126 | 'hXlD2pcibC9CWpKR58cRL+dyYHZGJ4VKg6OHlJlF+JBPeLzObNDz/zQuEt9aL9Ae', 127 | 'QByamqGDGcaVMVZ/A80fRoUUgHbh3bLoAmxLCvMbJ0YMtRujdtGm8ZD0WvLXQA/U', 128 | 'dNmQ6tsP6pyVorWVa/Ma5CR7Em5q7M6639T8WPcu7ETTO19MnWud2lPJ5A==', 129 | 
'-----END MESSAGE-----', 130 | 'signature', 131 | '-----BEGIN SIGNATURE-----', 132 | 'VX4GC6s6zmY84mKsh+YdAqyZqDevJwGYr9yJntBNms4XRQHlgiW/JCspJzCqvrQG', 133 | 'N4Fh8XNTodQFnxz/kz8K3SBFlLnJHzKxSBTSZTLd8hRp84F/XxDcPaIPda8UJZuF', 134 | 'pOT8V0hfhgo8WxLpOyUzxrYugPB2GRkWYLhHaKhxkJY=', 135 | '-----END SIGNATURE-----', 136 | ]) 137 | 138 | SIGNED_DESCRIPTOR = u'\n'.join([ 139 | 'rendezvous-service-descriptor 6wgohrr64y2od75psnrfdkbc74ddqx2v', 140 | 'version 2', 141 | 'permanent-key', 142 | '-----BEGIN RSA PUBLIC KEY-----', 143 | 'MIGJAoGBANfM/oca2M9LLu4X06VjtmZ59NUpwUuyrSZITNCtbnBKI25hz52hJXY5', 144 | 'bE+D7V9V7X0RLjcBSSr9tL+CMAws66c/q70Vs8OiPKHVWz/pNWp+1Lew+SVDQi2u', 145 | 'tnADCG0cLRZoTKvBgk05IIGrfCm6l1CGlHJYkWozCb36IEIbVBwlAgMBAAE=', 146 | '-----END RSA PUBLIC KEY-----', 147 | 'secret-id-part udmoj3e2ykfp73kpvauoq4t4p7kkwsjq', 148 | 'publication-time 2015-06-25 11:00:00', 149 | 'protocol-versions 2,3', 150 | 'introduction-points', 151 | '-----BEGIN MESSAGE-----', 152 | 'AgEdbps604RR6lqeyoZBzOb6+HvlL2cDt63w8vBtyRaLirq5ZD5GDnr+R0ePj71C', 153 | 'nC7qmRWuwBmzSdSd0lOTaSApBvIifbJksHUeT/rq03dpnnRHdHSVqSvig6bukcWJ', 154 | 'LgJmrRd3ES13LXVHenD3C6AZMHuL9TG+MjLO2PIHu0mFO18aAHVnWY32Dmt144IY', 155 | 'c2eTVZbsKobjjwCYvDf0PBZI+B6H0PZWkDX/ykYjArpLDwydeZyp+Zwj4+k0+nRr', 156 | 'RPlzbHYoBY9pFYDUXDXWdL+vTsgFTG0EngLGlgUWSY5U1T1Db5HfOqc7hbqklgs/', 157 | 'ULG8NUY1k41Wb+dleJI28/+ZOM9zOpHcegNx4Cn8UGbw/Yv3Tj+yki+TMeOtJyhK', 158 | 'PQP8NWq8zThiVhBrfpmVjMYkNeVNyVNoxRwS6rxCQjoLWSJit2Mpf57zY1AOvT1S', 159 | 'EqqFbsX+slD2Uk67imALh4pMtjX29VLIujpum3drLhoTHDszBRhIH61A2eAZqdJy', 160 | '7JkJd1x/8x7U0l8xNWhnj/bhUHdt3OrCvlN+n8x6BwmMNoLF8JIsskTuGHOaAKSQ', 161 | 'WK3z0rHjgIrEjkQeuQtfmptiIgRB9LnNr+YahRnRR6XIOJGaIoVLVM2Uo2RG4MS1', 162 | '2KC3DRJ87WdMv2yNWha3w+lWt/mOALahYrvuNMU8wEuNXSi5yCo1OKirv+d5viGe', 163 | 'hAgVZjRymBQF+vd30zMdOG9qXNoQFUN49JfS8z5FjWmdHRt2MHlqD2isxoeabERY', 164 | 'T4Q50fFH8XHkRRomKBEbCwy/4t2DiqcTOSLGOSbTtf7qlUACp2bRth/g0ySAW8X/', 165 | 
'CaWVm53z1vdgF2+t6j1CnuIqf0dUygZ07HEAHgu3rMW0YTk04QkvR3jiKAKijvGH', 166 | '3YcMJz1aJ7psWSsgiwn8a8Cs4fAcLNJcdTrnyxhQI4PMST/QLfp8nPYrhKEeifTc', 167 | 'vYkC4CtGuEFkWyRifIGbeD7FcjkL1zqVNu31vgo3EIVbHzylERgpgTIYBRv7aV7W', 168 | 'X7XAbrrgXL0zgpI0orOyPkr2KRs6CcoEqcc2MLyB6gJ5fYAm69Ige+6gWtRT6qvZ', 169 | 'tJXagfKZivLj73dRD6sUqTCX4tmgo7Q8WFSeNscDAVm/p4dVsw6SOoFcRgaH20yX', 170 | 'MBa3oLNTUNAaGbScUPx2Ja3MQS0UITwk0TFTF7hL++NhTvTp6IdgQW4DG+/bVJ3M', 171 | 'BRR+hsvSz5BSQQj2FUIAsJ+WoVK9ImbgsBbYxSH60jCvxTIdeh2IeUzS2T1bU9AU', 172 | 'jOLzcJZmNh95Nj2Qdrc8/0gin9KpgPmuPQ6CyH3TPFy88lf19v9jHUMO4SKEr7am', 173 | 'DAjbX3D7APKgHyZ61CkuoB3gylIRb8rRJD2ote38M6A1+04yJL/jG+PCL1UnMWdL', 174 | 'yJ4f4LzI9c4ksnGyl9neq0IHnA0Nlky6dmgmE+vLi6OCbEEs2v132wc5PIxRY+TW', 175 | '8JWu+3wUA4tj5uQvQRqU9/lmoHG/Jxubx/HwdD9Ri17G+qX8re5sySmmq7rcZEGJ', 176 | 'LVrlFuvA0NdoTM4AZY23iR6trJ/Ba2Q4pQk4SfOEMSoZJmf0UbxIP0Ez6Fb+Dxzk', 177 | 'WKXfI+D0ScuVjzV0bs8iXTrCcynztRKndNbtpd39hGAR0rNqvnHyQGYV75bWm5dS', 178 | '0S0PQ6DOzicLxjNXZFicQvwfieg9VyJikWLFLu4zAbzHnuoRk6b2KbSU4UCG/BCz', 179 | 'mHqz4y6GfsncsNkmFmsD5Gn9UrloWcEWgIDL05yIikL+L9DPLnNlSYtehDfxlhvh', 180 | 'xHzY/Rad4Nzxe62yXhSxhROLTXIolllyOFJgqZ4hBlXybBqJH7sZUll6PUpDwZdu', 181 | 'BK14pzMIpfxq2eYp8jI7fh4lU9YrkuSUM0Ewa7HfrltAgxMhHyaFjfINt61P9OlO', 182 | 's3nuBY17+KokaSWjACkCimVLH13H5DRhfX8OBRT4LeRMUspX3cyKbccwpOmoBf4y', 183 | 'WPM9QXw7nQy2hwnuX6NiK5QfeCGfY64M06J2tBGcCDmjPSIcJgMcyY7jfH9yPlDt', 184 | 'SKyyXpZnFOJplS2v28A/1csPSGy9kk/uGN0hfFULH4VvyAgNDYzmeOd8FvrbfHH2', 185 | '8BUTI/Tq2pckxwCYBWHcjSdXRAj5moCNSxCUMtK3kWFdxLFYzoiKuiZwq171qb5L', 186 | 'yCHMwNDIWEMeC75XSMswHaBsK6ON0UUg5oedQkOK+II9L/DVyTs3UYJOsWDfM67E', 187 | '312O9/bmsoHvr+rofF7HEc74dtUAcaDGJNyNiB+O4UmWbtEpCfuLmq2vaZa9J7Y0', 188 | 'hXlD2pcibC9CWpKR58cRL+dyYHZGJ4VKg6OHlJlF+JBPeLzObNDz/zQuEt9aL9Ae', 189 | 'QByamqGDGcaVMVZ/A80fRoUUgHbh3bLoAmxLCvMbJ0YMtRujdtGm8ZD0WvLXQA/U', 190 | 'dNmQ6tsP6pyVorWVa/Ma5CR7Em5q7M6639T8WPcu7ETTO19MnWud2lPJ5A==', 191 | '-----END MESSAGE-----', 192 | 'signature', 193 | '-----BEGIN SIGNATURE-----', 
194 | 'VX4GC6s6zmY84mKsh+YdAqyZqDevJwGYr9yJntBNms4XRQHlgiW/JCspJzCqvrQG', 195 | 'N4Fh8XNTodQFnxz/kz8K3SBFlLnJHzKxSBTSZTLd8hRp84F/XxDcPaIPda8UJZuF', 196 | 'pOT8V0hfhgo8WxLpOyUzxrYugPB2GRkWYLhHaKhxkJY=', 197 | '-----END SIGNATURE-----', 198 | ]) 199 | 200 | PRIVATE_KEY = Crypto.PublicKey.RSA.importKey(PEM_PRIVATE_KEY) 201 | UNIX_TIMESTAMP = 1435233021 202 | 203 | 204 | @pytest.mark.parametrize('intro_point_distribution, selected_ip_count', [ 205 | ([3], 3), 206 | ([3, 3], 6), 207 | ([0], 0), 208 | ([10, 10], 10), 209 | ([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3], 10), 210 | ([10, 10, 10, 10, 10, 10], 10), 211 | pytest.mark.xfail(([0, 3, 3], 10)), 212 | pytest.mark.xfail(([6, 3, 3], 12)), 213 | ]) 214 | def test_introduction_point_selection(intro_point_distribution, 215 | selected_ip_count): 216 | """ 217 | Basic test case to check that the correct number of IPs are selected. 218 | """ 219 | # Create Mock list of instances (index by letter) and their respective 220 | # introduction points. 221 | available_intro_points = [[index] * count for index, count 222 | in zip(string.ascii_lowercase, 223 | intro_point_distribution)] 224 | 225 | intro_set = descriptor.IntroductionPointSet(available_intro_points) 226 | 227 | # Check that we can fetch the same number for each descriptor 228 | for i in range(0, 2): 229 | # Max 10 introduction points per descriptor 230 | choosen_intro_points = intro_set.choose(10) 231 | assert len(choosen_intro_points) == selected_ip_count 232 | 233 | 234 | def test_generate_service_descriptor(monkeypatch, mocker): 235 | """ 236 | Test creation of a fully signed hidden service descriptor 237 | """ 238 | # Mock the datetime function to return a constant timestamp 239 | class frozen_datetime(datetime.datetime): 240 | @classmethod 241 | def utcnow(cls): 242 | return datetime.datetime.utcfromtimestamp(UNIX_TIMESTAMP) 243 | monkeypatch.setattr(datetime, 'datetime', frozen_datetime) 244 | 245 | # Patch make_introduction_points_part to return the test introduction 
246 | # point section 247 | mocker.patch('onionbalance.descriptor.make_introduction_points_part', 248 | lambda *_: INTRODUCTION_POINT_PART) 249 | 250 | # Test basic descriptor generation. 251 | signed_descriptor = descriptor.generate_service_descriptor( 252 | PRIVATE_KEY, 253 | introduction_point_list=['mocked-ip-list'], 254 | ).encode('utf-8') 255 | stem.descriptor.hidden_service_descriptor.\ 256 | HiddenServiceDescriptor(signed_descriptor, validate=True) 257 | assert (hashlib.sha1(signed_descriptor).hexdigest() == 258 | 'df4f4a7a15492205f073c32cbcfc4eb9511e4ad8') 259 | 260 | # Test descriptor generation with specified timestamp 261 | signed_descriptor = descriptor.generate_service_descriptor( 262 | PRIVATE_KEY, 263 | introduction_point_list=['mocked-ip-list'], 264 | timestamp=datetime.datetime.utcfromtimestamp(UNIX_TIMESTAMP), 265 | ).encode('utf-8') 266 | stem.descriptor.hidden_service_descriptor.\ 267 | HiddenServiceDescriptor(signed_descriptor, validate=True) 268 | assert (hashlib.sha1(signed_descriptor).hexdigest() == 269 | 'df4f4a7a15492205f073c32cbcfc4eb9511e4ad8') 270 | 271 | # Test descriptor for deviation and replica 1 272 | signed_descriptor = descriptor.generate_service_descriptor( 273 | PRIVATE_KEY, 274 | introduction_point_list=['mocked-ip-list'], 275 | replica=1, 276 | deviation=24*60*60, 277 | ).encode('utf-8') 278 | stem.descriptor.hidden_service_descriptor.\ 279 | HiddenServiceDescriptor(signed_descriptor, validate=True) 280 | assert (hashlib.sha1(signed_descriptor).hexdigest() == 281 | 'd828140cdccb1165dbc5a4b39622fcb45e6438fb') 282 | 283 | 284 | def test_generate_service_descriptor_no_intros(): 285 | with pytest.raises(ValueError): 286 | descriptor.generate_service_descriptor( 287 | PRIVATE_KEY, 288 | introduction_point_list=[], 289 | ) 290 | 291 | 292 | def test_make_public_key_block(): 293 | """ 294 | Test generation of ASN.1 representation of public key 295 | """ 296 | public_key_block = descriptor.make_public_key_block(PRIVATE_KEY) 297 | 
assert (hashlib.sha1(public_key_block.encode('utf-8')).hexdigest() == 298 | '2cf75da5e1a198ca7cb3db7b0baa6708feaf26e8') 299 | 300 | 301 | def test_sign_digest(): 302 | """ 303 | Test signing a SHA1 digest 304 | """ 305 | test_digest = unhexlify('2a447f044d2f8d8127e8133b2d545450bc58760e') 306 | signature = descriptor.sign_digest(test_digest, PRIVATE_KEY) 307 | assert (hashlib.sha1(signature.encode('utf-8')).hexdigest() == 308 | '27bee071a7e0f0af26a1c176f0c0af00854c05c1') 309 | 310 | 311 | def test_sign_descriptor(): 312 | """ 313 | Test signing a descriptor 314 | """ 315 | 316 | # Test signing an unsigned descriptor 317 | signed_descriptor = descriptor.sign_descriptor( 318 | UNSIGNED_DESCRIPTOR, PRIVATE_KEY).encode('utf-8') 319 | stem.descriptor.hidden_service_descriptor.\ 320 | HiddenServiceDescriptor(signed_descriptor, validate=True) 321 | assert (hashlib.sha1(signed_descriptor).hexdigest() == 322 | 'df4f4a7a15492205f073c32cbcfc4eb9511e4ad8') 323 | 324 | # Test resigning a previously signed descriptor 325 | signed_descriptor = descriptor.sign_descriptor( 326 | SIGNED_DESCRIPTOR, PRIVATE_KEY).encode('utf-8') 327 | stem.descriptor.hidden_service_descriptor.\ 328 | HiddenServiceDescriptor(signed_descriptor, validate=True) 329 | assert (hashlib.sha1(signed_descriptor).hexdigest() == 330 | 'df4f4a7a15492205f073c32cbcfc4eb9511e4ad8') 331 | 332 | 333 | def test_descriptor_received_invalid_descriptor(mocker): 334 | """ 335 | Test invalid descriptor content received from the HSDir 336 | """ 337 | mocker.patch("onionbalance.descriptor.logger.exception", 338 | side_effect=ValueError('InvalidDescriptorException')) 339 | 340 | # Check that the invalid descriptor error is logged. 
341 | with pytest.raises(ValueError): 342 | descriptor.descriptor_received(u'not-a-valid-descriptor-input') 343 | assert descriptor.logger.exception.call_count == 1 344 | -------------------------------------------------------------------------------- /test/test_settings.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import io 3 | import os 4 | 5 | import pytest 6 | 7 | from onionbalance import settings 8 | from .util import builtin 9 | 10 | CONFIG_FILE_VALID = u'\n'.join([ 11 | "services:", 12 | " - key: private.key", 13 | " instances:", 14 | " - address: fqyw6ojo2voercr7", 15 | " - address: facebookcorewwwi", 16 | ]) 17 | 18 | CONFIG_FILE_ABSOLUTE = u'\n'.join([ 19 | "services:", 20 | " - key: /absdir/private.key", 21 | " instances:", 22 | " - address: fqyw6ojo2voercr7", 23 | " - address: facebookcorewwwi", 24 | ]) 25 | 26 | 27 | def test_parse_config_file_valid(mocker): 28 | # Patch config file read 29 | mocker.patch('os.path.exists', return_value=True) 30 | mocker.patch(builtin('open'), 31 | lambda *_: io.StringIO(CONFIG_FILE_VALID)) 32 | 33 | parsed_config = settings.parse_config_file('/configdir/config_rel.yaml') 34 | 35 | assert len(parsed_config['services']) == 1 36 | assert len(parsed_config['services'][0]['instances']) == 2 37 | 38 | # Test key with absolute path 39 | assert os.path.dirname(parsed_config['services'][0]['key']) == '/configdir' 40 | 41 | # Test key with absolute path 42 | mocker.patch(builtin('open'), 43 | lambda *_: io.StringIO(CONFIG_FILE_ABSOLUTE)) 44 | parsed_config = settings.parse_config_file('/configdir/config_abs.yaml') 45 | assert os.path.dirname(parsed_config['services'][0]['key']) == '/absdir' 46 | 47 | 48 | def test_parse_config_file_does_not_exist(mocker): 49 | with pytest.raises(SystemExit): 50 | settings.parse_config_file('doesnotexist/config.yaml') 51 | -------------------------------------------------------------------------------- /test/test_util.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from binascii import hexlify, unhexlify 3 | import base64 4 | import datetime 5 | import io 6 | import sys 7 | 8 | import Crypto.PublicKey.RSA 9 | 10 | import pytest 11 | from .util import builtin 12 | 13 | from onionbalance.util import * 14 | 15 | 16 | PEM_PRIVATE_KEY = u'\n'.join([ 17 | "-----BEGIN RSA PRIVATE KEY-----", 18 | "MIICWwIBAAKBgQDXzP6HGtjPSy7uF9OlY7ZmefTVKcFLsq0mSEzQrW5wSiNuYc+d", 19 | "oSV2OWxPg+1fVe19ES43AUkq/bS/gjAMLOunP6u9FbPDojyh1Vs/6TVqftS3sPkl", 20 | "Q0ItrrZwAwhtHC0WaEyrwYJNOSCBq3wpupdQhpRyWJFqMwm9+iBCG1QcJQIDAQAB", 21 | "AoGAegc2Sqm4vgdyozof+R8Ybnw6ISu6XRbNaJ9rqHjZwW9695khsK4GJAM2pwQf", 22 | "/0/0ukszyfDVMhVC1yREDS59lgzNecItd6nQZWbwr9TFxIoa9ouTqk8PcAoNixTb", 23 | "wafjPcMmWGakizXeAHiOfazPBH4x2keDQCulxfYxXZxTpyECQQDqZu61kd1S3U7T", 24 | "BT2NQBd3tHX0Hvonx+IkOKXwpHFY0Mo4d32Bi+MxRuEnd3tO44AaMvlkl13QMTF2", 25 | "kHFSC70dAkEA669LZavGjW67+rO+f+xyDVby9pD5GJQBb78xRCf93Zcu2KW4NSp3", 26 | "XC4p4eWfLgff1VuXL7g0VdFm4wUUHqYUqQJAZLmqpjdyBeO3tZIw6vu5meTgMvEE", 27 | "ygdos+vr0sa3NlUyMKWYNwznqgstQYpkYHf+WkPBS2qIE6iv+qUDLSCCOQJAESSk", 28 | "CFYxUBJQ7BBs9+Mb/Kppa9Ppuobxf85ZaAq8pYScrLeJKZzYJ8VX2I2aQX/jISLT", 29 | "YW41qFRd9n9lEkGkWQJAcxPmNI+2r5zJG+K148LLmWCIDTVZ4nxOcxffHka/3tCJ", 30 | "lDGUw4p2wU6pVRDpNfKrF5Nc9ZKO8NAtC17ZvDyVkQ==", 31 | "-----END RSA PRIVATE KEY-----", 32 | ]) 33 | 34 | PEM_INVALID_KEY = u'\n'.join([ 35 | "-----BEGIN RSA PRIVATE KEY-----", 36 | "MIICWwIBAAKBgQDXzP6HGtjPSy7uF9OlY7ZmefTVKcFLsq0mSEzQrW5wSiNuYc+d", 37 | "oSV2OWxPg+1fVe19ES43AUkq/bS/gjAMLOunP6u9FbPDojyh1Vs/6TVqftS3sPkl", 38 | "Q0ItrrZwAwhtHC0WaEyrwYJNOSCBq3wpupdQhpRyWJFqMwm9+iBCG1QcJQIDAQAB", 39 | "AoGAegc2Sqm4vgdyozof+R8Ybnw6ISu6XRbNaJ9rqHjZwW9695khsK4GJAM2pwQf", 40 | "/0/0ukszyfDVMhVC1yREDS59lgzNecItd6nQZWbwr9TFxIoa9ouTqk8PcAoNixTb", 41 | "wafjPcMmWGakizXeAHiOfazPBH4x2keDQCulxfYxXZxTpyECQQDqZu61kd1S3U7T", 42 | "BT2NQBd3t This is an invalid key lkl13QMTF2", 43 | 
"kHFSC70dAkEA669LZavGjW67+rO+f+xyDVby9pD5GJQBb78xRCf93Zcu2KW4NSp3", 44 | "XC4p4eWfLgff1VuXL7g0VdFm4wUUHqYUqQJAZLmqpjdyBeO3tZIw6vu5meTgMvEE", 45 | "ygdos+vr0sa3NlUyMKWYNwznqgstQYpkYHf+WkPBS2qIE6iv+qUDLSCCOQJAESSk", 46 | "CFYxUBJQ7BBs9+Mb/Kppa9Ppuobxf85ZaAq8pYScrLeJKZzYJ8VX2I2aQX/jISLT", 47 | "YW41qFRd9n9lEkGkWQJAcxPmNI+2r5zJG+K148LLmWCIDTVZ4nxOcxffHka/3tCJ", 48 | "lDGUw4p2wU6pVRDpNfKrF5Nc9ZKO8NAtC17ZvDyVkQ==", 49 | "-----END RSA PRIVATE KEY-----", 50 | ]) 51 | 52 | # Private key encrypted with the password 'password' 53 | PEM_ENCRYPTED = u'\n'.join([ 54 | "-----BEGIN RSA PRIVATE KEY-----", 55 | "Proc-Type: 4,ENCRYPTED", 56 | "DEK-Info: DES-EDE3-CBC,7CB7069233655F1A", 57 | "", 58 | "EpKWFhHefxQLlKS1M6fPXLUVW0gcrHwYNd2q/0J4emhrHmO50KTC6/nVGTvYS1VC", 59 | "XQwzlla04Ed7kAuP7nkbvT+/6fS72iZmIO/kuhihjaMmRV+peznjEroErndRzWko", 60 | "LCpe70/yMrHhULGR1lLINe+dZddESfYRoGEM1IYhPEEchXZBdqThvaThgeyVmoAV", 61 | "A5qhBOP4QFPSV4J0Jd28wTy+uPmGgCjvfvXjx4JZ2LAfPnLXOoKotRqb/cOtMapp", 62 | "9EmsvjRZH3OLreeQm1BmVzcXGgHLIZWmybGNAW/M0seqeD+NRPXEACOBahXZsSwd", 63 | "krnWALTkcfLw4NXgaHKdsogDV7gWlwkXr05CrSim0+zvg+hQpVp6Phg9qrT3Jh8g", 64 | "988v4Fx/rlVdEpfEeXAmLUpXH3jjeyU1ZOyi8c91Vobxe1dJ9G9P8YBBqnZo1xDa", 65 | "q89FR852v2DKR3xv+GRpzFM43NlWLck9DcNcqIUpbrGd0qRA1k87ZwYSiUPhBvtJ", 66 | "dix6XfeqbqVMYiH0K4sEyuXxJ98UqFzNY3bBi9oqvoQWpo0qrRYAzHrmDg/hJbO6", 67 | "aw8yhe922zw8W9+IQIy2j+ZKkaHSMKqjkIwFxmig5EA+mHNDIP4HwlCxA2e2w6HG", 68 | "ykLE01aHMeS72qRdLVwjib4q2iTEXZVnuyFg/wVprLmLY512iWr03kbj2CVN836b", 69 | "vEpVIvSj5W6oNXjm+hkKA1AMcHVK96y8Ms3BtarDe4tQDh7GjipkoSXrv+2lIl0o", 70 | "XjumCv4Gs63Fv3kUr+jo9N3P0SGe1GggX6MOYIcZF0I=", 71 | "-----END RSA PRIVATE KEY-----", 72 | ]) 73 | 74 | PRIVATE_KEY = Crypto.PublicKey.RSA.importKey(PEM_PRIVATE_KEY) 75 | UNIX_TIMESTAMP = 1435229421 76 | 77 | 78 | def test_add_pkcs1_padding(): 79 | message = unhexlify(b'f42687f4c3c017ce1e14eceb2ff153ff2d0a9e96') 80 | padded_message = add_pkcs1_padding(message) 81 | 82 | assert len(padded_message) == 128 83 | assert (padded_message 
== unhexlify( 84 | b'0001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff' 85 | b'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff' 86 | b'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff' 87 | b'ffffffffffffffffffffff00f42687f4c3c017ce1e14eceb2ff153ff2d0a9e96' 88 | )) 89 | 90 | 91 | def test_get_asn1_sequence(): 92 | asn1_sequence = get_asn1_sequence(PRIVATE_KEY) 93 | assert (asn1_sequence == unhexlify( 94 | b'30818902818100d7ccfe871ad8cf4b2eee17d3a563b66679f4d529c14bb2ad26' 95 | b'484cd0ad6e704a236e61cf9da12576396c4f83ed5f55ed7d112e3701492afdb4' 96 | b'bf82300c2ceba73fabbd15b3c3a23ca1d55b3fe9356a7ed4b7b0f92543422dae' 97 | b'b67003086d1c2d16684cabc1824d392081ab7c29ba975086947258916a3309bd' 98 | b'fa20421b541c250203010001' 99 | )) 100 | 101 | 102 | def test_calc_key_digest(): 103 | key_digest = calc_key_digest(PRIVATE_KEY) 104 | assert hexlify(key_digest) == b'4e2a58768ccb6aa06f95e11646e187879d07fb66' 105 | 106 | 107 | def test_calc_public_key_digest(): 108 | public_key = PRIVATE_KEY.publickey() 109 | key_digest = calc_key_digest(public_key) 110 | assert hexlify(key_digest) == b'4e2a58768ccb6aa06f95e11646e187879d07fb66' 111 | 112 | 113 | def test_calc_permanent_id(): 114 | assert hexlify(calc_permanent_id(PRIVATE_KEY)) == b'4e2a58768ccb6aa06f95' 115 | 116 | 117 | def test_calc_onion_address(): 118 | assert calc_onion_address(PRIVATE_KEY) == u'jyvfq5umznvka34v' 119 | 120 | 121 | def test_get_time_period(): 122 | time_period = get_time_period( 123 | time=UNIX_TIMESTAMP, 124 | permanent_id=unhexlify(b'4e2a58768ccb6aa06f95'), 125 | ) 126 | assert time_period == 16611 127 | 128 | 129 | def test_get_seconds_valid(): 130 | seconds_valid = get_seconds_valid( 131 | time=UNIX_TIMESTAMP, 132 | permanent_id=unhexlify(b'4e2a58768ccb6aa06f95'), 133 | ) 134 | assert seconds_valid == 21054 135 | 136 | 137 | def test_calc_secret_id_part(): 138 | secret_id_part = calc_secret_id_part( 139 | time_period=16611, 140 | descriptor_cookie=None, 
141 | replica=0, 142 | ) 143 | assert (hexlify(secret_id_part) == 144 | b'a0d8e4ec9ac28affed4fa828e8727c7fd4ab4930') 145 | 146 | 147 | def test_calc_secret_id_part_descriptor_cookie(): 148 | secret_id_part = calc_secret_id_part( 149 | time_period=16611, 150 | descriptor_cookie=base64.b64decode('dCmx3qIvArbil8A0KM4KgQ=='), 151 | replica=0, 152 | ) 153 | assert (hexlify(secret_id_part) == 154 | b'ea4e24b1a832f1da687f874b40fa9ecfe5221dd9') 155 | 156 | 157 | def test_calc_descriptor_id(): 158 | descriptor_id = calc_descriptor_id( 159 | permanent_id=b'N*Xv\x8c\xcbj\xa0o\x95', 160 | secret_id_part=unhexlify(b'a0d8e4ec9ac28affed4fa828e8727c7fd4ab4930'), 161 | ) 162 | assert (hexlify(descriptor_id) == 163 | b'f58ce3c63ee634e1ffaf936251a822ff06385f55') 164 | 165 | 166 | def test_calc_descriptor_id_full(): 167 | 168 | descriptor_id = calc_descriptor_id_b32( 169 | onion_address='jyvfq5umznvka34v', 170 | time=UNIX_TIMESTAMP, 171 | replica=0) 172 | 173 | assert descriptor_id == '6wgohrr64y2od75psnrfdkbc74ddqx2v' 174 | 175 | 176 | def test_calc_descriptor_id_full_replica(): 177 | descriptor_id = calc_descriptor_id_b32( 178 | onion_address='jyvfq5umznvka34v', 179 | time=UNIX_TIMESTAMP, 180 | replica=1) 181 | 182 | assert descriptor_id == 'he35m4nouhkz6thymvhdvc3y5htqs422' 183 | 184 | 185 | def test_calc_descriptor_id_full_with_deviation(): 186 | 187 | descriptor_id = calc_descriptor_id_b32( 188 | onion_address='jyvfq5umznvka34v', 189 | time=UNIX_TIMESTAMP, 190 | replica=0, 191 | deviation=1) 192 | 193 | assert descriptor_id == 'esnnz2q6dnfwprvc4qhsgsfzz6r6ksrt' 194 | 195 | 196 | def test_rounded_timestamp(): 197 | timestamp = datetime.datetime(2015, 6, 25, 13, 13, 25) 198 | assert rounded_timestamp(timestamp) == u'2015-06-25 13:00:00' 199 | 200 | 201 | def test_rounded_timestamp_none_specified(monkeypatch): 202 | # Freeze datetime returned from datetime.datetime.utcnow() 203 | class frozen_datetime(datetime.datetime): 204 | @classmethod 205 | def utcnow(cls): 206 | return 
datetime.datetime(2015, 6, 25, 13, 13, 25) 207 | monkeypatch.setattr(datetime, 'datetime', frozen_datetime) 208 | assert rounded_timestamp(timestamp=None) == u'2015-06-25 13:00:00' 209 | 210 | 211 | def test_base32_encode_str(): 212 | assert base32_encode_str(byte_str=b'byte input') == u'mj4xizjanfxha5lu' 213 | 214 | 215 | @pytest.mark.skipif(sys.version_info < (3, 0), reason="python3 only") 216 | def test_base32_encode_str_not_byte_string(): 217 | with pytest.raises(TypeError): 218 | base32_encode_str(byte_str=u'not a byte string') 219 | 220 | 221 | def test_key_decrypt_prompt(mocker): 222 | # Valid private PEM key 223 | mocker.patch(builtin('open'), lambda *_: io.StringIO(PEM_PRIVATE_KEY)) 224 | key = key_decrypt_prompt('private.key') 225 | assert isinstance(key, Crypto.PublicKey.RSA._RSAobj) 226 | assert key.has_private() 227 | 228 | 229 | def test_key_decrypt_prompt_public_key(mocker): 230 | # Valid public PEM key 231 | private_key = Crypto.PublicKey.RSA.importKey(PEM_PRIVATE_KEY) 232 | pem_public_key = private_key.publickey().exportKey().decode('utf-8') 233 | mocker.patch(builtin('open'), lambda *_: io.StringIO(pem_public_key)) 234 | 235 | with pytest.raises(ValueError): 236 | key_decrypt_prompt('public.key') 237 | 238 | 239 | def test_key_decrypt_prompt_malformed_key(mocker): 240 | mocker.patch(builtin('open'), lambda *_: io.StringIO(PEM_INVALID_KEY)) 241 | with pytest.raises(ValueError): 242 | key_decrypt_prompt('private.key') 243 | 244 | 245 | def test_key_decrypt_prompt_incorrect_size(mocker): 246 | # Key which is not 1024 bits 247 | private_key_1280 = Crypto.PublicKey.RSA.generate(1280) 248 | pem_key_1280 = private_key_1280.exportKey().decode('utf-8') 249 | mocker.patch(builtin('open'), lambda *_: io.StringIO(pem_key_1280)) 250 | with pytest.raises(ValueError): 251 | key_decrypt_prompt('512-bit-private.key') 252 | 253 | 254 | def test_key_decrypt_prompt_encrypted(mocker): 255 | mocker.patch(builtin('open'), lambda *_: io.StringIO(PEM_ENCRYPTED)) 256 | 257 
| # Load with correct password 258 | mocker.patch('getpass.getpass', lambda *_: u'password') 259 | key = key_decrypt_prompt('encrypted_private.key') 260 | assert isinstance(key, Crypto.PublicKey.RSA._RSAobj) 261 | 262 | # Load with incorrect password 263 | mocker.patch('getpass.getpass', lambda *_: u'incorrect password') 264 | with pytest.raises(ValueError): 265 | key_decrypt_prompt('encrypted_private.key') 266 | 267 | 268 | def test_try_make_dir_makedirs(mocker): 269 | mocker.patch('os.makedirs') 270 | try_make_dir('dir') 271 | os.makedirs.assert_called_once_with('dir') 272 | 273 | 274 | def test_try_make_dir_makedirs_dir_already_exists(mocker): 275 | mocker.patch('os.makedirs', side_effect=OSError) 276 | mocker.patch('os.path.isdir', return_value=True) 277 | try_make_dir('dir') 278 | os.path.isdir.assert_called_once_with('dir') 279 | 280 | 281 | def test_try_make_dir_makedirs_dir_other_error(mocker): 282 | mocker.patch('os.makedirs', side_effect=OSError) 283 | mocker.patch('os.path.isdir', return_value=False) 284 | with pytest.raises(OSError): 285 | try_make_dir('dir') 286 | 287 | 288 | def test_is_directory_empty_empty(mocker): 289 | # Directory is empty 290 | mocker.patch('os.listdir', return_value=[]) 291 | assert is_directory_empty('dir_empty/') 292 | 293 | 294 | def test_is_directory_empty_not_empty(mocker): 295 | # Directory is empty 296 | mocker.patch('os.listdir', return_value=['filename']) 297 | assert not is_directory_empty('dir_not_empty/') 298 | -------------------------------------------------------------------------------- /test/util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import sys 3 | 4 | 5 | def builtin(name): 6 | """ 7 | Provide the correct import name for builtins on Python 2 or Python 3 8 | """ 9 | if sys.version_info >= (3,): 10 | return 'builtins.{}'.format(name) 11 | else: 12 | return '__builtin__.{}'.format(name) 13 | 
-------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | 6 | [tox] 7 | envlist = style, py27, py35, docs 8 | 9 | [testenv] 10 | deps = -rrequirements.txt 11 | -rtest-requirements.txt 12 | # Pass Chutney enviroment variables into tox virtual enviroments. 13 | passenv = CHUTNEY_ONION_ADDRESS CHUTNEY_CLIENT_PORT TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH 14 | commands = py.test --cov-report=term-missing --cov=onionbalance {posargs} 15 | 16 | [testenv:docs] 17 | basepython=python 18 | changedir=docs 19 | deps=sphinx 20 | sphinxcontrib-autoprogram==0.1.2 21 | commands= 22 | sphinx-build -W -b html -d {envtmpdir}/docs . {envtmpdir}/html 23 | 24 | [testenv:style] 25 | basepython=python 26 | deps=pylint 27 | flake8 28 | commands=pylint onionbalance {posargs: -E} 29 | flake8 onionbalance 30 | --------------------------------------------------------------------------------