├── .bumpversion.cfg ├── .circleci ├── config.yml └── merge_pr.sh ├── .github ├── ISSUE_TEMPLATE.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .project-template ├── fill_template_vars.sh ├── refill_template_vars.sh └── template_vars.txt ├── .pydocstyle.ini ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── ddht ├── __init__.py ├── _boot.py ├── _utils.py ├── abc.py ├── app.py ├── base_message.py ├── boot_info.py ├── canary.py ├── cli_commands.py ├── cli_parser.py ├── constants.py ├── datagram.py ├── encryption.py ├── endpoint.py ├── enr.py ├── event.py ├── exceptions.py ├── handshake_schemes.py ├── kademlia.py ├── logging.py ├── main.py ├── message_registry.py ├── request_tracker.py ├── resource_queue.py ├── rpc.py ├── rpc_handlers.py ├── sedes.py ├── subscription_manager.py ├── token_bucket.py ├── tools │ ├── __init__.py │ ├── benchmark │ │ └── __init__.py │ ├── driver │ │ ├── __init__.py │ │ ├── _utils.py │ │ ├── abc.py │ │ ├── alexandria.py │ │ ├── node.py │ │ ├── session.py │ │ └── tester.py │ ├── factories │ │ ├── __init__.py │ │ ├── alexandria.py │ │ ├── boot_info.py │ │ ├── content.py │ │ ├── discovery.py │ │ ├── endpoint.py │ │ ├── kademlia.py │ │ ├── keys.py │ │ ├── node_id.py │ │ ├── socket.py │ │ └── v5_1.py │ ├── lru_sql_dict.py │ ├── v5_strategies.py │ ├── w3.py │ └── w3_alexandria.py ├── typing.py ├── upnp.py ├── v5 │ ├── __init__.py │ ├── abc.py │ ├── app.py │ ├── channel_services.py │ ├── client.py │ ├── constants.py │ ├── crawl.py │ ├── endpoint_tracker.py │ ├── handshake.py │ ├── handshake_schemes.py │ ├── message_dispatcher.py │ ├── messages.py │ ├── packer.py │ ├── packets.py │ ├── routing_table.py │ ├── routing_table_manager.py │ ├── tags.py │ ├── topic_table.py │ └── typing.py ├── v5_1 │ ├── __init__.py │ ├── abc.py │ ├── alexandria │ │ ├── __init__.py │ │ ├── _utils.py │ │ ├── abc.py │ │ ├── app.py │ │ ├── boot_info.py │ │ ├── client.py │ │ ├── constants.py │ │ ├── content.py │ │ ├── content_storage.py │ │ ├── messages.py │ │ ├── 
network.py │ │ ├── payloads.py │ │ ├── rlp_sedes.py │ │ ├── rpc_handlers.py │ │ ├── sedes.py │ │ ├── seeker.py │ │ ├── typing.py │ │ └── xdg.py │ ├── app.py │ ├── client.py │ ├── constants.py │ ├── crawler.py │ ├── dispatcher.py │ ├── envelope.py │ ├── events.py │ ├── exceptions.py │ ├── explorer.py │ ├── handshake_schemes.py │ ├── messages.py │ ├── network.py │ ├── packets.py │ ├── pool.py │ ├── rpc_handlers.py │ └── session.py ├── validation.py └── xdg.py ├── docs ├── Makefile ├── _static │ └── .suppress-sphinx-build-warning ├── conf.py ├── ddht.rst ├── ddht.tools.factories.rst ├── ddht.tools.rst ├── ddht.v5.rst ├── index.rst ├── jsonrpc.rst └── release_notes.rst ├── mypy.ini ├── newsfragments ├── 139.bugfix.rst ├── 140.feature.rst ├── 173.feature.rst ├── 174.misc.rst ├── 177.feature.rst ├── 178.feature.rst ├── 182.feature.rst ├── 183.feature.rst ├── 195.feature.rst ├── 224.feature.rst ├── 225.feature.rst ├── 263.feature.rst ├── 343.bugfix.rst ├── 343.doc.rst ├── 343.internal.rst ├── 343.misc.rst ├── 344.feature.rst ├── 345.feature.rst ├── 346.bugfix.rst ├── 346.feature.rst ├── README.md └── validate_files.py ├── pyproject.toml ├── pytest.ini ├── requirements-docs.txt ├── setup.py ├── stubs └── cached_property.pyi ├── tests ├── conftest.py └── core │ ├── conftest.py │ ├── test_adaptive_timeout_util.py │ ├── test_aesgcm_encryption.py │ ├── test_cli.py │ ├── test_cli_parsing_to_boot_info.py │ ├── test_core_rpc_handlers.py │ ├── test_datagram_services.py │ ├── test_enr_partitioning.py │ ├── test_event.py │ ├── test_every_and_gather_utils.py │ ├── test_humanize_bytes.py │ ├── test_import.py │ ├── test_kademlia.py │ ├── test_node_at_distance.py │ ├── test_request_tracker.py │ ├── test_resource_queue.py │ ├── test_subscription_manager.py │ ├── test_validation_utils.py │ ├── test_weighted_choice_util.py │ ├── tools │ └── test_lru_sql_dict.py │ ├── v5 │ ├── __init__.py │ ├── test_channel_services.py │ ├── test_endpoint_tracker.py │ ├── test_flat_routing_table.py │ ├── 
test_handshake.py │ ├── test_handshake_schemes.py │ ├── test_kademlia_routing_table.py │ ├── test_message_dispatcher.py │ ├── test_messages.py │ ├── test_packer.py │ ├── test_packet_decryption.py │ ├── test_packet_encoding.py │ ├── test_packet_preparation.py │ ├── test_routing_table_manager.py │ ├── test_tags.py │ ├── test_topic_table.py │ └── test_v5_bootnodes.py │ └── v5_1 │ ├── alexandria │ ├── conftest.py │ ├── test_alexandria_client.py │ ├── test_alexandria_network.py │ ├── test_content_storage.py │ ├── test_message_encoding.py │ └── test_subscriptions.py │ ├── conftest.py │ ├── test_client.py │ ├── test_dispatcher.py │ ├── test_dispatcher_session_management.py │ ├── test_handshake_schemes.py │ ├── test_network.py │ ├── test_packet_encoding.py │ ├── test_pool.py │ ├── test_session.py │ ├── test_specification_fixtures.py │ ├── test_v51_bootnodes.py │ └── test_v51_rpc_handlers.py └── tox.ini /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.1.0-alpha.2 3 | commit = True 4 | tag = True 5 | parse = (?P\d+)\.(?P\d+)\.(?P\d+)(-(?P[^.]*)\.(?P\d+))? 
6 | serialize = 7 | {major}.{minor}.{patch}-{stage}.{devnum} 8 | {major}.{minor}.{patch} 9 | 10 | [bumpversion:part:stage] 11 | optional_value = stable 12 | first_value = stable 13 | values = 14 | alpha 15 | beta 16 | stable 17 | 18 | [bumpversion:part:devnum] 19 | 20 | [bumpversion:file:setup.py] 21 | search = version="{current_version}", 22 | replace = version="{new_version}", 23 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.0 2 | 3 | # heavily inspired by https://raw.githubusercontent.com/pinax/pinax-wiki/6bd2a99ab6f702e300d708532a6d1d9aa638b9f8/.circleci/config.yml 4 | 5 | common: &common 6 | working_directory: ~/repo 7 | steps: 8 | - checkout 9 | - run: 10 | name: merge pull request base 11 | command: ./.circleci/merge_pr.sh 12 | - run: 13 | name: merge pull request base (2nd try) 14 | command: ./.circleci/merge_pr.sh 15 | when: on_fail 16 | - run: 17 | name: merge pull request base (3nd try) 18 | command: ./.circleci/merge_pr.sh 19 | when: on_fail 20 | - restore_cache: 21 | keys: 22 | - cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} 23 | - run: 24 | name: install dependencies 25 | command: pip install --user tox 26 | - run: 27 | name: run tox 28 | command: ~/.local/bin/tox -r 29 | - save_cache: 30 | paths: 31 | - .hypothesis 32 | - .tox 33 | - ~/.cache/pip 34 | - ~/.local 35 | - ./eggs 36 | key: cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "setup.py" }}-{{ checksum "tox.ini" }} 37 | 38 | jobs: 39 | docs: 40 | <<: *common 41 | docker: 42 | - image: circleci/python:3.8 43 | environment: 44 | TOXENV: docs 45 | lint: 46 | <<: *common 47 | docker: 48 | - image: circleci/python:3.8 49 | environment: 50 | TOXENV: lint 51 | py38-core: 52 | <<: *common 53 | docker: 54 | - image: circleci/python:3.8 55 | environment: 56 | TOXENV: py38-core 57 | workflows: 58 | version: 2 59 
| test: 60 | jobs: 61 | - docs 62 | - lint 63 | - py38-core 64 | -------------------------------------------------------------------------------- /.circleci/merge_pr.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then 4 | PR_INFO_URL=https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/$CIRCLE_PR_NUMBER 5 | PR_BASE_BRANCH=$(curl -L "$PR_INFO_URL" | python -c 'import json, sys; obj = json.load(sys.stdin); sys.stdout.write(obj["base"]["ref"])') 6 | git fetch origin +"$PR_BASE_BRANCH":circleci/pr-base 7 | # We need these config values or git complains when creating the 8 | # merge commit 9 | git config --global user.name "Circle CI" 10 | git config --global user.email "circleci@example.com" 11 | git merge --no-edit circleci/pr-base 12 | fi 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | _If this is a bug report, please fill in the following sections. 2 | If this is a feature request, delete and describe what you would like with examples._ 3 | 4 | ## What was wrong? 5 | 6 | ### Code that produced the error 7 | 8 | ```py 9 | CODE_TO_REPRODUCE 10 | ``` 11 | 12 | ### Full error output 13 | 14 | ```sh 15 | ERROR_HERE 16 | ``` 17 | 18 | ### Expected Result 19 | 20 | _This section may be deleted if the expectation is "don't crash"._ 21 | 22 | ```sh 23 | EXPECTED_RESULT 24 | ``` 25 | 26 | ### Environment 27 | 28 | ```sh 29 | # run this: 30 | $ python -m eth_utils 31 | 32 | # then copy the output here: 33 | OUTPUT_HERE 34 | ``` 35 | 36 | ## How can it be fixed? 37 | 38 | Fill this section in if you know how this could or should be fixed. 
39 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## What was wrong? 2 | 3 | Issue # 4 | 5 | ## How was it fixed? 6 | 7 | Summary of approach. 8 | 9 | ### To-Do 10 | 11 | [//]: # (Stay ahead of things, add list items here!) 12 | - [ ] Clean up commit history 13 | 14 | [//]: # (For important changes that should go into the release notes please add a newsfragment file as explained here: https://github.com/ethereum/ddht/blob/master/newsfragments/README.md) 15 | 16 | [//]: # (See: https://ddht.readthedocs.io/en/latest/contributing.html#pull-requests) 17 | - [ ] Add entry to the [release notes](https://github.com/ethereum/ddht/blob/master/newsfragments/README.md) 18 | 19 | #### Cute Animal Picture 20 | 21 | ![put a cute animal picture link inside the parentheses]() 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | .eggs 13 | parts 14 | bin 15 | var 16 | sdist 17 | develop-eggs 18 | .installed.cfg 19 | lib 20 | lib64 21 | venv* 22 | 23 | # Installer logs 24 | pip-log.txt 25 | 26 | # Unit test / coverage reports 27 | .coverage 28 | .tox 29 | nosetests.xml 30 | 31 | # Translations 32 | *.mo 33 | 34 | # Mr Developer 35 | .mr.developer.cfg 36 | .project 37 | .pydevproject 38 | 39 | # Complexity 40 | output/*.html 41 | output/*/index.html 42 | 43 | # Sphinx 44 | docs/_build 45 | docs/modules.rst 46 | docs/*.internal.rst 47 | docs/*.utils.rst 48 | docs/*._utils.* 49 | 50 | # Blockchain 51 | chains 52 | 53 | # Hypothese Property base testing 54 | .hypothesis 55 | 56 | # tox/pytest cache 57 | .cache 58 | 59 | # Test output logs 60 | logs 61 | ### JetBrains template 62 | # 
Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 63 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 64 | 65 | # User-specific stuff: 66 | .idea/workspace.xml 67 | .idea/tasks.xml 68 | .idea/dictionaries 69 | .idea/vcs.xml 70 | .idea/jsLibraryMappings.xml 71 | 72 | # Sensitive or high-churn files: 73 | .idea/dataSources.ids 74 | .idea/dataSources.xml 75 | .idea/dataSources.local.xml 76 | .idea/sqlDataSources.xml 77 | .idea/dynamic.xml 78 | .idea/uiDesigner.xml 79 | 80 | # Gradle: 81 | .idea/gradle.xml 82 | .idea/libraries 83 | 84 | # Mongo Explorer plugin: 85 | .idea/mongoSettings.xml 86 | 87 | # VIM temp files 88 | *.sw[op] 89 | 90 | # mypy 91 | .mypy_cache 92 | 93 | ## File-based project format: 94 | *.iws 95 | 96 | ## Plugin-specific files: 97 | 98 | # IntelliJ 99 | /out/ 100 | 101 | # mpeltonen/sbt-idea plugin 102 | .idea_modules/ 103 | 104 | # JIRA plugin 105 | atlassian-ide-plugin.xml 106 | 107 | # Crashlytics plugin (for Android Studio and IntelliJ) 108 | com_crashlytics_export_strings.xml 109 | crashlytics.properties 110 | crashlytics-build.properties 111 | fabric.properties 112 | 113 | -------------------------------------------------------------------------------- /.project-template/fill_template_vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | PROJECT_ROOT=$(dirname $(dirname $(python -c 'import os, sys; sys.stdout.write(os.path.realpath(sys.argv[1]))' "$0"))) 8 | 9 | echo "What is your python module name?" 10 | read MODULE_NAME 11 | 12 | echo "What is your pypi package name? (default: $MODULE_NAME)" 13 | read PYPI_INPUT 14 | PYPI_NAME=${PYPI_INPUT:-$MODULE_NAME} 15 | 16 | echo "What is your github project name? 
(default: $PYPI_NAME)" 17 | read REPO_INPUT 18 | REPO_NAME=${REPO_INPUT:-$PYPI_NAME} 19 | 20 | echo "What is your readthedocs.org project name? (default: $PYPI_NAME)" 21 | read RTD_INPUT 22 | RTD_NAME=${RTD_INPUT:-$PYPI_NAME} 23 | 24 | echo "What is your project name (ex: at the top of the README)? (default: $REPO_NAME)" 25 | read PROJECT_INPUT 26 | PROJECT_NAME=${PROJECT_INPUT:-$REPO_NAME} 27 | 28 | echo "What is a one-liner describing the project?" 29 | read SHORT_DESCRIPTION 30 | 31 | _replace() { 32 | echo "Replacing values: $1" 33 | local find_cmd=(find "$PROJECT_ROOT" ! -perm -u=x ! -path '*/.git/*' ! -path '*/venv*/*' -type f) 34 | 35 | if [[ $(uname) == Darwin ]]; then 36 | "${find_cmd[@]}" -exec sed -i '' "$1" {} + 37 | else 38 | "${find_cmd[@]}" -exec sed -i "$1" {} + 39 | fi 40 | } 41 | _replace "s//$MODULE_NAME/g" 42 | _replace "s//$PYPI_NAME/g" 43 | _replace "s//$REPO_NAME/g" 44 | _replace "s//$RTD_NAME/g" 45 | _replace "s//$PROJECT_NAME/g" 46 | _replace "s//$SHORT_DESCRIPTION/g" 47 | 48 | mkdir -p "$PROJECT_ROOT/$MODULE_NAME" 49 | touch "$PROJECT_ROOT/$MODULE_NAME/__init__.py" 50 | -------------------------------------------------------------------------------- /.project-template/refill_template_vars.sh: -------------------------------------------------------------------------------- 1 | TEMPLATE_DIR=$(dirname $(readlink -f "$0")) 2 | <"$TEMPLATE_DIR/template_vars.txt" "$TEMPLATE_DIR/fill_template_vars.sh" 3 | -------------------------------------------------------------------------------- /.project-template/template_vars.txt: -------------------------------------------------------------------------------- 1 | ddht 2 | ddht 3 | ddht 4 | ddht 5 | Discovery V5 DHT 6 | Implementation of the P2P Discoveryv5 Protocol 7 | -------------------------------------------------------------------------------- /.pydocstyle.ini: -------------------------------------------------------------------------------- 1 | [pydocstyle] 2 | ; All error codes found here: 3 | ; 
http://www.pydocstyle.org/en/3.0.0/error_codes.html 4 | ; 5 | ; Ignored: 6 | ; D1 - Missing docstring error codes 7 | ; 8 | ; Selected: 9 | ; D2 - Whitespace error codes 10 | ; D3 - Quote error codes 11 | ; D4 - Content related error codes 12 | select=D2,D3,D4 13 | 14 | ; Extra ignores: 15 | ; D200 - One-line docstring should fit on one line with quotes 16 | ; D203 - 1 blank line required before class docstring 17 | ; D204 - 1 blank line required after class docstring 18 | ; D205 - 1 blank line required between summary line and description 19 | ; D212 - Multi-line docstring summary should start at the first line 20 | ; D302 - Use u""" for Unicode docstrings 21 | ; D400 - First line should end with a period 22 | ; D401 - First line should be in imperative mood 23 | ; D412 - No blank lines allowed between a section header and its content 24 | add-ignore=D200,D203,D204,D205,D212,D302,D400,D401,D412 25 | 26 | ; Explanation: 27 | ; D400 - Enabling this error code seems to make it a requirement that the first 28 | ; sentence in a docstring is not split across two lines. It also makes it a 29 | ; requirement that no docstring can have a multi-sentence description without a 30 | ; summary line. Neither one of those requirements seem appropriate. 
31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2020 The Ethereum Foundation 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include README.md 3 | include requirements-docs.txt 4 | 5 | global-include *.pyi 6 | 7 | recursive-exclude * __pycache__ 8 | recursive-exclude * *.py[co] 9 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | CURRENT_SIGN_SETTING := $(shell git config commit.gpgSign) 2 | 3 | .PHONY: clean-pyc clean-build docs 4 | 5 | help: 6 | @echo "clean-build - remove build artifacts" 7 | @echo "clean-pyc - remove Python file artifacts" 8 | @echo "lint - check style with flake8" 9 | @echo "test - run tests quickly with the default Python" 10 | @echo "testall - run tests on every Python version with tox" 11 | @echo "release - package and upload a release" 12 | @echo "dist - package" 13 | 14 | clean: clean-build clean-pyc 15 | 16 | clean-build: 17 | rm -fr build/ 18 | rm -fr dist/ 19 | rm -fr *.egg-info 20 | 21 | clean-pyc: 22 | find . -name '*.pyc' -exec rm -f {} + 23 | find . -name '*.pyo' -exec rm -f {} + 24 | find . -name '*~' -exec rm -f {} + 25 | 26 | lint: 27 | tox -elint 28 | 29 | # Some lint can be auto-corrected. This job will do that and warn about the 30 | # others. It doesn't use tox, because tox is slow and heavy. Because docs are 31 | # always stale, this is probably stale, and you should update it to match the 32 | # tox.ini section on the 'lint' job. 33 | lint-roll: 34 | # Front-load the auto-correcting tools like isort and black 35 | isort ddht tests 36 | black ddht tests 37 | # Back-load the warning-only tools 38 | mypy -p ddht --config-file mypy.ini 39 | flake8 ddht tests 40 | pydocstyle ddht tests 41 | 42 | test: 43 | pytest tests 44 | 45 | test-all: 46 | tox 47 | 48 | build-docs: 49 | sphinx-apidoc -o docs/ . 
setup.py "*conftest*" 50 | $(MAKE) -C docs clean 51 | $(MAKE) -C docs html 52 | $(MAKE) -C docs doctest 53 | ./newsfragments/validate_files.py 54 | towncrier --draft --version preview 55 | 56 | docs: build-docs 57 | open docs/_build/html/index.html 58 | 59 | linux-docs: build-docs 60 | xdg-open docs/_build/html/index.html 61 | 62 | check-bump: 63 | ifndef bump 64 | $(error bump must be set, typically: major, minor, patch, or devnum) 65 | endif 66 | 67 | notes: check-bump 68 | # Let UPCOMING_VERSION be the version that is used for the current bump 69 | $(eval UPCOMING_VERSION=$(shell bumpversion $(bump) --dry-run --list | grep new_version= | sed 's/new_version=//g')) 70 | # Now generate the release notes to have them included in the release commit 71 | towncrier --yes --version $(UPCOMING_VERSION) 72 | # Before we bump the version, make sure that the towncrier-generated docs will build 73 | make build-docs 74 | git commit -m "Compile release notes" 75 | 76 | release: check-bump clean 77 | # require that you be on a branch that's linked to upstream/master 78 | git status -s -b | head -1 | grep "\.\.upstream/master" 79 | # verify that docs build correctly 80 | ./newsfragments/validate_files.py is-empty 81 | make build-docs 82 | CURRENT_SIGN_SETTING=$(git config commit.gpgSign) 83 | git config commit.gpgSign true 84 | bumpversion $(bump) 85 | git push upstream && git push upstream --tags 86 | python setup.py sdist bdist_wheel 87 | twine upload dist/* 88 | git config commit.gpgSign "$(CURRENT_SIGN_SETTING)" 89 | 90 | 91 | dist: clean 92 | python setup.py sdist bdist_wheel 93 | ls -l dist 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Discovery V5 DHT 2 | 3 | [![Join the chat at 
https://gitter.im/ethereum/ddht](https://badges.gitter.im/ethereum/ddht.svg)](https://gitter.im/ethereum/ddht?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 4 | [![Build Status](https://circleci.com/gh/ethereum/ddht.svg?style=shield)](https://circleci.com/gh/ethereum/ddht) 5 | [![PyPI version](https://badge.fury.io/py/ddht.svg)](https://badge.fury.io/py/ddht) 6 | [![Python versions](https://img.shields.io/pypi/pyversions/ddht.svg)](https://pypi.python.org/pypi/ddht) 7 | [![Docs build](https://readthedocs.org/projects/ddht/badge/?version=latest)](http://ddht.readthedocs.io/en/latest/?badge=latest) 8 | 9 | 10 | Implementation of the P2P Discoveryv5 Protocol 11 | 12 | Read more in the [documentation on ReadTheDocs](https://ddht.readthedocs.io/). [View the change log](https://ddht.readthedocs.io/en/latest/releases.html). 13 | 14 | ## Quickstart 15 | 16 | ```sh 17 | pip install ddht 18 | ``` 19 | 20 | To run it: 21 | 22 | ```sh 23 | ddht 24 | ``` 25 | 26 | `--help` will tell you about the arguments `ddht` accepts. The LOGLEVEL environment 27 | variable can be used to control which log messages are emitted. For example, to suppress 28 | unimportant messages from the Packer you can run: 29 | 30 | ```sh 31 | LOGLEVEL=WARNING:ddht.v5.packer.Packer ddht 32 | ``` 33 | 34 | ## Developer Setup 35 | 36 | If you would like to hack on ddht, please check out the [Snake Charmers 37 | Tactical Manual](https://github.com/ethereum/snake-charmers-tactical-manual) 38 | for information on how we do: 39 | 40 | - Testing 41 | - Pull Requests 42 | - Code Style 43 | - Documentation 44 | 45 | ### Development Environment Setup 46 | 47 | You can set up your dev environment with: 48 | 49 | ```sh 50 | git clone git@github.com:ethereum/ddht.git 51 | cd ddht 52 | virtualenv -p python3 venv 53 | . venv/bin/activate 54 | pip install -e .[dev] 55 | ``` 56 | 57 | ### Testing Setup 58 | 59 | During development, you might like to have tests run on every file save. 
60 | 61 | Show flake8 errors on file change: 62 | 63 | ```sh 64 | # Test flake8 65 | when-changed -v -s -r -1 ddht/ tests/ -c "clear; flake8 ddht tests && echo 'flake8 success' || echo 'error'" 66 | ``` 67 | 68 | Run multi-process tests in one command, but without color: 69 | 70 | ```sh 71 | # in the project root: 72 | pytest --numprocesses=4 --looponfail --maxfail=1 73 | # the same thing, succinctly: 74 | pytest -n 4 -f --maxfail=1 75 | ``` 76 | 77 | Run in one thread, with color and desktop notifications: 78 | 79 | ```sh 80 | cd venv 81 | ptw --onfail "notify-send -t 5000 'Test failure ⚠⚠⚠⚠⚠' 'python 3 test on ddht failed'" ../tests ../ddht 82 | ``` 83 | 84 | ### Release setup 85 | 86 | For Debian-like systems: 87 | ``` 88 | apt install pandoc 89 | ``` 90 | 91 | To release a new version: 92 | 93 | ```sh 94 | make release bump=$$VERSION_PART_TO_BUMP$$ 95 | ``` 96 | 97 | #### How to bumpversion 98 | 99 | The version format for this repo is `{major}.{minor}.{patch}` for stable, and 100 | `{major}.{minor}.{patch}-{stage}.{devnum}` for unstable (`stage` can be alpha or beta). 101 | 102 | To issue the next version in line, specify which part to bump, 103 | like `make release bump=minor` or `make release bump=devnum`. This is typically done from the 104 | master branch, except when releasing a beta (in which case the beta is released from master, 105 | and the previous stable branch is released from said branch). 106 | 107 | If you are in a beta version, `make release bump=stage` will switch to a stable. 
from typing import TYPE_CHECKING, AsyncIterator

from async_service import Service, TrioManager
import trio

if TYPE_CHECKING:
    import signal  # noqa: F401


async def _main() -> None:
    # Deferred import: the real application modules are only pulled in once
    # the boot scaffolding (signal handling) is fully set up.
    from ddht.main import main

    await main()


class BootService(Service):
    """
    Minimal bootstrap service: installs SIGTERM/SIGINT handling, then runs
    the main application, cancelling the service manager on exit or signal.
    """

    async def run(self) -> None:
        import signal  # noqa: F811

        with trio.open_signal_receiver(signal.SIGTERM, signal.SIGINT) as signal_aiter:
            ready = trio.Event()
            self.manager.run_daemon_task(self._monitor_signals, ready, signal_aiter)
            # this is needed to give the async iterable time to be entered.
            await ready.wait()

            # imports are triggered at this stage.
            await _main()

            import logging

            logger = logging.getLogger("ddht")
            logger.info("Stopping: Application Exited")
            self.manager.cancel()

    async def _monitor_signals(
        self, ready: trio.Event, signal_aiter: AsyncIterator["signal.Signals"]
    ) -> None:
        """
        Daemon task: log each received termination signal and cancel the
        service manager.  ``ready`` is set before the first iteration so the
        caller knows the receiver is being consumed.
        """
        import logging
        import signal  # noqa: F811

        ready.set()
        async for sig in signal_aiter:
            logger = logging.getLogger()

            if sig == signal.SIGTERM:
                logger.info("Stopping: SIGTERM")
            elif sig == signal.SIGINT:
                logger.info("Stopping: CTRL+C")
            else:
                # NOTE(review): the receiver is only registered for SIGTERM and
                # SIGINT above, so this branch appears unreachable — confirm.
                logger.error("Stopping: unexpected signal: %s:%s", sig.value, sig.name)

            self.manager.cancel()


def _boot() -> None:
    """
    Process entry point: run ``BootService`` under trio, exiting with status
    2 on a hard (un-trapped) CTRL+C and 0 on a clean shutdown.
    """
    try:
        manager = TrioManager(BootService())

        trio.run(manager.run)
    except KeyboardInterrupt:
        import logging
        import sys

        logger = logging.getLogger()
        logger.info("Stopping: Fast CTRL+C")
        sys.exit(2)
    else:
        import sys

        sys.exit(0)


if __name__ == "__main__":
    _boot()
class BaseMessage(rlp.Serializable):  # type: ignore
    # One-byte wire discriminator prepended to the RLP payload by
    # ``to_bytes``; concrete subclasses set this.
    message_type: int

    def to_bytes(self) -> bytes:
        """Serialize as the big-endian ``message_type`` byte(s) followed by the RLP encoding."""
        return b"".join((int_to_big_endian(self.message_type), rlp.encode(self)))


class EmptyMessage(BaseMessage):
    def to_bytes(self) -> bytes:
        """Serialize to an empty byte string (no type byte, no payload)."""
        return b""


TMessage = TypeVar("TMessage")
TBaseMessage = TypeVar("TBaseMessage", bound=BaseMessage)
TResponseMessage = TypeVar("TResponseMessage", bound=BaseMessage)


@dataclass(frozen=True)
class OutboundMessage(Generic[TMessage]):
    """A message paired with the endpoint and node id it should be sent to."""

    message: BaseMessage
    receiver_endpoint: Endpoint
    receiver_node_id: NodeID

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.message.__class__.__name__}]"


@dataclass(frozen=True)
class InboundMessage(Generic[TMessage]):
    """A received message paired with the endpoint and node id it came from."""

    message: TMessage
    sender_endpoint: Endpoint
    sender_node_id: NodeID
    # For TALKREQ/TALKRESP sub-protocol messages the request id lives on the
    # base-protocol envelope and is duplicated here; ``None`` otherwise.
    explicit_request_id: Optional[bytes] = None

    @property
    def request_id(self) -> bytes:
        """
        Return the ``request_id`` for this message.

        This API exists to allow this class to be used with both base-protocol
        messages which contain the ``request_id`` as well as with
        TALKREQ/TALKRESP based sub-protocol messages. In the TALKREQ/TALKRESP
        case the ``request_id`` is only present on the base protocol message
        and needs to be duplicated into the sub-protocol message. This is
        integral to being able to use the ``SubscriptionManagerAPI`` with both
        the base protocol and sub-protocols.
        """
        if self.explicit_request_id is not None:
            return self.explicit_request_id
        elif hasattr(self.message, "request_id"):
            return self.message.request_id  # type: ignore
        else:
            raise AttributeError(
                f"No explicit request_id and message does not have a "
                f"`request_id` property: {self.message}"
            )

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.message.__class__.__name__}]"

    def to_response(
        self, response_message: TResponseMessage
    ) -> OutboundMessage[TResponseMessage]:
        """Wrap ``response_message`` addressed back to this message's sender."""
        return OutboundMessage(
            message=response_message,
            receiver_endpoint=self.sender_endpoint,
            receiver_node_id=self.sender_node_id,
        )


AnyInboundMessage = InboundMessage[Any]
AnyOutboundMessage = OutboundMessage[Any]
class BootInfoKwargs(TypedDict, total=False):
    # Keyword arguments accepted by ``BootInfo``; ``total=False`` because the
    # dict is always built fully by ``_cli_args_to_boot_info_kwargs``.
    protocol_version: ProtocolVersion
    base_dir: pathlib.Path
    port: int
    listen_on: Optional[AnyIPAddress]
    bootnodes: Tuple[ENRAPI, ...]
    private_key: Optional[keys.PrivateKey]
    is_ephemeral: bool
    is_upnp_enabled: bool
    is_rpc_enabled: bool
    ipc_path: pathlib.Path


def _cli_args_to_boot_info_kwargs(args: argparse.Namespace) -> BootInfoKwargs:
    """
    Translate a parsed CLI namespace into the keyword arguments for
    ``BootInfo``, filling in defaults for anything not given explicitly.
    """
    protocol_version = args.protocol_version

    is_ephemeral = args.ephemeral is True
    is_upnp_enabled = not args.disable_upnp

    # Base directory: explicit flag wins, then a throwaway dir for ephemeral
    # runs, then the XDG default.
    if args.base_dir is not None:
        base_dir = args.base_dir.expanduser().resolve()
    elif is_ephemeral:
        # NOTE(review): only the *name* of the TemporaryDirectory is kept; the
        # directory object itself is discarded and its finalizer removes the
        # directory — confirm a later component re-creates ``base_dir``.
        base_dir = pathlib.Path(tempfile.TemporaryDirectory().name)
    else:
        base_dir = get_xdg_ddht_root()

    # Port: explicit flag wins, ephemeral runs get a random open port.
    if args.port is not None:
        port = args.port
    elif is_ephemeral:
        port = get_open_port()
    else:
        port = DEFAULT_PORT

    listen_on: Optional[AnyIPAddress]

    if args.listen_address is None:
        listen_on = None
    else:
        listen_on = args.listen_address

    # Bootnodes: fall back to the protocol-specific default ENR list.
    if args.bootnodes is None:
        if protocol_version is ProtocolVersion.v5:
            bootnodes = tuple(
                ENR.from_repr(enr_repr) for enr_repr in DEFAULT_V5_BOOTNODES
            )
        elif protocol_version is ProtocolVersion.v5_1:
            bootnodes = tuple(
                ENR.from_repr(enr_repr) for enr_repr in DEFAULT_V51_BOOTNODES
            )
        else:
            raise Exception(f"Unsupported protocol version: {protocol_version}")
    else:
        bootnodes = args.bootnodes

    private_key: Optional[keys.PrivateKey]

    if args.private_key is not None:
        # CLI supplies the key as a hex string.
        private_key = keys.PrivateKey(decode_hex(args.private_key))
    else:
        private_key = None

    ipc_path: pathlib.Path

    if args.ipc_path is not None:
        ipc_path = args.ipc_path
    else:
        ipc_path = base_dir / "jsonrpc.ipc"

    is_rpc_enabled = args.disable_jsonrpc is not True

    return BootInfoKwargs(
        protocol_version=protocol_version,
        base_dir=base_dir,
        port=port,
        listen_on=listen_on,
        bootnodes=bootnodes,
        private_key=private_key,
        is_ephemeral=is_ephemeral,
        is_upnp_enabled=is_upnp_enabled,
        is_rpc_enabled=is_rpc_enabled,
        ipc_path=ipc_path,
    )


@dataclass(frozen=True)
class BootInfo:
    """Immutable bundle of the configuration needed to boot the node."""

    protocol_version: ProtocolVersion
    base_dir: pathlib.Path
    port: int
    listen_on: Optional[AnyIPAddress]
    bootnodes: Tuple[ENRAPI, ...]
    private_key: Optional[keys.PrivateKey]
    is_ephemeral: bool
    is_upnp_enabled: bool
    is_rpc_enabled: bool
    ipc_path: pathlib.Path

    @classmethod
    def from_cli_args(cls, args: Sequence[str]) -> "BootInfo":
        """Parse raw CLI argument strings and build a ``BootInfo``."""
        # Import here to prevent circular imports
        from ddht.cli_parser import parser

        namespace = parser.parse_args(args)
        return cls.from_namespace(namespace)

    @classmethod
    def from_namespace(cls, args: argparse.Namespace) -> "BootInfo":
        """Build a ``BootInfo`` from an already-parsed namespace."""
        kwargs = _cli_args_to_boot_info_kwargs(args)
        return cls(**kwargs)
stats=%s", 32 | delay, 33 | stats.tasks_living, 34 | stats.io_statistics, 35 | ) 36 | -------------------------------------------------------------------------------- /ddht/cli_commands.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | 4 | from async_service import background_trio_service, run_trio_service 5 | import trio 6 | 7 | from ddht.abc import ApplicationAPI 8 | from ddht.boot_info import BootInfo 9 | from ddht.constants import ProtocolVersion 10 | from ddht.v5.app import Application as ApplicationV5 11 | from ddht.v5.crawl import Crawler as CrawlerV5 12 | from ddht.v5_1.alexandria.app import AlexandriaApplication 13 | from ddht.v5_1.app import Application as ApplicationV5_1 14 | from ddht.v5_1.crawler import Crawler as CrawlerV51 15 | 16 | logger = logging.getLogger("ddht") 17 | 18 | 19 | async def do_main(args: argparse.Namespace, boot_info: BootInfo) -> None: 20 | app: ApplicationAPI 21 | 22 | if boot_info.protocol_version is ProtocolVersion.v5: 23 | app = ApplicationV5(args, boot_info) 24 | elif boot_info.protocol_version is ProtocolVersion.v5_1: 25 | app = ApplicationV5_1(args, boot_info) 26 | else: 27 | raise Exception(f"Unsupported protocol version: {boot_info.protocol_version}") 28 | 29 | async with background_trio_service(app) as manager: 30 | await manager.wait_finished() 31 | 32 | 33 | async def do_crawl(args: argparse.Namespace, boot_info: BootInfo) -> None: 34 | if boot_info.protocol_version is ProtocolVersion.v5: 35 | crawler_v5 = CrawlerV5(args, boot_info) 36 | 37 | await run_trio_service(crawler_v5) 38 | elif boot_info.protocol_version is ProtocolVersion.v5_1: 39 | app = ApplicationV5_1(args, boot_info) 40 | async with background_trio_service(app): 41 | await app.wait_ready() 42 | await app.network.events.session_handshake_complete.wait() 43 | await trio.sleep(1) 44 | crawler_v51 = CrawlerV51( 45 | network=app.network, concurrency=args.crawl_concurrency, # type: 
# UDP port the node binds to when none is configured.
DEFAULT_PORT: int = 30303

# Bind to all interfaces by default.
DEFAULT_LISTEN: ipaddress.IPv4Address = ipaddress.ip_address("0.0.0.0")

# Upper bound on the size of a discovery packet, per the protocol spec.
DISCOVERY_MAX_PACKET_SIZE = 1280

# Receive-buffer size for inbound discovery UDP datagrams; deliberately
# larger than DISCOVERY_MAX_PACKET_SIZE so over-sized datagrams can be
# detected rather than silently truncated.
DISCOVERY_DATAGRAM_BUFFER_SIZE = DISCOVERY_MAX_PACKET_SIZE * 2

# Number of ENRs returned in a NEIGHBOURS response.
NEIGHBOURS_RESPONSE_ITEMS = 16

# Size in bytes of an AES-128 key.
AES128_KEY_SIZE = 16

# Info string fed into HKDF during the discovery v5 key agreement.
HKDF_INFO = b"discovery v5 key agreement"

# Textual prefix used when rendering an ENR.
ENR_REPR_PREFIX = "enr:"

# Maximum allowed encoded size of an ENR.
MAX_ENR_SIZE = 300

# ENR key/value-pair keys for endpoint information.
IP_V4_ADDRESS_ENR_KEY = b"ip"
UDP_PORT_ENR_KEY = b"udp"
TCP_PORT_ENR_KEY = b"tcp"

# Byte sizes of packed IP addresses.
IP_V4_SIZE = 4
IP_V6_SIZE = 16

# Kademlia routing-table dimensions.
NUM_ROUTING_TABLE_BUCKETS = 256
ROUTING_TABLE_BUCKET_SIZE = 16


class ProtocolVersion(enum.Enum):
    """The discovery protocol versions this client can speak."""

    v5 = "v5"
    v5_1 = "v5.1"


# Pre-computed single-byte values indexed by integer, avoiding a `bytes([v])`
# allocation on hot paths.
UINT8_TO_BYTES = {value: value.to_bytes(1, "big") for value in range(256)}
@as_service
async def DatagramReceiver(
    manager: ManagerAPI,
    sock: SocketType,
    inbound_datagram_send_channel: SendChannel[InboundDatagram],
) -> None:
    """Read datagrams from a socket and send them to a channel."""
    logger = get_extended_debug_logger("ddht.DatagramReceiver")

    async with inbound_datagram_send_channel:
        while manager.is_running:
            payload, (raw_ip, raw_port) = await sock.recvfrom(
                DISCOVERY_DATAGRAM_BUFFER_SIZE
            )
            # Store the address in packed form; `Endpoint.__str__` unpacks it.
            sender = Endpoint(inet_aton(raw_ip), raw_port)
            logger.debug2("Received %d bytes from %s", len(payload), sender)
            try:
                await inbound_datagram_send_channel.send(
                    InboundDatagram(payload, sender)
                )
            except trio.BrokenResourceError:
                # The consumer side is gone; shut the whole service down.
                logger.debug(
                    "DatagramReceiver exiting due to `trio.BrokenResourceError`"
                )
                manager.cancel()
                return
def validate_aes128_key(key: AES128Key) -> None:
    """Raise if *key* is not exactly ``AES128_KEY_SIZE`` bytes long."""
    validate_length(key, AES128_KEY_SIZE, "AES128 key")


def validate_nonce(nonce: bytes) -> None:
    """Raise if *nonce* is not exactly ``NONCE_SIZE`` bytes long."""
    validate_length(nonce, NONCE_SIZE, "nonce")


def aesgcm_encrypt(
    key: AES128Key, nonce: Nonce, plain_text: bytes, authenticated_data: bytes,
) -> bytes:
    """
    Encrypt *plain_text* with AES-GCM under *key*/*nonce*, binding
    *authenticated_data* into the authentication tag.
    """
    validate_aes128_key(key)
    validate_nonce(nonce)

    return AESGCM(key).encrypt(nonce, plain_text, authenticated_data)
class Endpoint(NamedTuple):
    """A UDP network endpoint: a packed IPv4 address plus a port."""

    # Packed 4-byte IPv4 address as produced by `socket.inet_aton`.
    ip_address: bytes
    port: int

    def __str__(self) -> str:
        return f"{inet_ntoa(self.ip_address)}:{self.port}"

    @classmethod
    def from_enr(cls, enr: "ENRAPI") -> "Endpoint":
        """
        Extract the UDP endpoint from an ENR record.

        :raises MissingEndpointFields: if the record lacks the IP address or
            UDP port key/value pairs.
        """
        # BUGFIX: the first parameter was previously named `self` on a
        # classmethod, and the return hard-coded `Endpoint(...)`; using
        # `cls(...)` keeps subclasses working. Also chain the original
        # KeyError for easier debugging.
        try:
            ip_address = enr[IP_V4_ADDRESS_ENR_KEY]
            port = enr[UDP_PORT_ENR_KEY]
        except KeyError as err:
            raise MissingEndpointFields(
                f"Missing endpoint address information: {err}"
            ) from err

        return cls(ip_address, port)
40 | """ 41 | if max_payload_size < MAX_ENR_SIZE: 42 | raise ValueError( 43 | "Cannot parition ENR records under the max singular ENR record size" 44 | ) 45 | 46 | return tuple(_partition_enrs(enrs, max_payload_size)) 47 | -------------------------------------------------------------------------------- /ddht/event.py: -------------------------------------------------------------------------------- 1 | from typing import AsyncIterator, Set 2 | 3 | from async_generator import asynccontextmanager 4 | from eth_utils import get_extended_debug_logger 5 | import trio 6 | 7 | from ddht.abc import EventAPI, TEventPayload 8 | 9 | 10 | class Event(EventAPI[TEventPayload]): 11 | 12 | _channels: Set[trio.abc.SendChannel[TEventPayload]] 13 | 14 | def __init__(self, name: str, buffer_size: int = 256) -> None: 15 | self.logger = get_extended_debug_logger("ddht.Event") 16 | self.name = name 17 | self._buffer_size = buffer_size 18 | self._lock = trio.Lock() 19 | self._channels = set() 20 | 21 | async def trigger(self, payload: TEventPayload) -> None: 22 | self.logger.debug2("%s: triggered: %s", self.name, payload) 23 | if not self._channels: 24 | return 25 | async with self._lock: 26 | for send_channel in self._channels: 27 | try: 28 | await send_channel.send(payload) 29 | except trio.BrokenResourceError: 30 | pass 31 | 32 | def trigger_nowait(self, payload: TEventPayload) -> None: 33 | self.logger.debug2("%s: triggered: %s", self.name, payload) 34 | for send_channel in self._channels: 35 | try: 36 | send_channel.send_nowait(payload) # type: ignore 37 | except trio.BrokenResourceError: 38 | pass 39 | 40 | @asynccontextmanager 41 | async def subscribe(self) -> AsyncIterator[trio.abc.ReceiveChannel[TEventPayload]]: 42 | send_channel, receive_channel = trio.open_memory_channel[TEventPayload]( 43 | self._buffer_size 44 | ) 45 | 46 | async with self._lock: 47 | self._channels.add(send_channel) 48 | 49 | try: 50 | async with receive_channel: 51 | yield receive_channel 52 | finally: 53 | 
async with self._lock: 54 | self._channels.remove(send_channel) 55 | 56 | @asynccontextmanager 57 | async def subscribe_and_wait(self) -> AsyncIterator[None]: 58 | async with self.subscribe() as subscription: 59 | yield 60 | await subscription.receive() 61 | 62 | async def wait(self) -> TEventPayload: 63 | async with self.subscribe() as subscription: 64 | result = await subscription.receive() 65 | return result 66 | -------------------------------------------------------------------------------- /ddht/exceptions.py: -------------------------------------------------------------------------------- 1 | class BaseDDHTError(Exception): 2 | """ 3 | The base class for all Discovery-DHT errors. 4 | """ 5 | 6 | pass 7 | 8 | 9 | class DecodingError(BaseDDHTError): 10 | """ 11 | Raised when a datagram could not be decoded. 12 | """ 13 | 14 | pass 15 | 16 | 17 | class ParseError(BaseDDHTError): 18 | """ 19 | Raised as a generic error when trying to parse something. 20 | """ 21 | 22 | pass 23 | 24 | 25 | class DecryptionError(BaseDDHTError): 26 | """ 27 | Raised when a message could not be decrypted. 28 | """ 29 | 30 | pass 31 | 32 | 33 | class HandshakeFailure(BaseDDHTError): 34 | """ 35 | Raised when the protocol handshake was unsuccessful. 36 | """ 37 | 38 | pass 39 | 40 | 41 | class UnexpectedMessage(BaseDDHTError): 42 | """ 43 | Raised when the received message was unexpected. 44 | """ 45 | 46 | pass 47 | 48 | 49 | class DuplicateProtocol(BaseDDHTError): 50 | """ 51 | Raised when attempting to register a TALK protocol when one is already registered. 52 | """ 53 | 54 | pass 55 | 56 | 57 | class EmptyFindNodesResponse(BaseDDHTError): 58 | """ 59 | Raised when we ask a remote node for its ENR and it returns nothing. 
60 | """ 61 | 62 | pass 63 | 64 | 65 | class MissingEndpointFields(BaseDDHTError): 66 | """ 67 | Raised when trying to extract and `Endpoint` from an ENR record when the 68 | ENR record is missing the necessary fields 69 | """ 70 | 71 | pass 72 | -------------------------------------------------------------------------------- /ddht/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.handlers 3 | import os 4 | import pathlib 5 | import sys 6 | from typing import Dict, Optional, Tuple 7 | 8 | 9 | class DDHTFormatter(logging.Formatter): 10 | def format(self, record: logging.LogRecord) -> str: 11 | record.shortname = record.name.split(".")[-1] # type: ignore 12 | 13 | return super().format(record) 14 | 15 | 16 | LOG_FORMATTER = DDHTFormatter( 17 | fmt="%(levelname)8s %(asctime)s %(name)20s %(message)s" 18 | ) 19 | 20 | 21 | LOG_LEVEL_CHOICES = { 22 | "DEBUG": logging.DEBUG, 23 | "INFO": logging.INFO, 24 | "WARN": logging.WARNING, 25 | "WARNING": logging.WARNING, 26 | "ERROR": logging.ERROR, 27 | "CRITICAL": logging.CRITICAL, 28 | } 29 | 30 | 31 | def parse_raw_log_level(raw_level: str) -> int: 32 | if raw_level.upper() in LOG_LEVEL_CHOICES: 33 | return LOG_LEVEL_CHOICES[raw_level.upper()] 34 | 35 | raise Exception(f"Invalid log level: {raw_level}") 36 | 37 | 38 | def parse_log_level_spec(log_level_spec: str) -> Tuple[Optional[str], int]: 39 | raw_level, _, raw_path = log_level_spec.partition(":") 40 | level = parse_raw_log_level(raw_level) 41 | 42 | # if there is no colon then this is a spec for the root logger and path will be '' 43 | path = raw_path if raw_path != "" else None 44 | return path, level 45 | 46 | 47 | def environment_to_log_levels(raw_levels: Optional[str]) -> Dict[Optional[str], int]: 48 | if raw_levels is None: 49 | return dict() 50 | 51 | levels = dict() 52 | for raw_level in raw_levels.split(","): 53 | path, level = parse_log_level_spec(raw_level) 54 | levels[path] = 
level 55 | 56 | return levels 57 | 58 | 59 | def setup_logging( 60 | logfile: pathlib.Path, file_log_level: int, stderr_level: int = None 61 | ) -> None: 62 | if stderr_level is None: 63 | stderr_level = logging.INFO 64 | if file_log_level is None: 65 | file_log_level = logging.DEBUG 66 | 67 | raw_environ_log_levels = os.environ.get("LOGLEVEL", None) 68 | environ_log_levels = environment_to_log_levels(raw_environ_log_levels) 69 | 70 | for path, level in environ_log_levels.items(): 71 | logging.getLogger(path).setLevel(level) 72 | 73 | logger = logging.getLogger() 74 | logger.setLevel(min(stderr_level, file_log_level)) 75 | 76 | setup_stderr_logging(stderr_level) 77 | setup_file_logging(logfile, file_log_level) 78 | 79 | 80 | def setup_stderr_logging(level: int) -> logging.StreamHandler: 81 | handler_stream = logging.StreamHandler(sys.stderr) 82 | handler_stream.setLevel(level) 83 | handler_stream.setFormatter(LOG_FORMATTER) 84 | 85 | logger = logging.getLogger() 86 | logger.addHandler(handler_stream) 87 | 88 | return handler_stream 89 | 90 | 91 | def setup_file_logging( 92 | logfile: pathlib.Path, level: int 93 | ) -> logging.handlers.RotatingFileHandler: 94 | 95 | ten_MB = 10 * 1024 * 1024 96 | 97 | handler_file = logging.handlers.RotatingFileHandler( 98 | logfile, 99 | maxBytes=ten_MB, 100 | backupCount=5, 101 | delay=True, # don't open the file until we try to write a logline 102 | ) 103 | handler_file.setLevel(level) 104 | handler_file.setFormatter(LOG_FORMATTER) 105 | 106 | if logfile.exists(): 107 | # begin a new file every time we launch, this makes debugging much easier 108 | handler_file.doRollover() 109 | 110 | logger = logging.getLogger() 111 | logger.addHandler(handler_file) 112 | 113 | return handler_file 114 | -------------------------------------------------------------------------------- /ddht/main.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import shutil 4 | 5 | from 
async def main() -> None:
    """
    Top-level async entry point: parse CLI args, prepare the base directory
    and logging, dispatch to the selected subcommand, and clean up ephemeral
    state on exit.
    """
    args = parser.parse_args()

    boot_info = BootInfo.from_namespace(args)

    if not boot_info.base_dir.exists():
        # Only auto-create the data dir when it is clearly ours to create.
        if boot_info.is_ephemeral or get_xdg_data_home() in boot_info.base_dir.parents:
            boot_info.base_dir.mkdir(exist_ok=True)
        else:
            raise FileNotFoundError(
                "Not creating DDHT root directory as it is not present and is "
                f"not under the $XDG_DATA_HOME: {boot_info.base_dir}"
            )

    if args.log_file is None:
        # BUGFIX: this was `logdir = log_file = ...`; the `log_file` half of
        # the assignment was dead (immediately overwritten below) and
        # misleading.
        logdir = get_xdg_ddht_root() / "logs"
        logdir.mkdir(parents=True, exist_ok=True)

        log_file = logdir / "ddht.log"
    else:
        log_file = args.log_file

    setup_logging(log_file, args.log_level_file, args.log_level_stderr)

    logger.info(DDHT_HEADER)

    logger.info("Started main process (pid=%d)", os.getpid())

    try:
        await args.func(args, boot_info)
    finally:
        # Ephemeral nodes leave nothing behind.
        if boot_info.is_ephemeral:
            shutil.rmtree(boot_info.base_dir)
def _get_random_request_id() -> bytes:
    """Return four cryptographically random bytes for use as a request id."""
    return secrets.token_bytes(4)


class RequestTracker(RequestTrackerAPI):
    """
    Tracks which ``(node_id, request_id)`` pairs are currently in use so
    concurrent requests to the same peer never share an id.
    """

    def __init__(self) -> None:
        self._reserved_request_ids: Set[Tuple[NodeID, bytes]] = set()

    def get_free_request_id(self, node_id: NodeID) -> bytes:
        """Return a request id not currently reserved for *node_id*."""
        for _attempt in range(MAX_REQUEST_ID_ATTEMPTS):
            candidate = _get_random_request_id()
            if (node_id, candidate) not in self._reserved_request_ids:
                return candidate

        # The improbability of picking three already used request ids in a
        # row is sufficiently improbable that we can generally assume it
        # just will not ever happen (< 1/2**96)
        raise ValueError(
            f"Failed to get free request id ({len(self._reserved_request_ids)} "
            f"handlers added right now)"
        )

    @contextmanager
    def reserve_request_id(
        self, node_id: NodeID, request_id: Optional[bytes] = None
    ) -> Iterator[bytes]:
        """
        Reserve a `request_id` during the lifecycle of the context block.

        If a `request_id` is not provided, one will be generated lazily.

        .. note::

            If an explicit `request_id` is provided, it is not guaranteed to be
            collision free.
        """
        if request_id is None:
            request_id = self.get_free_request_id(node_id)
        reservation = (node_id, request_id)
        try:
            self._reserved_request_ids.add(reservation)
            yield request_id
        finally:
            self._reserved_request_ids.remove(reservation)

    def is_request_id_active(self, node_id: NodeID, request_id: bytes) -> bool:
        """Return whether the pair is currently reserved."""
        return (node_id, request_id) in self._reserved_request_ids
class ResourceQueue(ResourceQueueAPI[TResource]):
    """
    Share a pool of resources among worker tasks such that each resource is
    held by at most one worker at a time.

    Workers call :meth:`reserve` to borrow a resource for the duration of a
    context block; the resource is returned to the queue on exit unless it
    was removed in the meantime. Resources may be added or removed at any
    time via :meth:`add` / :meth:`remove`.
    """

    resources: Set[TResource]

    def __init__(self, resources: Collection[TResource],) -> None:
        self.resources = set(resources)
        self._queue = collections.deque(self.resources)
        self._lock = trio.Lock()

    async def add(self, resource: TResource) -> None:
        """Add *resource* to the pool; a no-op if already tracked."""
        if resource in self:
            return
        async with self._lock:
            self._queue.appendleft(resource)
            self.resources.add(resource)

    def __contains__(self, value: Any) -> bool:
        return value in self.resources

    def __len__(self) -> int:
        return len(self.resources)

    async def remove(self, resource: TResource) -> None:
        """Stop tracking *resource*; tolerates it being mid-reservation."""
        async with self._lock:
            self.resources.discard(resource)
            try:
                self._queue.remove(resource)
            except ValueError:
                # A reserved resource is not in the queue; `reserve` will
                # notice the removal and drop it instead of re-queueing.
                pass

    @asynccontextmanager
    async def reserve(self) -> AsyncIterator[TResource]:
        """Borrow one resource for the duration of the context block."""
        # Pop until we find a resource that is still tracked; anything popped
        # that was removed in the meantime is simply discarded.
        # NOTE(review): when the queue is momentarily empty this loop spins
        # (re-acquiring the lock each pass) until something is re-queued —
        # confirm callers never call reserve() on a possibly-empty queue.
        while True:
            async with self._lock:
                try:
                    candidate = self._queue.pop()
                except IndexError:
                    continue
            if candidate in self:
                break

        try:
            yield candidate
        finally:
            # Re-queue only if the resource was not removed while reserved.
            async with self._lock:
                if candidate in self:
                    self._queue.appendleft(candidate)
class SubscriptionManager(SubscriptionManagerAPI[TMessage]):
    """
    Fan inbound messages out to per-message-type subscriptions, optionally
    filtered by sender endpoint and/or sender node id.
    """

    _subscriptions: DefaultDict[Type[Any], Set[_Subcription]]

    def __init__(self) -> None:
        self.logger = get_extended_debug_logger("ddht.SubscriptionManager")

        self._subscriptions = collections.defaultdict(set)

    def feed_subscriptions(self, message: AnyInboundMessage) -> None:
        """Deliver *message* to all matching subscriptions (non-blocking)."""
        message_type = type(message.message)

        # Snapshot so the set can be mutated by un-subscribers mid-delivery.
        subscriptions = tuple(self._subscriptions[message_type])
        self.logger.debug2(
            "Handling %d subscriptions for message: %s", len(subscriptions), message,
        )
        for subscription in subscriptions:
            endpoint_filter = subscription.filter_by_endpoint
            node_id_filter = subscription.filter_by_node_id
            if endpoint_filter is not None and message.sender_endpoint != endpoint_filter:
                continue
            if node_id_filter is not None and message.sender_node_id != node_id_filter:
                continue

            try:
                subscription.send_channel.send_nowait(message)  # type: ignore
            except trio.WouldBlock:
                # Back-pressure policy: drop rather than block the feeder.
                self.logger.debug(
                    "Discarding message for subscription %s due to full channel: %s",
                    subscription,
                    message,
                )
            except trio.BrokenResourceError:
                pass

    @asynccontextmanager
    async def subscribe(
        self,
        message_type: Type[TMessage],
        endpoint: Optional[Endpoint] = None,
        node_id: Optional[NodeID] = None,
    ) -> AsyncIterator[trio.abc.ReceiveChannel[InboundMessage[TMessage]]]:
        """Open a channel receiving all *message_type* messages that pass the
        optional endpoint / node-id filters."""
        sender, receiver = trio.open_memory_channel[
            InboundMessage[TMessage]
        ](256)
        subscription = _Subcription(sender, endpoint, node_id)
        self._subscriptions[message_type].add(subscription)

        self.logger.debug2("Subscription setup for: %s", message_type)

        try:
            async with receiver:
                yield receiver
        finally:
            self._subscriptions[message_type].remove(subscription)
class NotEnoughTokens(Exception):
    """
    Raised if the token bucket is empty when trying to take a token in blocking
    mode.
    """

    pass


class TokenBucket:
    """
    A trio-friendly token-bucket rate limiter: tokens refill continuously at
    ``rate`` per second up to ``capacity``.
    """

    def __init__(self, rate: Union[int, float], capacity: Union[int, float]) -> None:
        self._rate = rate
        self._capacity = capacity
        # Bucket starts full.
        self._num_tokens = self._capacity
        self._last_refill = time.perf_counter()
        self._seconds_per_token = 1 / self._rate
        self._take_lock = trio.Lock()

    async def __aiter__(self) -> AsyncGenerator[None, None]:
        """
        Can be used as an async iterator to limit the rate at which a loop can
        run.
        """
        while True:
            await self.take()
            yield

    def get_num_tokens(self) -> float:
        """
        Return the number of tokens current in the bucket.
        """
        now = time.perf_counter()
        return max(0, self._get_num_tokens(now))

    def _get_num_tokens(self, when: float) -> float:
        # The `take` method relies on this function being allowed to return a
        # negative balance (debt accrued by a blocking take).
        refilled = self._num_tokens + (self._rate * (when - self._last_refill))
        return min(self._capacity, refilled)

    def _take(self, num: Union[int, float] = 1) -> None:
        if num < 0:
            raise ValueError("Cannot take negative token quantity")
        now = time.perf_counter()

        # Refill up to capacity based on elapsed time.
        self._num_tokens = self._get_num_tokens(now)
        self._last_refill = now

        # Deduct; the internal balance is deliberately allowed to go negative.
        self._num_tokens -= num

    async def take(self, num: Union[int, float] = 1) -> None:
        """
        Take `num` tokens out of the bucket.  If the bucket does not have
        enough tokens, blocks until the bucket will be full enough to fulfill
        the request.
        """
        # Serialize takers so two tasks cannot both deduct while one is
        # sleeping off its debt.
        async with self._take_lock:
            self._take(num)

            # A negative balance means we owe time: sleep long enough for the
            # refill to cover the debt (`_get_num_tokens` may be negative).
            if self._num_tokens < 0:
                delay = -self._num_tokens * self._seconds_per_token
                await trio.sleep(delay)

    def take_nowait(self, num: Union[int, float] = 1) -> None:
        # Capture the balance once so the error message matches the value we
        # actually tested against (avoids a read/raise race in the report).
        available = self.get_num_tokens()
        if available < num:
            raise NotEnoughTokens(
                f"Insufficient capacity. Needed {num:.2f} but only has {available:.2f}"
            )
        self._take(num)

    def can_take(self, num: Union[int, float] = 1) -> bool:
        """
        Return boolean whether the bucket has enough tokens to take `num` tokens.
        """
        return self.get_num_tokens() >= num
class AlexandriaNode(AlexandriaNodeAPI):
    """
    Test-driver wrapper that layers an Alexandria client or network on top of
    a base discovery ``NodeAPI``, sharing one in-memory content storage.
    """

    # Guards against concurrently running two things (client/network) that
    # would both bind the node's socket.
    _lock: NamedLock

    def __init__(self, node: NodeAPI) -> None:
        self.node = node
        self.storage = ContentStorage.memory()
        self._lock = NamedLock()

    @property
    def enr(self) -> ENRAPI:
        # Delegates to the underlying discovery node's ENR.
        return self.node.enr

    @asynccontextmanager
    async def client(
        self, network: Optional[NetworkAPI] = None,
    ) -> AsyncIterator[AlexandriaClientAPI]:
        """
        Run an ``AlexandriaClient`` for the duration of the context.  If no
        ``network`` is given, a fresh one is started from the base node;
        otherwise the caller's network is used as-is (null context).
        """
        network_context: AsyncContextManager[NetworkAPI]

        if network is None:
            network_context = self.node.network()
        else:
            # unclear why the typing doesn't work for `asyncnullcontext`
            network_context = asyncnullcontext(network)  # type: ignore

        # Lock first so a concurrent `network(...)` cannot also bind the port.
        async with self._lock.acquire("AlexandriaNode.client(...)"):
            async with network_context as network:
                alexandria_client = AlexandriaClient(network)
                async with background_trio_service(alexandria_client):
                    yield alexandria_client

    @asynccontextmanager
    async def network(
        self, network: Optional[NetworkAPI] = None, bootnodes: Collection[ENRAPI] = (),
    ) -> AsyncIterator[AlexandriaNetworkAPI]:
        """
        Run an ``AlexandriaNetwork`` for the duration of the context, backed by
        this node's shared ``storage``.  Waits for readiness before yielding.
        """
        network_context: AsyncContextManager[NetworkAPI]

        if network is None:
            network_context = self.node.network()
        else:
            # unclear why the typing doesn't work for `asyncnullcontext`
            network_context = asyncnullcontext(network)  # type: ignore

        async with self._lock.acquire("AlexandriaNode.network(...)"):
            async with network_context as network:
                alexandria_network = AlexandriaNetwork(
                    network=network, bootnodes=bootnodes, storage=self.storage,
                )
                async with background_trio_service(alexandria_network):
                    # Only yield once the network reports ready.
                    await alexandria_network.ready()
                    yield alexandria_network
class Node(NodeAPI):
    """
    Test-driver node: owns a private key, a freshly generated ENR bound to the
    given endpoint, and provides context-managed `client()`/`network()` views.
    """

    # Prevents concurrently opening two services that would both bind the
    # node's UDP port.
    _lock: NamedLock

    def __init__(
        self,
        private_key: keys.PrivateKey,
        endpoint: Endpoint,
        enr_db: QueryableENRDatabaseAPI,
        events: Optional[EventsAPI] = None,
    ) -> None:
        self.private_key = private_key
        self.enr_db = enr_db
        # Generate an ENR advertising the supplied endpoint and persist it.
        self.enr = ENRFactory(
            private_key=private_key.to_bytes(),
            address__ip=endpoint.ip_address,
            address__udp_port=endpoint.port,
        )
        self.enr_db.set_enr(self.enr)
        if events is None:
            events = Events()
        self.events = events
        # Alexandria overlay view of this same node.
        self.alexandria = AlexandriaNode(self)

        self._lock = NamedLock()

    def __str__(self) -> str:
        return f"{humanize_hash(self.node_id)}@{self.endpoint}"  # type: ignore

    @property
    def endpoint(self) -> Endpoint:
        # Reconstructed from the ENR rather than stored, so it always matches
        # what peers see.
        return Endpoint(self.enr[IP_V4_ADDRESS_ENR_KEY], self.enr[UDP_PORT_ENR_KEY],)

    @property
    def node_id(self) -> NodeID:
        return self.enr.node_id

    @asynccontextmanager
    async def client(self) -> AsyncIterator[ClientAPI]:
        """Run a raw discovery ``Client`` for the duration of the context."""
        async with self._lock.acquire("Node.client(...)"):
            client = Client(
                local_private_key=self.private_key,
                listen_on=self.endpoint,
                enr_db=self.enr_db,
                session_cache_size=1024,
                events=self.events,
            )
            async with background_trio_service(client):
                # Only yield once the socket is actually listening.
                await client.wait_listening()
                yield client

    @asynccontextmanager
    async def network(
        self, bootnodes: Collection[ENRAPI] = ()
    ) -> AsyncIterator[NetworkAPI]:
        """Run a full ``Network`` (client + routing) for the context duration."""
        async with self._lock.acquire("Node.network(...)"):
            client = Client(
                local_private_key=self.private_key,
                listen_on=self.endpoint,
                enr_db=self.enr_db,
                session_cache_size=1024,
                events=self.events,
            )
            network = Network(client, bootnodes)
            async with background_trio_service(network):
                # The network service starts the client; wait for both the
                # socket and the network bootstrap before yielding.
                await client.wait_listening()
                await network.ready()
                yield network
ONE_HOUR = datetime.timedelta(seconds=60 * 60)


class AdvertisementFactory(factory.Factory):  # type: ignore
    """
    Factory for signed ``Advertisement`` objects.  The signature fields are
    derived from the ``signature`` Params entry, which signs the generated
    content key / hash tree root / expiry with a fresh private key.
    """

    # Random content key: 33-159 bytes (33 + [0, 127)).
    content_key = factory.LazyFunction(
        lambda: secrets.token_bytes(33 + secrets.randbelow(127))
    )
    hash_tree_root = factory.LazyFunction(lambda: secrets.token_bytes(32))
    # Expiry one hour out, truncated to whole seconds.
    expires_at = factory.LazyFunction(
        lambda: datetime.datetime.utcnow().replace(microsecond=0) + ONE_HOUR
    )

    # Unpacked v/r/s components of the computed signature below.
    signature_v = factory.LazyAttribute(lambda o: o.signature.v)
    signature_r = factory.LazyAttribute(lambda o: o.signature.r)
    signature_s = factory.LazyAttribute(lambda o: o.signature.s)

    class Params:
        # `Params` entries are inputs to the build, not model fields.
        private_key = factory.SubFactory(PrivateKeyFactory)
        signature = factory.LazyAttribute(
            lambda o: create_advertisement_signature(
                content_id=content_key_to_content_id(o.content_key),
                hash_tree_root=o.hash_tree_root,
                expires_at=o.expires_at,
                private_key=o.private_key,
            )
        )

    class Meta:
        model = Advertisement

    @classmethod
    def expired(cls, **kwargs: Any) -> "Advertisement":
        """Build an advertisement whose expiry is one hour in the past."""
        expires_at = datetime.datetime.utcnow().replace(microsecond=0) - ONE_HOUR
        return cls(**kwargs, expires_at=expires_at)

    @classmethod
    def invalid(cls, **kwargs: Any) -> "Advertisement":
        """Build an advertisement with a deliberately bogus signature."""
        return cls(**kwargs, signature_v=1, signature_r=12345, signature_s=24689)
@functools.lru_cache(maxsize=512)
def ContentFactory(length: int = 2048) -> bytes:
    """
    Return ``length`` bytes of deterministic pseudo-random content, built from
    the concatenated sha256 digests of successive chunk indices.
    """
    # Ceil-divide: enough 32-byte digests to cover `length` bytes.
    num_chunks = (length + 31) // 32
    chunks = [_hash_idx(idx) for idx in range(num_chunks)]
    return b"".join(chunks)[:length]


@functools.lru_cache(maxsize=4096)
def _hash_idx(idx: int) -> bytes:
    """sha256 digest of the 32-byte big-endian encoding of ``idx``."""
    return hashlib.sha256(idx.to_bytes(32, "big")).digest()
class EndpointFactory(factory.Factory):  # type: ignore
    """Factory for ``Endpoint`` tuples with a random IP and an open port."""

    class Meta:
        model = Endpoint

    # NOTE(review): `factory.Faker(...).generate({})` is a deprecated faker
    # invocation in newer factory_boy releases — confirm against the pinned
    # version before upgrading.
    ip_address = factory.LazyFunction(
        lambda: socket.inet_aton(factory.Faker("ipv4").generate({}))
    )
    port = factory.LazyFunction(robust_get_open_port)

    @classmethod
    def localhost(cls, *args: Any, **kwargs: Any) -> Endpoint:
        """Build an endpoint pinned to 127.0.0.1."""
        return cls(*args, ip_address=LOCALHOST, **kwargs)
def bytes_to_bits(input_bytes: bytes) -> Tuple[bool, ...]:
    """
    Convert ``input_bytes`` to a tuple of booleans, most-significant bit
    first (i.e. in the same order as the hex representation reads).
    """
    num_bits = len(input_bytes) * 8
    # stdlib replacement for eth_utils.big_endian_to_int
    as_int = int.from_bytes(input_bytes, "big")
    # Extract bits MSB-first directly, instead of building LSB-first and
    # reversing as the previous implementation did.
    return tuple(
        bool((as_int >> (num_bits - 1 - index)) & 1) for index in range(num_bits)
    )


def bits_to_bytes(input_bits: Tuple[bool, ...]) -> bytes:
    """
    Inverse of ``bytes_to_bits``: pack an MSB-first tuple of booleans back
    into bytes.

    :raises ValueError: if the number of bits is not a multiple of 8.
    """
    if len(input_bits) % 8 != 0:
        raise ValueError("Number of input bits must be a multiple of 8")
    num_bytes = len(input_bits) // 8

    as_int = 0
    for bit in input_bits:
        as_int = (as_int << 1) | bit

    # int.to_bytes pads with leading zero bytes to the requested width,
    # replacing the manual padding (and the toolz `reduce`, which raised
    # TypeError on empty input; empty input now round-trips to b"").
    return as_int.to_bytes(num_bytes, "big")
def robust_get_open_port() -> int:
    """
    Return an open port, skipping any port handed out recently.

    The OS can report the same ephemeral port as "open" twice in quick
    succession; tracking recent ports avoids collisions between tests.
    """
    port = get_open_port()
    while port in RECENT_PORTS:
        port = get_open_port()
    RECENT_PORTS.appendleft(port)
    return port
class RPC:
    """JSON-RPC endpoint names exposed by the Alexandria server."""

    # core
    getContent = RPCEndpoint("alexandria_getContent")
    retrieveContent = RPCEndpoint("alexandria_retrieveContent")

    # pinned
    addContent = RPCEndpoint("alexandria_addContent")
    deleteContent = RPCEndpoint("alexandria_deleteContent")


#
# Mungers
# See: https://github.com/ethereum/web3.py/blob/002151020cecd826a694ded2fdc10cc70e73e636/web3/method.py#L77  # noqa: E501
#
def content_key_munger(module: Any, content_key: ContentKey,) -> Tuple[HexStr]:
    """
    Normalizes the inputs JSON-RPC endpoints that take a single `ContentKey`
    """
    return (encode_hex(content_key),)


def content_key_and_content_munger(
    module: Any, content_key: ContentKey, content: bytes,
) -> Tuple[HexStr, HexStr]:
    """
    Normalizes the inputs JSON-RPC endpoints that take a 2-tuple of
    `(ContentKey, bytes)`
    """
    return (
        encode_hex(content_key),
        encode_hex(content),
    )


class AlexandriaModule(Module):
    """
    A web3.py module that exposes high level APIs for interacting with the
    discovery v5 network.
    """

    #
    # Live Content Retrieval
    #
    # Result formatter decodes the hex-encoded payload back to bytes.
    retrieve_content: Method[Callable[[ContentKey], bytes]] = Method(
        RPC.retrieveContent,
        result_formatters=lambda method, module: decode_hex,
        mungers=(content_key_munger,),
    )

    #
    # Local Storage
    #
    get_content: Method[Callable[[ContentKey], bytes]] = Method(
        RPC.getContent,
        result_formatters=lambda method, module: decode_hex,
        mungers=(content_key_munger,),
    )
    add_content: Method[Callable[[ContentKey], bytes]] = Method(
        RPC.addContent, mungers=(content_key_and_content_munger,),
    )
    delete_content: Method[Callable[[ContentKey], bytes]] = Method(
        RPC.deleteContent, mungers=(content_key_munger,),
    )
class UPnPService(Service):
    """
    Background service that keeps a UPnP NAT port mapping alive for
    ``self.port``, refreshing it every ``UPNP_PORTMAP_DURATION`` seconds.
    """

    logger = logging.getLogger("ddht.upnp")
    # Populated on the first successful port-map; wait on `_has_ip_addresses`
    # before reading either attribute.
    _internal_ip: AnyIPAddress
    _external_ip: AnyIPAddress

    def __init__(self, port: int) -> None:
        """
        :param port: The port that a server wants to bind to on this machine, and
            make publicly accessible.
        """
        self.port = port
        self._has_ip_addresses = trio.Event()
        self._ip_changed = trio.Condition()
        self._ready = trio.Event()

    async def ready(self) -> None:
        """Block until the service has begun its first port-map attempt."""
        await self._ready.wait()

    @external_trio_api
    async def get_ip_addresses(self) -> Tuple[AnyIPAddress, AnyIPAddress]:
        """Return ``(internal_ip, external_ip)``, blocking until known."""
        await self._has_ip_addresses.wait()
        return (self._internal_ip, self._external_ip)

    @external_trio_api
    async def wait_ip_changed(self) -> Tuple[AnyIPAddress, AnyIPAddress]:
        """Block until the next successful refresh, then return the new pair."""
        async with self._ip_changed:
            await self._ip_changed.wait()
        return (self._internal_ip, self._external_ip)

    async def run(self) -> None:
        """
        Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        while self.manager.is_running:
            async for _ in every(UPNP_PORTMAP_DURATION):
                with trio.move_on_after(UPNP_DISCOVER_TIMEOUT_SECONDS) as scope:
                    # NOTE(review): `_ready` fires before the mapping attempt
                    # completes, so `ready()` means "attempted", not "mapped".
                    self._ready.set()
                    try:
                        # setup_port_map blocks, so run it in a worker thread.
                        internal_ip, external_ip = await trio.to_thread.run_sync(
                            setup_port_map, self.port, UPNP_PORTMAP_DURATION,
                        )
                        self._external_ip = external_ip
                        self._internal_ip = internal_ip
                        self._has_ip_addresses.set()
                        async with self._ip_changed:
                            self._ip_changed.notify_all()

                        self.logger.debug(
                            "NAT portmap created: internal=%s external=%s",
                            internal_ip,
                            external_ip,
                        )
                    except PortMapFailed as err:
                        self.logger.error("Failed to setup NAT portmap: %s", err)
                    except Exception:
                        # Fix: message previously read "Error setuping NAT portmap".
                        self.logger.exception("Error setting up NAT portmap")

                if scope.cancelled_caught:
                    self.logger.error("Timeout attempting to setup UPnP port map")
class OutboundPacket(NamedTuple):
    """A packet paired with the endpoint it is to be sent to."""

    packet: Packet
    receiver_endpoint: Endpoint

    def __str__(self) -> str:
        return f"{self.__class__.__name__}[{self.packet.__class__.__name__}]"


#
# Packet encoding/decoding
#
@as_service
async def PacketDecoder(
    manager: ManagerAPI,
    inbound_datagram_receive_channel: ReceiveChannel[InboundDatagram],
    inbound_packet_send_channel: SendChannel[InboundPacket],
) -> None:
    """Decodes inbound datagrams to packet objects."""
    logger = logging.getLogger("ddht.v5.channel_services.PacketDecoder")

    async with inbound_datagram_receive_channel, inbound_packet_send_channel:
        async for datagram, endpoint in inbound_datagram_receive_channel:
            try:
                packet = decode_packet(datagram)
                # Lazy %-args avoid paying formatting cost on this hot path
                # when DEBUG logging is disabled (previously eager f-strings).
                logger.debug(
                    "Successfully decoded %s from %s",
                    packet.__class__.__name__,
                    endpoint,
                )
            except ValidationError:
                # Malformed datagrams are logged and dropped rather than
                # tearing the service down.
                logger.debug(
                    "Failed to decode a packet from %s", endpoint, exc_info=True
                )
            else:
                await inbound_packet_send_channel.send(InboundPacket(packet, endpoint))


@as_service
async def PacketEncoder(
    manager: ManagerAPI,
    outbound_packet_receive_channel: ReceiveChannel[OutboundPacket],
    outbound_datagram_send_channel: SendChannel[OutboundDatagram],
) -> None:
    """Encodes outbound packets to datagrams."""
    logger = logging.getLogger("ddht.v5.channel_services.PacketEncoder")

    async with outbound_packet_receive_channel, outbound_datagram_send_channel:
        async for packet, endpoint in outbound_packet_receive_channel:
            outbound_datagram = OutboundDatagram(packet.to_wire_bytes(), endpoint)
            logger.debug("Encoded %s for %s", packet.__class__.__name__, endpoint)
            await outbound_datagram_send_channel.send(outbound_datagram)
class Client(Service):
    """
    Top-level discovery v5 client: wires socket <-> datagram <-> packet <->
    message services together with zero-buffer memory channels and runs them
    as daemon children.
    """

    logger = logging.getLogger("ddht.Client")

    def __init__(
        self,
        local_private_key: keys.PrivateKey,
        enr_db: ENRDatabaseAPI,
        node_id: NodeID,
        sock: trio.socket.SocketType,
    ) -> None:

        self.enr_db = enr_db

        # Each pair is (send_channel, receive_channel); buffer size 0 so every
        # stage applies backpressure to the one feeding it.
        outbound_datagram_channels = trio.open_memory_channel[OutboundDatagram](0)
        inbound_datagram_channels = trio.open_memory_channel[InboundDatagram](0)
        outbound_packet_channels = trio.open_memory_channel[OutboundPacket](0)
        inbound_packet_channels = trio.open_memory_channel[InboundPacket](0)
        outbound_message_channels = trio.open_memory_channel[AnyOutboundMessage](0)
        inbound_message_channels = trio.open_memory_channel[AnyInboundMessage](0)

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        datagram_sender = DatagramSender(  # type: ignore
            outbound_datagram_channels[1], sock
        )
        datagram_receiver = DatagramReceiver(  # type: ignore
            sock, inbound_datagram_channels[0]
        )

        packet_encoder = PacketEncoder(  # type: ignore
            outbound_packet_channels[1], outbound_datagram_channels[0]
        )
        packet_decoder = PacketDecoder(  # type: ignore
            inbound_datagram_channels[1], inbound_packet_channels[0]
        )

        # The packer owns handshakes/encryption between packets and messages.
        self.packer = Packer(
            local_private_key=local_private_key.to_bytes(),
            local_node_id=node_id,
            enr_db=self.enr_db,
            message_type_registry=v5_registry,
            inbound_packet_receive_channel=inbound_packet_channels[1],
            inbound_message_send_channel=inbound_message_channels[0],
            outbound_message_receive_channel=outbound_message_channels[1],
            outbound_packet_send_channel=outbound_packet_channels[0],
        )

        self.message_dispatcher = MessageDispatcher(
            enr_db=self.enr_db,
            inbound_message_receive_channel=inbound_message_channels[1],
            outbound_message_send_channel=outbound_message_channels[0],
        )

        # Started in `run()`; order here is not significant since all run as
        # daemon children.
        self.services = (
            packet_encoder,
            datagram_sender,
            datagram_receiver,
            packet_decoder,
            self.packer,
            self.message_dispatcher,
        )

        # Public handle for callers to enqueue outbound messages.
        self.outbound_message_send_channel = outbound_message_channels[0]

    def discard_peer(self, remote_node_id: NodeID) -> None:
        """
        Signals that we intend not to send any more messages to the remote peer, stops the
        service associated with that peer.
        """
        if remote_node_id in self.packer.managed_peer_packers:
            self.packer.managed_peer_packers[remote_node_id].manager.cancel()

    async def run(self) -> None:
        # Run all component services as daemons; the client lives until the
        # manager itself is cancelled.
        for service in self.services:
            self.manager.run_daemon_child_service(service)
        await self.manager.wait_finished()
= ( 8 | "enr:-LK4QHAlBrRpcx9d6JTRA5kVnTNSPwVs-v_QIwBE8wfZxIqPWqqMDGGKpZDXI2lhbbnO66cmGK3eEzot3D_P_MGbcUAhh2F0dG5ldHOIgRebSXZucWmEZXRoMpCA4XabAAAAAP__________gmlkgnY0gmlwhBLDX_-Jc2VjcDI1NmsxoQOnyC60XGPSxv86ncxxezh0khFdgu7E3Cqr4imui_h_6oN0Y3CCIyiDdWRwgiMo", # noqa: E501 9 | # https://github.com/goerli/medalla/blob/cd5c2042f6249de86bfad10d0cd141c988a42089/medalla/bootnodes.txt 10 | "enr:-LK4QKWk9yZo258PQouLshTOEEGWVHH7GhKwpYmB5tmKE4eHeSfman0PZvM2Rpp54RWgoOagAsOfKoXgZSbiCYzERWABh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhDQlA5CJc2VjcDI1NmsxoQOYiWqrQtQksTEtS3qY6idxJE5wkm0t9wKqpzv2gCR21oN0Y3CCIyiDdWRwgiMo", # noqa: E501 11 | "enr:-LK4QEnIS-PIxxLCadJdnp83VXuJqgKvC9ZTIWaJpWqdKlUFCiup2sHxWihF9EYGlMrQLs0mq_2IyarhNq38eoaOHUoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhA37LMaJc2VjcDI1NmsxoQJ7k0mKtTd_kdEq251flOjD1HKpqgMmIETDoD-Msy_O-4N0Y3CCIyiDdWRwgiMo", # noqa: E501 12 | "enr:-KG4QIOJRu0BBlcXJcn3lI34Ub1aBLYipbnDaxBnr2uf2q6nE1TWnKY5OAajg3eG6mHheQSfRhXLuy-a8V5rqXKSoUEChGV0aDKQGK5MywAAAAH__________4JpZIJ2NIJpcIQKAAFhiXNlY3AyNTZrMaEDESplmV9c2k73v0DjxVXJ6__2bWyP-tK28_80lf7dUhqDdGNwgiMog3VkcIIjKA", # noqa: E501 13 | # CatDog: bridge bootnodes 14 | "enr:-Ku4QKYN_qSG6WnGMs33F4STy8canm2X7vLaz0MB6bA84YJ-GtT5CeUvkuYvMUX-mwuU3Ju14-2wZj7rjwx7eAthAL4Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDaNQiCAAAAA___________gmlkgnY0gmlwhBK4vdCJc2VjcDI1NmsxoQNYtv_PfWUWNRo99-21Y4dXl5Z-XGalHp-bJmDHod4x14N1ZHCCI1o", # noqa: E501 15 | ) 16 | 17 | 18 | NONCE_SIZE = 12 # size of an AESGCM nonce 19 | TAG_SIZE = 32 # size of the tag packet prefix 20 | MAGIC_SIZE = 32 # size of the magic hash in the who are you packet 21 | ID_NONCE_SIZE = 32 # size of the id nonce in who are you and auth tag packets 22 | RANDOM_ENCRYPTED_DATA_SIZE = 12 # size of random data we send to initiate a handshake 23 | # safe upper bound on the size of the ENR list in a nodes message 24 | NODES_MESSAGE_PAYLOAD_SIZE = DISCOVERY_MAX_PACKET_SIZE - 200 25 | 26 | ZERO_NONCE = Nonce(b"\x00" * NONCE_SIZE) # nonce 
AUTH_RESPONSE_VERSION = 5  # version number used in auth response
AUTH_SCHEME_NAME = b"gcm"  # the name of the only supported authentication scheme

TOPIC_HASH_SIZE = 32  # size of a topic hash

WHO_ARE_YOU_MAGIC_SUFFIX = b"WHOAREYOU"

MAX_REQUEST_ID = 2 ** 32 - 1  # highest request id used for outbound requests
# number of attempts we take to guess a available request id
MAX_REQUEST_ID_ATTEMPTS = 100

# timeout for waiting for response after request was sent
REQUEST_RESPONSE_TIMEOUT = 0.5
# timeout for waiting for node messages in response to find node requests
FIND_NODE_RESPONSE_TIMEOUT = 1.0
HANDSHAKE_TIMEOUT = 1  # timeout for performing a handshake
# interval of outbound pings sent to maintain the routing table
ROUTING_TABLE_PING_INTERVAL = 30
ROUTING_TABLE_LOOKUP_INTERVAL = 60  # intervals between lookups
# minimum number of ENRs desired in responses to FindNode requests
LOOKUP_RETRY_THRESHOLD = 5
LOOKUP_PARALLELIZATION_FACTOR = 3  # number of parallel lookup requests (aka alpha)

MAX_NODES_MESSAGE_TOTAL = 8  # max allowed total value for nodes messages

ID_NONCE_SIGNATURE_PREFIX = b"discovery-id-nonce"


# -- ddht/v5/endpoint_tracker.py --
import logging
from typing import NamedTuple

from async_service import Service
from eth_enr import ENRDatabaseAPI, IdentitySchemeRegistryAPI, UnsignedENR
from eth_enr.constants import IP_V4_ADDRESS_ENR_KEY, UDP_PORT_ENR_KEY
from eth_typing import NodeID
from eth_utils import encode_hex
from eth_utils.toolz import merge
from trio.abc import ReceiveChannel

from ddht.endpoint import Endpoint


class EndpointVote(NamedTuple):
    """A remote node's report of our externally visible endpoint."""

    endpoint: Endpoint
    node_id: NodeID
    timestamp: float


class EndpointTracker(Service):
    """Keeps the local ENR's ip/port entries in sync with peer endpoint votes."""

    logger = logging.getLogger("ddht.v5.endpoint_tracker.EndpointTracker")

    def __init__(
        self,
        local_private_key: bytes,
        local_node_id: NodeID,
        enr_db: ENRDatabaseAPI,
        identity_scheme_registry: IdentitySchemeRegistryAPI,
        vote_receive_channel: ReceiveChannel[EndpointVote],
    ) -> None:
        self.local_private_key = local_private_key
        self.local_node_id = local_node_id
        self.enr_db = enr_db
        self.identity_scheme_registry = identity_scheme_registry

        self.vote_receive_channel = vote_receive_channel

    async def run(self) -> None:
        async with self.vote_receive_channel:
            async for vote in self.vote_receive_channel:
                await self.handle_vote(vote)

    async def handle_vote(self, vote: EndpointVote) -> None:
        """Re-sign and store an updated local ENR when a vote disagrees with it."""
        self.logger.debug(
            "Received vote for %s from %s", vote.endpoint, encode_hex(vote.node_id)
        )

        current_enr = self.enr_db.get_enr(self.local_node_id)

        # TODO: majority voting, discard old votes
        has_endpoint_keys = (
            IP_V4_ADDRESS_ENR_KEY in current_enr and UDP_PORT_ENR_KEY in current_enr
        )
        if has_endpoint_keys and (
            vote.endpoint.ip_address == current_enr[IP_V4_ADDRESS_ENR_KEY]
            and vote.endpoint.port == current_enr[UDP_PORT_ENR_KEY]
        ):
            # Recorded endpoint already matches the vote; nothing to do.
            return

        kv_pairs = merge(
            current_enr,
            {
                IP_V4_ADDRESS_ENR_KEY: vote.endpoint.ip_address,
                UDP_PORT_ENR_KEY: vote.endpoint.port,
            },
        )
        new_unsigned_enr = UnsignedENR(
            kv_pairs=kv_pairs,
            sequence_number=current_enr.sequence_number + 1,
            identity_scheme_registry=self.identity_scheme_registry,
        )
        signed_enr = new_unsigned_enr.to_signed_enr(self.local_private_key)
        self.logger.info(
            "Updating local endpoint to %s (new ENR sequence number: %d)",
            vote.endpoint,
            signed_enr.sequence_number,
        )
        self.enr_db.set_enr(signed_enr)


# -- ddht/v5/handshake_schemes.py --
from hashlib import sha256

from eth_keys.datatypes import PrivateKey

from ddht.handshake_schemes import BaseV4HandshakeScheme, HandshakeSchemeRegistry
from ddht.typing import IDNonce
from ddht.v5.constants import ID_NONCE_SIGNATURE_PREFIX

v5_handshake_scheme_registry = HandshakeSchemeRegistry()


class SignatureInputs(NamedTuple):
    """The values hashed into the id-nonce signature preimage."""

    id_nonce: IDNonce
    ephemeral_public_key: bytes


@v5_handshake_scheme_registry.register
class V4HandshakeScheme(BaseV4HandshakeScheme[SignatureInputs]):
    signature_inputs_cls = SignatureInputs

    @classmethod
    def create_id_nonce_signature(
        cls, *, signature_inputs: SignatureInputs, private_key: bytes,
    ) -> bytes:
        """Sign the id-nonce hash with ``private_key`` (non-recoverable form)."""
        signer = PrivateKey(private_key)
        message_hash = cls.create_id_nonce_signature_input(
            signature_inputs=signature_inputs
        )
        return bytes(signer.sign_msg_hash_non_recoverable(message_hash))

    @classmethod
    def validate_id_nonce_signature(
        cls, *, signature_inputs: SignatureInputs, signature: bytes, public_key: bytes,
    ) -> None:
        """Raise if ``signature`` is not valid for these inputs under ``public_key``."""
        message_hash = cls.create_id_nonce_signature_input(
            signature_inputs=signature_inputs
        )
        cls.identity_scheme.validate_signature(
            message_hash=message_hash, signature=signature, public_key=public_key
        )

    @classmethod
    def create_id_nonce_signature_input(
        cls, *, signature_inputs: SignatureInputs,
    ) -> bytes:
        """SHA256 over the signature prefix followed by every signature input."""
        preimage = b"".join((ID_NONCE_SIGNATURE_PREFIX,) + signature_inputs)
        return sha256(preimage).digest()
from eth_enr.sedes import ENRSedes
from rlp.sedes import Binary, CountableList, big_endian_int, binary, boolean

from ddht.base_message import BaseMessage
from ddht.message_registry import MessageTypeRegistry
from ddht.sedes import ip_address_sedes
from ddht.v5.constants import TOPIC_HASH_SIZE

#
# Custom sedes objects: topics travel as fixed-size hashes.
#
topic_sedes = Binary.fixed_length(TOPIC_HASH_SIZE)


v5_registry = MessageTypeRegistry()


#
# Message types (type ids per the discovery v5 wire protocol)
#
@v5_registry.register
class PingMessage(BaseMessage):
    message_type = 1

    fields = (
        ("request_id", big_endian_int),
        ("enr_seq", big_endian_int),
    )


@v5_registry.register
class PongMessage(BaseMessage):
    message_type = 2

    fields = (
        ("request_id", big_endian_int),
        ("enr_seq", big_endian_int),
        ("packet_ip", ip_address_sedes),
        ("packet_port", big_endian_int),
    )


@v5_registry.register
class FindNodeMessage(BaseMessage):
    message_type = 3

    fields = (
        ("request_id", big_endian_int),
        ("distance", big_endian_int),
    )


@v5_registry.register
class NodesMessage(BaseMessage):
    message_type = 4

    fields = (
        ("request_id", big_endian_int),
        ("total", big_endian_int),
        ("enrs", CountableList(ENRSedes)),
    )


@v5_registry.register
class ReqTicketMessage(BaseMessage):
    message_type = 5

    fields = (
        ("request_id", big_endian_int),
        ("topic", topic_sedes),
    )


@v5_registry.register
class TicketMessage(BaseMessage):
    message_type = 6

    fields = (
        ("request_id", big_endian_int),
        ("ticket", binary),
        ("wait_time", big_endian_int),
    )


@v5_registry.register
class RegTopicMessage(BaseMessage):
    message_type = 7

    fields = (
        ("request_id", big_endian_int),
        ("ticket", binary),
        ("node_record", ENRSedes),
    )


@v5_registry.register
class RegConfirmationMessage(BaseMessage):
    message_type = 8

    fields = (
        ("request_id", big_endian_int),
        ("registered", boolean),
    )


@v5_registry.register
class TopicQueryMessage(BaseMessage):
    message_type = 9

    fields = (
        ("request_id", big_endian_int),
        ("topic", topic_sedes),
    )


# -- ddht/v5/routing_table.py --
import collections
import logging
import secrets
from typing import Any, Collection, Deque, Iterator

from eth_typing import NodeID
from eth_utils import encode_hex


class FlatRoutingTable(Collection[NodeID]):
    """An unbucketed routing table, ordered most-recently-seen first."""

    logger = logging.getLogger("ddht.v5.routing_table_manager.FlatRoutingTable")

    def __init__(self) -> None:
        self.entries: Deque[NodeID] = collections.deque()

    def add(self, node_id: NodeID) -> None:
        """Insert a new entry at the front; ``ValueError`` if already present."""
        if node_id in self:
            raise ValueError(
                f"Entry {encode_hex(node_id)} already present in the routing table"
            )
        self.logger.debug("Adding entry %s", encode_hex(node_id))
        self.entries.appendleft(node_id)

    def update(self, node_id: NodeID) -> None:
        """Move an existing entry to the front; ``KeyError`` if missing."""
        self.remove(node_id)
        self.add(node_id)

    def add_or_update(self, node_id: NodeID) -> None:
        """Move the entry to the front, inserting it if not yet present."""
        if node_id in self:
            self.remove(node_id)
        self.add(node_id)

    def remove(self, node_id: NodeID) -> None:
        """Drop an entry; ``KeyError`` if it is not present."""
        try:
            self.entries.remove(node_id)
        except ValueError:
            raise KeyError(
                f"Entry {encode_hex(node_id)} not present in the routing table"
            )
        self.logger.debug("Removing entry %s", encode_hex(node_id))

    def __contains__(self, node_id: Any) -> bool:
        return node_id in self.entries

    def __len__(self) -> int:
        return len(self.entries)

    def __iter__(self) -> Iterator[NodeID]:
        return iter(self.entries)

    def get_random_entry(self) -> NodeID:
        """Return a uniformly random entry."""
        return secrets.choice(self.entries)

    def get_oldest_entry(self) -> NodeID:
        """Return the least recently added or updated entry."""
        return self.entries[-1]


# -- ddht/v5/tags.py --
import hashlib

from ddht._utils import sxor
from ddht.v5.typing import Tag


def compute_tag(source_node_id: NodeID, destination_node_id: NodeID) -> Tag:
    """Compute the tag used in message packets sent between two nodes."""
    dest_hash = hashlib.sha256(destination_node_id).digest()
    return Tag(sxor(dest_hash, source_node_id))


def recover_source_id_from_tag(tag: Tag, destination_node_id: NodeID) -> NodeID:
    """Recover the node id of the source from the tag in a message packet."""
    dest_hash = hashlib.sha256(destination_node_id).digest()
    return NodeID(sxor(tag, dest_hash))


# -- ddht/v5/topic_table.py (imports) --
import math
import operator
from typing import DefaultDict, NamedTuple, Tuple

from eth_enr.abc import ENRAPI
from eth_utils import toolz

from ddht.v5.typing import Topic
class Ad(NamedTuple):
    """A single topic advertisement: who registered, and when."""

    enr: ENRAPI
    registration_time: float


class TopicTable:
    """Bounded store of topic advertisements with per-topic FIFO eviction."""

    def __init__(
        self, max_queue_size: int, max_total_size: int, target_ad_lifetime: float
    ) -> None:
        self.max_queue_size = max_queue_size
        self.max_total_size = max_total_size
        self.target_ad_lifetime = target_ad_lifetime

        # Each topic gets a bounded deque; appendleft at maxlen silently
        # evicts the oldest ad at the right end.
        self.topic_queues: DefaultDict[Topic, Deque[Ad]] = collections.defaultdict(
            lambda: collections.deque(maxlen=self.max_queue_size)
        )
        self.total_size = 0

    def __len__(self) -> int:
        """Return the total number of ads in the table across all queues."""
        return self.total_size

    @property
    def is_full(self) -> bool:
        return len(self) >= self.max_total_size

    def is_queue_full(self, topic: Topic) -> bool:
        return len(self.topic_queues[topic]) >= self.max_queue_size

    def get_enrs_for_topic(self, topic: Topic) -> Tuple[ENRAPI, ...]:
        """
        Get all ENRs registered for a given topic.

        The result will be ordered from newest to oldest entry.
        """
        return tuple(ad.enr for ad in self.topic_queues[topic])

    def get_wait_time(self, topic: Topic, current_time: float) -> float:
        """Return the time at which the next ad for a given topic can be added."""
        table_full = self.is_full
        queue_full = self.is_queue_full(topic)

        if not queue_full and not table_full:
            return 0

        # A slot frees up once the oldest blocking ad reaches its target
        # lifetime — whichever of the queue/table constraints binds later.
        if queue_full:
            oldest_in_queue = self.topic_queues[topic][-1].registration_time
        else:
            oldest_in_queue = -math.inf

        if table_full:
            tail_ads = [queue[-1] for queue in self.topic_queues.values() if queue]
            oldest_in_table = min(ad.registration_time for ad in tail_ads)
        else:
            oldest_in_table = -math.inf

        next_allowed = (
            max(oldest_in_queue, oldest_in_table) + self.target_ad_lifetime
        )
        return max(next_allowed - current_time, 0)

    def register(self, topic: Topic, enr: ENRAPI, current_time: float) -> None:
        """
        Register a new ad.

        A `ValueError` will be raised if the ad cannot be added because the table is full,
        because the node already is present in the queue, or because the topic's wait time is
        non-zero.
        """
        queue = self.topic_queues[topic]

        wait_time = self.get_wait_time(topic, current_time)
        if wait_time > 0:
            raise ValueError(
                f"Topic queue or table is full (time to wait: {wait_time})"
            )

        # The queue's last slot will be evicted by the appendleft below, so a
        # duplicate in that final position is tolerated.
        present_node_ids = tuple(
            entry.node_id for entry in self.get_enrs_for_topic(topic)
        )
        if enr.node_id in present_node_ids[: self.max_queue_size - 1]:
            raise ValueError(
                f"Topic queue already contains entry for node {encode_hex(enr.node_id)}"
            )

        if self.is_full:
            # Evict the globally oldest ad to make room.
            non_empty = [queue for queue in self.topic_queues.values() if queue]
            victim_queue = min(
                non_empty,
                key=toolz.compose(
                    operator.attrgetter("registration_time"), operator.itemgetter(-1)
                ),
            )
            victim_queue.pop()
            self.total_size -= 1

        # appendleft may silently evict at maxlen, so recount via len() before
        # and after rather than assuming the queue grew by one.
        self.total_size -= len(queue)
        queue.appendleft(Ad(enr=enr, registration_time=current_time))
        self.total_size += len(queue)


# -- ddht/v5/typing.py --
from typing import TYPE_CHECKING, NewType, Optional

from eth_typing import Hash32

from ddht.base_message import BaseMessage
from ddht.typing import SessionKeys

if TYPE_CHECKING:
    from ddht.v5.packets import AuthHeaderPacket  # noqa: F401

Tag = NewType("Tag", bytes)

Topic = NewType("Topic", Hash32)


class HandshakeResult(NamedTuple):
    """The artifacts produced by a completed handshake."""

    session_keys: SessionKeys
    enr: Optional[ENRAPI]
    message: Optional[BaseMessage]
    auth_header_packet: Optional["AuthHeaderPacket"]
# NOTE: the fallback keeps this module importable standalone; the value mirrors
# ddht.v5_1.alexandria.constants.MAX_RADIUS (2**256 - 1).
try:
    from .constants import MAX_RADIUS
except ImportError:
    MAX_RADIUS = 2 ** 256 - 1


def humanize_advertisement_radius(radius: int, max_radius: int = MAX_RADIUS) -> float:
    """
    Render an advertisement radius in ``[0, max_radius]`` as a human-friendly float.

    The whole part is ``radius.bit_length() - 1`` (the log2 bucket) and the
    fractional part is the linear position within that bucket; a full radius
    displays as ``max_radius.bit_length()``.

    Bug fix: ``max_radius`` was previously accepted but ignored — the body
    always compared against the module-level ``MAX_RADIUS`` constant, so
    callers passing a custom maximum got wrong results.
    """
    if radius == max_radius:
        return 1.0 * max_radius.bit_length()

    display_whole = radius.bit_length() - 1

    remainder = radius % 2 ** display_whole
    # The bucket [2**w, 2**(w+1)) spans exactly 2**w values.
    remainder_max = 2 ** (display_whole + 1) - 2 ** display_whole

    display_fraction = remainder / remainder_max

    # int + float -> float, so no cast or type-ignore is needed.
    return display_whole + display_fraction
import argparse

from ddht.app import BaseApplication
from ddht.boot_info import BootInfo
from ddht.v5_1.alexandria.boot_info import AlexandriaBootInfo
from ddht.v5_1.alexandria.content_storage import ContentStorage
from ddht.v5_1.alexandria.network import AlexandriaNetwork
from ddht.v5_1.alexandria.rpc_handlers import get_alexandria_rpc_handlers
from ddht.v5_1.alexandria.xdg import get_xdg_alexandria_root
from ddht.v5_1.app import Application


class AlexandriaApplication(BaseApplication):
    """Runs the alexandria overlay on top of the base discovery v5.1 application."""

    base_protocol_app: Application

    def __init__(self, args: argparse.Namespace, boot_info: BootInfo) -> None:
        super().__init__(args, boot_info)
        self._alexandria_boot_info = AlexandriaBootInfo.from_namespace(self._args)
        self.base_protocol_app = Application(self._args, self._boot_info)

    async def run(self) -> None:
        # The base protocol must be live before the overlay network can start.
        self.manager.run_daemon_child_service(self.base_protocol_app)
        await self.base_protocol_app.wait_ready()

        alexandria_root = get_xdg_alexandria_root()
        alexandria_root.mkdir(parents=True, exist_ok=True)

        network = AlexandriaNetwork(
            network=self.base_protocol_app.network,
            bootnodes=self._alexandria_boot_info.bootnodes,
            storage=ContentStorage.memory(),
        )
        self.manager.run_daemon_child_service(network)

        self.logger.info("Starting Alexandria...")
        self.logger.info("Root Directory : %s", alexandria_root)
        await network.ready()

        if self._boot_info.is_rpc_enabled:
            # NOTE(review): "add_handers" is the spelling of the rpc_server API.
            self.base_protocol_app.rpc_server.add_handers(
                get_alexandria_rpc_handlers(network)
            )

        await self.manager.wait_finished()


# -- ddht/v5_1/alexandria/boot_info.py (imports) --
from dataclasses import dataclass
import pathlib
from typing import Literal, Optional, Sequence, Tuple, TypedDict, Union

from eth_enr import ENR
from eth_enr.abc import ENRAPI

from ddht.v5_1.alexandria.constants import (
    DEFAULT_BOOTNODES,
    DEFAULT_COMMONS_STORAGE_SIZE,
    DEFAULT_MAX_ADVERTISEMENTS,
)
class AlexandriaBootInfoKwargs(TypedDict, total=False):
    bootnodes: Tuple[ENRAPI, ...]

    max_advertisement_count: int

    commons_storage_size: int
    commons_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]

    pinned_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]


def _parse_storage_location(
    raw: Optional[str],
) -> Optional[Union[Literal[":memory:"], pathlib.Path]]:
    """Normalize a CLI storage argument: keep ":memory:", expand real paths."""
    if raw is None:
        return None
    if raw == ":memory:":
        return ":memory:"
    return pathlib.Path(raw).expanduser().resolve()


def _cli_args_to_boot_info_kwargs(args: argparse.Namespace) -> AlexandriaBootInfoKwargs:
    """Translate parsed CLI arguments into kwargs, filling in defaults."""
    if args.alexandria_bootnodes is None:
        bootnodes = tuple(ENR.from_repr(enr_repr) for enr_repr in DEFAULT_BOOTNODES)
    else:
        bootnodes = args.alexandria_bootnodes

    max_advertisement_count: int
    if args.alexandria_max_advertisement_count is None:
        max_advertisement_count = DEFAULT_MAX_ADVERTISEMENTS
    else:
        max_advertisement_count = args.alexandria_max_advertisement_count

    commons_storage_size: int
    if args.alexandria_commons_storage_size is None:
        commons_storage_size = DEFAULT_COMMONS_STORAGE_SIZE
    else:
        commons_storage_size = args.alexandria_commons_storage_size

    return AlexandriaBootInfoKwargs(
        bootnodes=bootnodes,
        max_advertisement_count=max_advertisement_count,
        commons_storage_size=commons_storage_size,
        commons_storage=_parse_storage_location(args.alexandria_commons_storage),
        pinned_storage=_parse_storage_location(args.alexandria_pinned_storage),
    )


@dataclass(frozen=True)
class AlexandriaBootInfo:
    """Immutable runtime configuration for the alexandria component."""

    bootnodes: Tuple[ENRAPI, ...]

    max_advertisement_count: int

    commons_storage_size: int
    commons_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]

    pinned_storage: Optional[Union[Literal[":memory:"], pathlib.Path]]

    @classmethod
    def from_cli_args(cls, args: Sequence[str]) -> "AlexandriaBootInfo":
        # Import here to prevent circular imports
        from ddht.cli_parser import parser

        namespace = parser.parse_args(args)
        return cls.from_namespace(namespace)

    @classmethod
    def from_namespace(cls, args: argparse.Namespace) -> "AlexandriaBootInfo":
        return cls(**_cli_args_to_boot_info_kwargs(args))
from typing import Tuple

from ddht.constants import DISCOVERY_MAX_PACKET_SIZE

ALEXANDRIA_PROTOCOL_ID = b"portal"

DEFAULT_BOOTNODES: Tuple[str, ...] = ()

# 1 gigabyte
GB = 1024 * 1024 * 1024  # 2**30


# All of the powers of two
POWERS_OF_TWO = tuple(2 ** n for n in range(256))

# Safe upper bound for the raw payload of an alexandria packet. Expected overhead:
#
# - 23 bytes for the packet header
# - up-to 34 bytes of packet overhead.
# - up-to 4 bytes for request_id
# - 10 bytes for the `protocol_id`
# - 1 byte for the alexandria message type
# - up-to 10 bytes for RLP encoding overhead.
#
MAX_PAYLOAD_SIZE = DISCOVERY_MAX_PACKET_SIZE - 90


# One hour in seconds
ONE_HOUR = 60 * 60


# One Megabyte
MB = 1024 * 1024


# Default max bytes for "commons" storage
DEFAULT_COMMONS_STORAGE_SIZE = 100 * MB


# Default maximum number of advertisements
DEFAULT_MAX_ADVERTISEMENTS = 1048576


MAX_RADIUS = 2 ** 256 - 1


# -- ddht/v5_1/alexandria/content.py --
import hashlib

from eth_typing import NodeID

from .typing import ContentID, ContentKey


def content_key_to_content_id(key: ContentKey) -> ContentID:
    """The SHA256 of a content key is that content's address in the keyspace."""
    return ContentID(hashlib.sha256(key).digest())


def compute_content_distance(node_id: NodeID, content_id: ContentID) -> int:
    """XOR metric between a node id and a content id (both big-endian bytes)."""
    return int.from_bytes(node_id, "big") ^ int.from_bytes(content_id, "big")


def compute_content_log_distance(node_id: NodeID, content_id: ContentID) -> int:
    """Logarithmic (bit-length) form of the XOR content distance."""
    return compute_content_distance(node_id, content_id).bit_length()
from typing import NamedTuple, Sequence, Tuple

from eth_enr import ENR, ENRAPI
import rlp

from ddht.v5_1.alexandria.typing import ContentKey


class PingPayload(NamedTuple):
    enr_seq: int
    advertisement_radius: int


class PongPayload(NamedTuple):
    enr_seq: int
    advertisement_radius: int


class FindNodesPayload(NamedTuple):
    distances: Tuple[int, ...]


class FoundNodesPayload(NamedTuple):
    """NODES response carrying RLP-encoded ENRs."""

    total: int
    encoded_enrs: Tuple[bytes, ...]

    @property
    def enrs(self) -> Tuple[ENRAPI]:
        return tuple(  # type: ignore
            rlp.decode(encoded, sedes=ENR) for encoded in self.encoded_enrs
        )

    @classmethod
    def from_enrs(cls, total: int, enrs: Sequence[ENRAPI]) -> "FoundNodesPayload":
        return cls(total, tuple(rlp.encode(enr) for enr in enrs))


class FindContentPayload(NamedTuple):
    content_key: ContentKey


class FoundContentPayload(NamedTuple):
    """CONTENT response: either closer ENRs or the content payload itself."""

    encoded_enrs: Tuple[bytes, ...]
    content: bytes

    @property
    def is_content(self) -> bool:
        return bool(self.content)

    @property
    def enrs(self) -> Tuple[ENRAPI]:
        return tuple(  # type: ignore
            rlp.decode(encoded, sedes=ENR) for encoded in self.encoded_enrs
        )


# -- ddht/v5_1/alexandria/rlp_sedes.py --
from rlp import sedes

# Shared fixed-width RLP sedes for alexandria payloads.
address = sedes.Binary.fixed_length(20, allow_empty=True)
hash32 = sedes.Binary.fixed_length(32)
uint32 = sedes.BigEndianInt(32)
uint256 = sedes.BigEndianInt(256)


# -- ddht/v5_1/alexandria/sedes.py --
from ssz.sedes import Container, List, uint8, uint16, uint64, uint256


class ByteList(List):  # type: ignore
    """An SSZ ``List[uint8]`` whose python-side representation is raw ``bytes``."""

    def __init__(self, max_length: int) -> None:
        super().__init__(element_sedes=uint8, max_length=max_length)

    def serialize(self, value: bytes) -> bytes:
        return value

    def deserialize(self, value: bytes) -> bytes:
        return value


byte_list = ByteList(max_length=2048)


PingSedes = Container(field_sedes=(uint64, uint256))
PongSedes = Container(field_sedes=(uint64, uint256))

FindNodesSedes = Container(field_sedes=(List(uint16, max_length=256),))
FoundNodesSedes = Container(field_sedes=(uint8, List(byte_list, max_length=32)))

FindContentSedes = Container(field_sedes=(byte_list,))
FoundContentSedes = Container(field_sedes=(List(byte_list, max_length=32), byte_list))
from typing import NewType

# Identifier types for alexandria content addressing.
ContentID = NewType("ContentID", bytes)
ContentKey = NewType("ContentKey", bytes)


# -- ddht/v5_1/alexandria/xdg.py --
import os
from pathlib import Path


def get_xdg_alexandria_root() -> Path:
    """
    Returns the base directory under which alexandria will store data.

    The ``XDG_ALEXANDRIA_ROOT`` environment variable overrides the default
    location beneath the ddht XDG root.
    """
    override = os.environ.get("XDG_ALEXANDRIA_ROOT")
    if override is not None:
        return Path(override)

    # Imported lazily so the override path carries no ddht dependency.
    from ddht.xdg import get_xdg_ddht_root

    return get_xdg_ddht_root() / "alexandria"
= ( 16 | # DDHT: Alice 17 | "enr:-IS4QNIktXW8LPFA2B5n8jbF6fwScqUnO59gyZyg7CExFPHOO5z7nHBUjqbtbuS7Mk6Z2TL3eZiECpGmYCeGPlJzrLIDgmlkgnY0gmlwhC1PSnGJc2VjcDI1NmsxoQLvfEFi6FaFI7bp7Cw8yfZ17AdDwceRSQH7BxL5VhUNd4N1ZHCCdl8", # noqa: E501 18 | # DDHT: Bob 19 | "enr:-IS4QKcAHi77_OQBuGolVX-I1dmQxyuZAsSTh3Z7Jck3LrzbYQ2NXzMEKvpit0cyH2coB55ddVDvKA8p5IUcg7DLQj4DgmlkgnY0gmlwhC1PW26Jc2VjcDI1NmsxoQPNz0D8sSVKyNTZuGRTTnPabutpJ8IUxpAyMqrVosZ14IN1ZHCCdl8", # noqa: E501 20 | # CatDog: bridge bootnodes 21 | "enr:-Ku4QJmPsyq4lmDdFebMKXk7vdt8WsLWkArYT2K8eN057oFudm2tITrZJD9sq1x92-bRmXTyAJgb2FD4ior-KHIU3KcDh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDaNQiCAAAAA___________gmlkgnY0gmlwhBK4vdCJc2VjcDI1NmsxoQMWAsR84_ETgq4-14FV2x00ptmI-YU3tdkZV9CUgYPEnIN1ZHCCI1s", # noqa: E501 22 | # Lighthouse nodes 23 | "enr:-LK4QCGFeQXjpQkgOfLHsbTjD65IOtSqV7Qo-Qdqv6SrL8lqFY7INPMMGP5uGKkVDcJkeXimSeNeypaZV3MHkcJgr9QCh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhA37LMaJc2VjcDI1NmsxoQJ7k0mKtTd_kdEq251flOjD1HKpqgMmIETDoD-Msy_O-4N0Y3CCIyiDdWRwgiMo", # noqa: E501 24 | "enr:-LK4QCpyWmMLYwC2umMJ_g0c9VY7YOFwZyaR80_tuQNTWOzJbaR82DDhVQYqmE_0gvN6Du5jwnxzIaaNRZQlVXzfIK0Dh2F0dG5ldHOIAAAAAAAAAACEZXRoMpDnp11aAAAAAf__________gmlkgnY0gmlwhCLR2xuJc2VjcDI1NmsxoQOYiWqrQtQksTEtS3qY6idxJE5wkm0t9wKqpzv2gCR21oN0Y3CCIyiDdWRwgiMo", # noqa: E501 25 | # Prismatic 26 | "enr:-Ku4QHWezvidY_m0dWEwERrNrqjEQWrlIx7b8K4EIxGgTrLmUxHCZPW5-t8PsS8nFxAJ8k8YacKP5zPRk5gbsTSsRTQBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMypP_ODwTuBq2v0oIdjPGCEyu9Hb_jHDbuIX_iNvBRGoN1ZHCCGWQ", # noqa: E501 27 | "enr:-Ku4QOnVSyvzS3VbF87J8MubaRuTyfPi6B67XQg6-5eAV_uILAhn9geTTQmfqDIOcIeAxWHUUajQp6lYniAXPWncp6UBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAYrkzLAAAAAf__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQKekYKqUtwbaJKKCct_srE5-g7tBUm68mj_jpeSb7CCqYN1ZHCCC7g", # noqa: E501 28 | ) 29 | 30 | 31 | DEFAULT_BOOTNODES: Tuple[str, ...] 
= () 32 | 33 | 34 | PACKET_VERSION_1 = b"\x00\x01" 35 | 36 | ID_NONCE_SIGNATURE_PREFIX = b"discovery v5 identity proof" 37 | 38 | HEADER_PACKET_SIZE = 23 39 | 40 | PROTOCOL_ID = b"discv5" 41 | 42 | WHO_ARE_YOU_PACKET_SIZE = 24 43 | 44 | HANDSHAKE_HEADER_PACKET_SIZE = 34 45 | 46 | MESSAGE_PACKET_SIZE = 32 47 | -------------------------------------------------------------------------------- /ddht/v5_1/crawler.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Iterator 3 | 4 | from async_service import Service 5 | from eth_typing import NodeID 6 | 7 | from ddht._utils import every 8 | from ddht.kademlia import at_log_distance 9 | from ddht.v5_1.abc import NetworkProtocol 10 | from ddht.v5_1.explorer import Explorer 11 | 12 | 13 | class CrawlerExplorer(Explorer): 14 | def _get_ordered_candidates(self) -> Iterator[NodeID]: 15 | return iter(self.seen) 16 | 17 | 18 | class Crawler(Service): 19 | logger = logging.getLogger("ddht.Crawler") 20 | 21 | def __init__(self, network: NetworkProtocol, concurrency: int = 32) -> None: 22 | target = at_log_distance(network.local_node_id, 256) 23 | self._explorer = CrawlerExplorer(network, target, concurrency) 24 | 25 | async def run(self) -> None: 26 | self.logger.info("crawl-starting") 27 | 28 | self.manager.run_child_service(self._explorer) 29 | await self._explorer.ready() 30 | 31 | self.manager.run_daemon_task(self._periodically_report_crawl_stats) 32 | 33 | async with self._explorer.stream() as enrs_aiter: 34 | async for enr in enrs_aiter: 35 | pass 36 | 37 | stats = self._explorer.get_stats() 38 | citizen_count = ( 39 | stats.seen 40 | - stats.unresponsive 41 | - stats.invalid 42 | - stats.unreachable 43 | - stats.pending 44 | ) 45 | self.logger.info("crawl-finished: citizens=%d %s", citizen_count, stats) 46 | self.manager.cancel() 47 | 48 | async def _periodically_report_crawl_stats(self) -> None: 49 | async for _ in every(5, 5): 50 | stats = 
self._explorer.get_stats() 51 | citizen_count = ( 52 | stats.seen 53 | - stats.unresponsive 54 | - stats.invalid 55 | - stats.unreachable 56 | - stats.pending 57 | ) 58 | self.logger.info("crawl-stats: citizens=%d %s", citizen_count, stats) 59 | -------------------------------------------------------------------------------- /ddht/v5_1/exceptions.py: -------------------------------------------------------------------------------- 1 | from ddht.exceptions import BaseDDHTError 2 | 3 | 4 | class SessionNotFound(BaseDDHTError): 5 | pass 6 | 7 | 8 | class ProtocolNotSupported(BaseDDHTError): 9 | pass 10 | -------------------------------------------------------------------------------- /ddht/v5_1/handshake_schemes.py: -------------------------------------------------------------------------------- 1 | from hashlib import sha256 2 | from typing import NamedTuple 3 | 4 | from eth_keys.datatypes import PrivateKey 5 | from eth_typing import NodeID 6 | 7 | from ddht.handshake_schemes import BaseV4HandshakeScheme, HandshakeSchemeRegistry 8 | from ddht.v5_1.constants import ID_NONCE_SIGNATURE_PREFIX 9 | from ddht.v5_1.packets import Header, WhoAreYouPacket 10 | 11 | v51_handshake_scheme_registry = HandshakeSchemeRegistry() 12 | 13 | 14 | class SignatureInputs(NamedTuple): 15 | iv: bytes 16 | header: Header 17 | who_are_you: WhoAreYouPacket 18 | ephemeral_public_key: bytes 19 | recipient_node_id: NodeID 20 | 21 | 22 | @v51_handshake_scheme_registry.register 23 | class V4HandshakeScheme(BaseV4HandshakeScheme[SignatureInputs]): 24 | signature_inputs_cls = SignatureInputs 25 | 26 | @classmethod 27 | def create_id_nonce_signature( 28 | cls, *, signature_inputs: SignatureInputs, private_key: bytes, 29 | ) -> bytes: 30 | private_key_object = PrivateKey(private_key) 31 | signature_input = cls.create_id_nonce_signature_input( 32 | signature_inputs=signature_inputs 33 | ) 34 | signature = private_key_object.sign_msg_hash_non_recoverable(signature_input) 35 | return bytes(signature) 36 | 
37 | @classmethod 38 | def validate_id_nonce_signature( 39 | cls, *, signature_inputs: SignatureInputs, signature: bytes, public_key: bytes, 40 | ) -> None: 41 | signature_input = cls.create_id_nonce_signature_input( 42 | signature_inputs=signature_inputs 43 | ) 44 | cls.identity_scheme.validate_signature( 45 | message_hash=signature_input, signature=signature, public_key=public_key 46 | ) 47 | 48 | @classmethod 49 | def create_id_nonce_signature_input( 50 | cls, *, signature_inputs: SignatureInputs, 51 | ) -> bytes: 52 | preimage = b"".join( 53 | ( 54 | ID_NONCE_SIGNATURE_PREFIX, 55 | signature_inputs.iv, 56 | signature_inputs.header.to_wire_bytes(), 57 | signature_inputs.who_are_you.to_wire_bytes(), 58 | signature_inputs.ephemeral_public_key, 59 | signature_inputs.recipient_node_id, 60 | ) 61 | ) 62 | return sha256(preimage).digest() 63 | -------------------------------------------------------------------------------- /ddht/v5_1/messages.py: -------------------------------------------------------------------------------- 1 | from typing import cast 2 | 3 | from eth_enr.sedes import ENRSedes 4 | import rlp 5 | from rlp.sedes import Binary, CountableList, big_endian_int, binary 6 | 7 | from ddht.base_message import BaseMessage 8 | from ddht.encryption import aesgcm_decrypt 9 | from ddht.message_registry import MessageTypeRegistry 10 | from ddht.sedes import ip_address_sedes 11 | from ddht.typing import AES128Key, Nonce 12 | 13 | topic_sedes = Binary.fixed_length(32) 14 | 15 | 16 | v51_registry = MessageTypeRegistry() 17 | 18 | 19 | # 20 | # Message types 21 | # 22 | @v51_registry.register 23 | class PingMessage(BaseMessage): 24 | message_type = 1 25 | 26 | fields = (("request_id", binary), ("enr_seq", big_endian_int)) 27 | 28 | 29 | @v51_registry.register 30 | class PongMessage(BaseMessage): 31 | message_type = 2 32 | 33 | fields = ( 34 | ("request_id", binary), 35 | ("enr_seq", big_endian_int), 36 | ("packet_ip", ip_address_sedes), 37 | ("packet_port", 
big_endian_int), 38 | ) 39 | 40 | 41 | @v51_registry.register 42 | class FindNodeMessage(BaseMessage): 43 | message_type = 3 44 | 45 | fields = ( 46 | ("request_id", binary), 47 | ("distances", CountableList(big_endian_int)), 48 | ) 49 | 50 | 51 | @v51_registry.register 52 | class FoundNodesMessage(BaseMessage): 53 | message_type = 4 54 | 55 | fields = ( 56 | ("request_id", binary), 57 | ("total", big_endian_int), 58 | ("enrs", CountableList(ENRSedes)), 59 | ) 60 | 61 | 62 | @v51_registry.register 63 | class TalkRequestMessage(BaseMessage): 64 | message_type = 5 65 | 66 | protocol: bytes 67 | payload: bytes 68 | 69 | fields = (("request_id", binary), ("protocol", binary), ("payload", binary)) 70 | 71 | 72 | @v51_registry.register 73 | class TalkResponseMessage(BaseMessage): 74 | message_type = 6 75 | 76 | payload: bytes 77 | 78 | fields = (("request_id", binary), ("payload", binary)) 79 | 80 | 81 | @v51_registry.register 82 | class RegisterTopicMessage(BaseMessage): 83 | message_type = 7 84 | 85 | fields = ( 86 | ("request_id", binary), 87 | ("topic", topic_sedes), 88 | ("enr", ENRSedes), 89 | ("ticket", binary), 90 | ) 91 | 92 | 93 | @v51_registry.register 94 | class TicketMessage(BaseMessage): 95 | message_type = 8 96 | 97 | fields = ( 98 | ("request_id", binary), 99 | ("ticket", binary), 100 | ("wait_time", big_endian_int), 101 | ) 102 | 103 | 104 | @v51_registry.register 105 | class RegistrationConfirmationMessage(BaseMessage): 106 | message_type = 9 107 | 108 | fields = (("request_id", binary), ("topic", binary)) 109 | 110 | 111 | @v51_registry.register 112 | class TopicQueryMessage(BaseMessage): 113 | message_type = 10 114 | 115 | fields = (("request_id", binary), ("topic", topic_sedes)) 116 | 117 | 118 | def decode_message( 119 | decryption_key: AES128Key, 120 | aes_gcm_nonce: Nonce, 121 | message_cipher_text: bytes, 122 | authenticated_data: bytes, 123 | message_type_registry: MessageTypeRegistry = v51_registry, 124 | ) -> BaseMessage: 125 | 
message_plain_text = aesgcm_decrypt( 126 | key=decryption_key, 127 | nonce=aes_gcm_nonce, 128 | cipher_text=message_cipher_text, 129 | authenticated_data=authenticated_data, 130 | ) 131 | message_type = message_plain_text[0] 132 | message_sedes = message_type_registry[message_type] 133 | message = rlp.decode(message_plain_text[1:], sedes=message_sedes) 134 | 135 | return cast(BaseMessage, message) 136 | -------------------------------------------------------------------------------- /ddht/xdg.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | 5 | def get_home() -> Path: 6 | try: 7 | return Path(os.environ["HOME"]) 8 | except KeyError: 9 | raise Exception("$HOME environment variable not set") 10 | 11 | 12 | def get_xdg_cache_home() -> Path: 13 | try: 14 | return Path(os.environ["XDG_CACHE_HOME"]) 15 | except KeyError: 16 | return get_home() / ".cache" 17 | 18 | 19 | def get_xdg_config_home() -> Path: 20 | try: 21 | return Path(os.environ["XDG_CONFIG_HOME"]) 22 | except KeyError: 23 | return get_home() / ".config" 24 | 25 | 26 | def get_xdg_data_home() -> Path: 27 | try: 28 | return Path(os.environ["XDG_DATA_HOME"]) 29 | except KeyError: 30 | return get_home() / ".local" / "share" 31 | 32 | 33 | def get_xdg_ddht_root() -> Path: 34 | """ 35 | Returns the base directory under which ddht will store all data. 
36 | """ 37 | try: 38 | return Path(os.environ["XDG_DDHT_ROOT"]) 39 | except KeyError: 40 | return get_xdg_data_home() / "ddht" 41 | -------------------------------------------------------------------------------- /docs/_static/.suppress-sphinx-build-warning: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/ddht/142911d134ff839f3f79ff8fe9e45d3fe5a58cd0/docs/_static/.suppress-sphinx-build-warning -------------------------------------------------------------------------------- /docs/ddht.rst: -------------------------------------------------------------------------------- 1 | ddht package 2 | ============ 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | ddht.tools 10 | ddht.v5 11 | 12 | Submodules 13 | ---------- 14 | 15 | ddht.abc module 16 | --------------- 17 | 18 | .. automodule:: ddht.abc 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | ddht.app module 24 | --------------- 25 | 26 | .. automodule:: ddht.app 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | ddht.boot\_info module 32 | ---------------------- 33 | 34 | .. automodule:: ddht.boot_info 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | ddht.cli\_commands module 40 | ------------------------- 41 | 42 | .. automodule:: ddht.cli_commands 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | ddht.cli\_parser module 48 | ----------------------- 49 | 50 | .. automodule:: ddht.cli_parser 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | ddht.constants module 56 | --------------------- 57 | 58 | .. automodule:: ddht.constants 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | ddht.enr module 64 | --------------- 65 | 66 | .. automodule:: ddht.enr 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | ddht.exceptions module 72 | ---------------------- 73 | 74 | .. 
automodule:: ddht.exceptions 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | ddht.identity\_schemes module 80 | ----------------------------- 81 | 82 | .. automodule:: ddht.identity_schemes 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | ddht.kademlia module 88 | -------------------- 89 | 90 | .. automodule:: ddht.kademlia 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | ddht.logging module 96 | ------------------- 97 | 98 | .. automodule:: ddht.logging 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | ddht.main module 104 | ---------------- 105 | 106 | .. automodule:: ddht.main 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | 111 | ddht.typing module 112 | ------------------ 113 | 114 | .. automodule:: ddht.typing 115 | :members: 116 | :undoc-members: 117 | :show-inheritance: 118 | 119 | ddht.validation module 120 | ---------------------- 121 | 122 | .. automodule:: ddht.validation 123 | :members: 124 | :undoc-members: 125 | :show-inheritance: 126 | 127 | ddht.xdg module 128 | --------------- 129 | 130 | .. automodule:: ddht.xdg 131 | :members: 132 | :undoc-members: 133 | :show-inheritance: 134 | 135 | 136 | Module contents 137 | --------------- 138 | 139 | .. automodule:: ddht 140 | :members: 141 | :undoc-members: 142 | :show-inheritance: 143 | -------------------------------------------------------------------------------- /docs/ddht.tools.factories.rst: -------------------------------------------------------------------------------- 1 | ddht.tools.factories package 2 | ============================ 3 | 4 | Submodules 5 | ---------- 6 | 7 | ddht.tools.factories.boot\_info module 8 | -------------------------------------- 9 | 10 | .. automodule:: ddht.tools.factories.boot_info 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | ddht.tools.factories.discovery module 16 | ------------------------------------- 17 | 18 | .. 
automodule:: ddht.tools.factories.discovery 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | ddht.tools.factories.kademlia module 24 | ------------------------------------ 25 | 26 | .. automodule:: ddht.tools.factories.kademlia 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | ddht.tools.factories.keys module 32 | -------------------------------- 33 | 34 | .. automodule:: ddht.tools.factories.keys 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | ddht.tools.factories.socket module 40 | ---------------------------------- 41 | 42 | .. automodule:: ddht.tools.factories.socket 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | 48 | Module contents 49 | --------------- 50 | 51 | .. automodule:: ddht.tools.factories 52 | :members: 53 | :undoc-members: 54 | :show-inheritance: 55 | -------------------------------------------------------------------------------- /docs/ddht.tools.rst: -------------------------------------------------------------------------------- 1 | ddht.tools package 2 | ================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | ddht.tools.factories 10 | 11 | Submodules 12 | ---------- 13 | 14 | ddht.tools.v5\_strategies module 15 | -------------------------------- 16 | 17 | .. automodule:: ddht.tools.v5_strategies 18 | :members: 19 | :undoc-members: 20 | :show-inheritance: 21 | 22 | 23 | Module contents 24 | --------------- 25 | 26 | .. automodule:: ddht.tools 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | -------------------------------------------------------------------------------- /docs/ddht.v5.rst: -------------------------------------------------------------------------------- 1 | ddht.v5 package 2 | =============== 3 | 4 | Submodules 5 | ---------- 6 | 7 | ddht.v5.abc module 8 | ------------------ 9 | 10 | .. 
automodule:: ddht.v5.abc 11 | :members: 12 | :undoc-members: 13 | :show-inheritance: 14 | 15 | ddht.v5.channel\_services module 16 | -------------------------------- 17 | 18 | .. automodule:: ddht.v5.channel_services 19 | :members: 20 | :undoc-members: 21 | :show-inheritance: 22 | 23 | ddht.v5.constants module 24 | ------------------------ 25 | 26 | .. automodule:: ddht.v5.constants 27 | :members: 28 | :undoc-members: 29 | :show-inheritance: 30 | 31 | ddht.v5.encryption module 32 | ------------------------- 33 | 34 | .. automodule:: ddht.v5.encryption 35 | :members: 36 | :undoc-members: 37 | :show-inheritance: 38 | 39 | ddht.v5.endpoint\_tracker module 40 | -------------------------------- 41 | 42 | .. automodule:: ddht.v5.endpoint_tracker 43 | :members: 44 | :undoc-members: 45 | :show-inheritance: 46 | 47 | ddht.v5.handshake module 48 | ------------------------ 49 | 50 | .. automodule:: ddht.v5.handshake 51 | :members: 52 | :undoc-members: 53 | :show-inheritance: 54 | 55 | ddht.v5.message\_dispatcher module 56 | ---------------------------------- 57 | 58 | .. automodule:: ddht.v5.message_dispatcher 59 | :members: 60 | :undoc-members: 61 | :show-inheritance: 62 | 63 | ddht.v5.messages module 64 | ----------------------- 65 | 66 | .. automodule:: ddht.v5.messages 67 | :members: 68 | :undoc-members: 69 | :show-inheritance: 70 | 71 | ddht.v5.packer module 72 | --------------------- 73 | 74 | .. automodule:: ddht.v5.packer 75 | :members: 76 | :undoc-members: 77 | :show-inheritance: 78 | 79 | ddht.v5.packets module 80 | ---------------------- 81 | 82 | .. automodule:: ddht.v5.packets 83 | :members: 84 | :undoc-members: 85 | :show-inheritance: 86 | 87 | ddht.v5.routing\_table module 88 | ----------------------------- 89 | 90 | .. automodule:: ddht.v5.routing_table 91 | :members: 92 | :undoc-members: 93 | :show-inheritance: 94 | 95 | ddht.v5.routing\_table\_manager module 96 | -------------------------------------- 97 | 98 | .. 
automodule:: ddht.v5.routing_table_manager 99 | :members: 100 | :undoc-members: 101 | :show-inheritance: 102 | 103 | ddht.v5.tags module 104 | ------------------- 105 | 106 | .. automodule:: ddht.v5.tags 107 | :members: 108 | :undoc-members: 109 | :show-inheritance: 110 | 111 | ddht.v5.topic\_table module 112 | --------------------------- 113 | 114 | .. automodule:: ddht.v5.topic_table 115 | :members: 116 | :undoc-members: 117 | :show-inheritance: 118 | 119 | ddht.v5.typing module 120 | --------------------- 121 | 122 | .. automodule:: ddht.v5.typing 123 | :members: 124 | :undoc-members: 125 | :show-inheritance: 126 | 127 | 128 | Module contents 129 | --------------- 130 | 131 | .. automodule:: ddht.v5 132 | :members: 133 | :undoc-members: 134 | :show-inheritance: 135 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Discovery V5 DHT 2 | ============================== 3 | 4 | Implementation of the P2P Discoveryv5 Protocol 5 | 6 | Contents 7 | -------- 8 | 9 | .. toctree:: 10 | :maxdepth: 3 11 | 12 | ddht 13 | jsonrpc 14 | release_notes 15 | 16 | 17 | Indices and tables 18 | ------------------ 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | -------------------------------------------------------------------------------- /docs/release_notes.rst: -------------------------------------------------------------------------------- 1 | Release Notes 2 | ============= 3 | 4 | .. 
towncrier release notes start 5 | 6 | v0.1.0-alpha.1 7 | -------------- 8 | 9 | - Launched repository, claimed names for pip, RTD, github, etc 10 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | 3 | mypy_path = "stubs" 4 | check_untyped_defs = True 5 | disallow_incomplete_defs = True 6 | disallow_untyped_defs = True 7 | disallow_any_generics = True 8 | disallow_untyped_calls = True 9 | disallow_untyped_decorators = True 10 | disallow_subclassing_any = True 11 | ignore_missing_imports = True 12 | strict_optional = True 13 | strict_equality = True 14 | warn_redundant_casts = True 15 | warn_return_any = True 16 | warn_unused_configs = True 17 | warn_unused_ignores = True 18 | -------------------------------------------------------------------------------- /newsfragments/139.bugfix.rst: -------------------------------------------------------------------------------- 1 | Update dispatcher timeouts to wrap EndOfChannel errors as TooSlowError. 2 | -------------------------------------------------------------------------------- /newsfragments/140.feature.rst: -------------------------------------------------------------------------------- 1 | Implement CLI entry point for running Alexandria client. 2 | -------------------------------------------------------------------------------- /newsfragments/173.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_sendPing json rpc endpoint. 2 | -------------------------------------------------------------------------------- /newsfragments/174.misc.rst: -------------------------------------------------------------------------------- 1 | Relocate rpc handler validation utils to validation module. 
2 | -------------------------------------------------------------------------------- /newsfragments/177.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_sendPong json rpc endpoint. 2 | -------------------------------------------------------------------------------- /newsfragments/178.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for enr management json rpc endpoints. 2 | -------------------------------------------------------------------------------- /newsfragments/182.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_updateNodeInfo json rpc endpoint. 2 | -------------------------------------------------------------------------------- /newsfragments/183.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_lookupENR json rpc endpoint. 2 | -------------------------------------------------------------------------------- /newsfragments/195.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_sendFindNodes and discv5_sendFoundNodes json rpc endpoints. 2 | -------------------------------------------------------------------------------- /newsfragments/224.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for disvc5 talk json rpc endpoints. 2 | -------------------------------------------------------------------------------- /newsfragments/225.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for discv5_bond and discv5_recursiveFindNodes json rpc endpoints. 
2 | -------------------------------------------------------------------------------- /newsfragments/263.feature.rst: -------------------------------------------------------------------------------- 1 | Add support for stream_find_nodes to Network and Client api. 2 | -------------------------------------------------------------------------------- /newsfragments/343.bugfix.rst: -------------------------------------------------------------------------------- 1 | Configure local ENR properly when using the default IP address. 2 | -------------------------------------------------------------------------------- /newsfragments/343.doc.rst: -------------------------------------------------------------------------------- 1 | Move noisy log "Loop monitoring task called" into debug2. Log out ENR items along with encoded ENR. 2 | -------------------------------------------------------------------------------- /newsfragments/343.internal.rst: -------------------------------------------------------------------------------- 1 | Install web3 for lint tests, so that mypy notices problems. Upgrade coincurve. Upgrade web3 to 2 | 5.19+ and handle the ModuleV2 rename. 3 | -------------------------------------------------------------------------------- /newsfragments/343.misc.rst: -------------------------------------------------------------------------------- 1 | Remove all bootnodes for now. 2 | -------------------------------------------------------------------------------- /newsfragments/344.feature.rst: -------------------------------------------------------------------------------- 1 | Fail immediately if you try to launch with 127.0.0.1, because you can't send outbound messages away 2 | from your machine that way. 
(and get ugly stack traces and a crash) 3 | -------------------------------------------------------------------------------- /newsfragments/345.feature.rst: -------------------------------------------------------------------------------- 1 | Drop peer session if no messages received for 60 seconds. 2 | -------------------------------------------------------------------------------- /newsfragments/346.bugfix.rst: -------------------------------------------------------------------------------- 1 | Encode ENR sequence number with 64 bits, as mentioned in ENR spec. 2 | -------------------------------------------------------------------------------- /newsfragments/346.feature.rst: -------------------------------------------------------------------------------- 1 | Update overlay network ID to "portal". 2 | -------------------------------------------------------------------------------- /newsfragments/README.md: -------------------------------------------------------------------------------- 1 | This directory collects "newsfragments": short files that each contain 2 | a snippet of ReST-formatted text that will be added to the next 3 | release notes. This should be a description of aspects of the change 4 | (if any) that are relevant to users. (This contrasts with the 5 | commit message and PR description, which are a description of the change as 6 | relevant to people working on the code itself.) 7 | 8 | Each file should be named like `..rst`, where 9 | `` is an issue numbers, and `` is one of: 10 | 11 | * `feature` 12 | * `bugfix` 13 | * `performance` 14 | * `doc` 15 | * `internal` 16 | * `removal` 17 | * `misc` 18 | 19 | So for example: `123.feature.rst`, `456.bugfix.rst` 20 | 21 | If the PR fixes an issue, use that number here. If there is no issue, 22 | then open up the PR first and use the PR number for the newsfragment. 23 | 24 | Note that the `towncrier` tool will automatically 25 | reflow your text, so don't try to do any fancy formatting. 
Run 26 | `towncrier --draft` to get a preview of what the release notes entry 27 | will look like in the final release notes. 28 | -------------------------------------------------------------------------------- /newsfragments/validate_files.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Towncrier silently ignores files that do not match the expected ending. 4 | # We use this script to ensure we catch these as errors in CI. 5 | 6 | import os 7 | import pathlib 8 | import sys 9 | 10 | ALLOWED_EXTENSIONS = { 11 | '.bugfix.rst', 12 | '.doc.rst', 13 | '.feature.rst', 14 | '.internal.rst', 15 | '.misc.rst', 16 | '.performance.rst', 17 | '.removal.rst', 18 | } 19 | 20 | ALLOWED_FILES = { 21 | 'validate_files.py', 22 | 'README.md', 23 | } 24 | 25 | THIS_DIR = pathlib.Path(__file__).parent 26 | 27 | num_args = len(sys.argv) - 1 28 | assert num_args in {0, 1} 29 | if num_args == 1: 30 | assert sys.argv[1] in ('is-empty', ) 31 | 32 | for fragment_file in THIS_DIR.iterdir(): 33 | 34 | if fragment_file.name in ALLOWED_FILES: 35 | continue 36 | elif num_args == 0: 37 | full_extension = "".join(fragment_file.suffixes) 38 | if full_extension not in ALLOWED_EXTENSIONS: 39 | raise Exception(f"Unexpected file: {fragment_file}") 40 | elif sys.argv[1] == 'is-empty': 41 | raise Exception(f"Unexpected file: {fragment_file}") 42 | else: 43 | raise RuntimeError("Strange: arguments {sys.argv} were validated, but not found") 44 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.towncrier] 2 | # Read https://github.com/ethereum/ddht/newsfragments/README.md for instructions 3 | package = "ddht" 4 | filename = "docs/release_notes.rst" 5 | directory = "newsfragments" 6 | underlines = ["-", "~", "^"] 7 | issue_format = "`#{issue} `__" 8 | 9 | [[tool.towncrier.type]] 10 | directory = 
"feature" 11 | name = "Features" 12 | showcontent = true 13 | 14 | [[tool.towncrier.type]] 15 | directory = "bugfix" 16 | name = "Bugfixes" 17 | showcontent = true 18 | 19 | [[tool.towncrier.type]] 20 | directory = "performance" 21 | name = "Performance improvements" 22 | showcontent = true 23 | 24 | [[tool.towncrier.type]] 25 | directory = "doc" 26 | name = "Improved Documentation" 27 | showcontent = true 28 | 29 | [[tool.towncrier.type]] 30 | directory = "removal" 31 | name = "Deprecations and Removals" 32 | showcontent = true 33 | 34 | [[tool.towncrier.type]] 35 | directory = "internal" 36 | name = "Internal Changes - for Discovery V5 DHT Contributors" 37 | showcontent = true 38 | 39 | [[tool.towncrier.type]] 40 | directory = "misc" 41 | name = "Miscellaneous changes" 42 | showcontent = false 43 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts= -v --showlocals --durations 10 3 | xfail_strict=true 4 | 5 | [pytest-watch] 6 | runner= pytest --failed-first --maxfail=1 --no-success-flaky-report 7 | -------------------------------------------------------------------------------- /requirements-docs.txt: -------------------------------------------------------------------------------- 1 | ddht[doc] 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from setuptools import ( 4 | setup, 5 | find_packages, 6 | ) 7 | 8 | extras_require = { 9 | "test": [ 10 | "factory-boy==2.12.0", 11 | "hypothesis>=4.45.1,<5", 12 | "pexpect==4.8.0", 13 | "pytest==6.0.1", 14 | "pytest-randomly>=3.1.0,<3.2", 15 | "pytest-trio>=0.6.0,<0.7", 16 | "pytest-xdist==2.0.0", 17 | "tox==3.19.0", 18 | ], 19 | "lint": [ 20 | "black==19.10b0", 21 | "flake8==3.8.3", 22 | 
"isort>=5.1.4,<6", 23 | "mypy==0.782", 24 | "pydocstyle>=3.0.0,<4", 25 | ], 26 | "doc": ["Sphinx>=1.6.5,<2", "sphinx_rtd_theme>=0.1.9", "towncrier>=19.2.0, <20"], 27 | "dev": [ 28 | "bumpversion>=0.5.3,<1", 29 | "pytest-watch>=4.1.0,<5", 30 | "wheel", 31 | "twine", 32 | "ipython", 33 | ], 34 | "web3": ["web3>=5.19.0,<6"], 35 | "alexandria": [ 36 | "pyethash>=0.1.27,<1.0.0", 37 | "ssz>=0.2.4,<0.3", 38 | ], 39 | "benchmark": [ 40 | "texttable==1.6.3", 41 | ], 42 | } 43 | 44 | extras_require["dev"] = ( 45 | extras_require["dev"] 46 | + extras_require["alexandria"] # noqa: W504 47 | + extras_require["benchmark"] # noqa: W504 48 | + extras_require["web3"] # noqa: W504 49 | + extras_require["test"] # noqa: W504 50 | + extras_require["lint"] # noqa: W504 51 | + extras_require["doc"] # noqa: W504 52 | ) 53 | 54 | 55 | with open("./README.md") as readme: 56 | long_description = readme.read() 57 | 58 | 59 | setup( 60 | name="ddht", 61 | # *IMPORTANT*: Don't manually change the version here. Use `make bump`, as described in readme 62 | version="0.1.0-alpha.2", 63 | description="""ddht: Implementation of the P2P Discoveryv5 Protocol""", 64 | long_description=long_description, 65 | long_description_content_type="text/markdown", 66 | author="The Ethereum Foundation", 67 | author_email="snakecharmers@ethereum.org", 68 | url="https://github.com/ethereum/ddht", 69 | include_package_data=True, 70 | install_requires=[ 71 | "async-service>=0.1.0a9,<0.2", 72 | "cached-property>=1.5.1,<2", 73 | "coincurve>=15.0.0,<16.0.0", 74 | "cryptography>=3.0,<3.2", 75 | "eth-enr>=0.4.1,<0.5", 76 | "eth-hash[pycryptodome]>=0.1.4,<1", 77 | "eth-keys>=0.3.3,<0.4.0", 78 | "eth-typing>=2.2.2,<3", 79 | "eth-utils>=1.8.4,<2", 80 | "lru-dict>=1.1.6,<2.0", 81 | # TODO: unpin once the dependency problems are resolved 82 | "rlp==2.0.0a1", 83 | "trio>=0.16.0,<0.17", 84 | "trio-typing>=0.5.0,<0.6", 85 | "upnp-port-forward>=0.1.1,<0.2", 86 | ], 87 | python_requires=">=3.8, <4", 88 | 
extras_require=extras_require, 89 | py_modules=["ddht"], 90 | license="MIT", 91 | zip_safe=False, 92 | keywords="ethereum", 93 | packages=find_packages(exclude=["tests", "tests.*"]), 94 | classifiers=[ 95 | "Development Status :: 3 - Alpha", 96 | "Intended Audience :: Developers", 97 | "License :: OSI Approved :: MIT License", 98 | "Natural Language :: English", 99 | "Programming Language :: Python :: 3", 100 | "Programming Language :: Python :: 3.8", 101 | ], 102 | entry_points={"console_scripts": ["ddht=ddht._boot:_boot"]}, 103 | ) 104 | -------------------------------------------------------------------------------- /stubs/cached_property.pyi: -------------------------------------------------------------------------------- 1 | cached_property = property 2 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import tempfile 3 | 4 | import pytest 5 | import pytest_trio 6 | import trio 7 | 8 | 9 | @pytest.fixture(autouse=True) 10 | def xdg_home(monkeypatch): 11 | with tempfile.TemporaryDirectory() as temp_xdg: 12 | monkeypatch.setenv("XDG_DATA_HOME", temp_xdg) 13 | yield pathlib.Path(temp_xdg) 14 | 15 | 16 | @pytest.fixture 17 | def ipc_path(xdg_home): 18 | return xdg_home / "jsonrpc.ipc" 19 | 20 | 21 | @pytest_trio.trio_fixture 22 | async def socket_pair(): 23 | sending_socket = trio.socket.socket( 24 | family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM, 25 | ) 26 | receiving_socket = trio.socket.socket( 27 | family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM, 28 | ) 29 | # specifying 0 as port number results in using random available port 30 | await sending_socket.bind(("127.0.0.1", 0)) 31 | await receiving_socket.bind(("127.0.0.1", 0)) 32 | return sending_socket, receiving_socket 33 | -------------------------------------------------------------------------------- /tests/core/conftest.py: 
-------------------------------------------------------------------------------- 1 | import io 2 | import itertools 3 | import json 4 | 5 | from eth_utils.toolz import take 6 | import pytest 7 | import trio 8 | 9 | from ddht.rpc import RPCRequest, read_json 10 | 11 | 12 | @pytest.fixture(name="make_raw_request") 13 | async def _make_raw_request(ipc_path, rpc_server): 14 | socket = await trio.open_unix_socket(str(ipc_path)) 15 | async with socket: 16 | buffer = io.StringIO() 17 | 18 | async def make_raw_request(raw_request: str): 19 | with trio.fail_after(2): 20 | data = raw_request.encode("utf8") 21 | data_iter = iter(data) 22 | while True: 23 | chunk = bytes(take(1024, data_iter)) 24 | if chunk: 25 | try: 26 | await socket.send_all(chunk) 27 | except trio.BrokenResourceError: 28 | break 29 | else: 30 | break 31 | return await read_json(socket.socket, buffer) 32 | 33 | yield make_raw_request 34 | 35 | 36 | @pytest.fixture(name="make_request") 37 | async def _make_request(make_raw_request): 38 | id_counter = itertools.count() 39 | 40 | async def make_request(method, params=None): 41 | if params is None: 42 | params = [] 43 | request = RPCRequest( 44 | jsonrpc="2.0", method=method, params=params, id=next(id_counter), 45 | ) 46 | raw_request = json.dumps(request) 47 | 48 | raw_response = await make_raw_request(raw_request) 49 | 50 | if "error" in raw_response: 51 | raise Exception(raw_response) 52 | elif "result" in raw_response: 53 | return raw_response["result"] 54 | else: 55 | raise Exception("Invariant") 56 | 57 | yield make_request 58 | -------------------------------------------------------------------------------- /tests/core/test_adaptive_timeout_util.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import trio 3 | 4 | from ddht._utils import adaptive_timeout 5 | 6 | 7 | @pytest.mark.trio 8 | async def test_adaptive_timeout_all_complete(autojump_clock): 9 | did_complete = [] 10 | 11 | async def 
do_sleep(task_id, seconds): 12 | await trio.sleep(seconds) 13 | did_complete.append(task_id) 14 | 15 | tasks = ( 16 | (do_sleep, (1, 0.9)), 17 | (do_sleep, (1, 1.0)), 18 | (do_sleep, (1, 1.1)), 19 | ) 20 | 21 | # with a threshold of 1 and variance of 2.0 all tasks should complete. 22 | await adaptive_timeout(*tasks, threshold=1, variance=2.0) 23 | 24 | assert len(did_complete) == 3 25 | 26 | 27 | @pytest.mark.trio 28 | async def test_adaptive_timeout_some_timeout(autojump_clock): 29 | did_complete = [] 30 | 31 | async def do_sleep(task_id, seconds): 32 | await trio.sleep(seconds) 33 | did_complete.append(task_id) 34 | 35 | tasks = ( 36 | (do_sleep, (1, 0.9)), 37 | (do_sleep, (1, 1.0)), 38 | (do_sleep, (1, 2.5)), # this one should not complete 39 | ) 40 | 41 | # with a threshold of 1 and variance of 2.0 all tasks should complete. 42 | await adaptive_timeout(*tasks, threshold=1, variance=2.0) 43 | 44 | assert len(did_complete) == 2 45 | 46 | 47 | @pytest.mark.trio 48 | async def test_adaptive_timeout_higher_threshold_all_complete(autojump_clock): 49 | did_complete = [] 50 | 51 | async def do_sleep(task_id, seconds): 52 | await trio.sleep(seconds) 53 | did_complete.append(task_id) 54 | 55 | tasks = ( 56 | (do_sleep, (1, 1.0)), 57 | (do_sleep, (1, 3.0)), 58 | (do_sleep, (1, 3.5)), 59 | ) 60 | 61 | # with a threshold of 1 and variance of 2.0 all tasks should complete. 62 | await adaptive_timeout(*tasks, threshold=2, variance=2.0) 63 | 64 | assert len(did_complete) == 3 65 | 66 | 67 | @pytest.mark.trio 68 | async def test_adaptive_timeout_higher_threshold_some_timeout(autojump_clock): 69 | did_complete = [] 70 | 71 | async def do_sleep(task_id, seconds): 72 | await trio.sleep(seconds) 73 | did_complete.append(task_id) 74 | 75 | tasks = ( 76 | (do_sleep, (1, 1.0)), 77 | (do_sleep, (1, 3.0)), 78 | (do_sleep, (1, 4.5)), 79 | ) 80 | 81 | # with a threshold of 1 and variance of 2.0 all tasks should complete. 
82 | await adaptive_timeout(*tasks, threshold=2, variance=2.0) 83 | 84 | assert len(did_complete) == 2 85 | -------------------------------------------------------------------------------- /tests/core/test_aesgcm_encryption.py: -------------------------------------------------------------------------------- 1 | from eth_utils import ValidationError, decode_hex 2 | from hypothesis import given 3 | from hypothesis import strategies as st 4 | import pytest 5 | 6 | from ddht.constants import AES128_KEY_SIZE 7 | from ddht.encryption import ( 8 | aesgcm_decrypt, 9 | aesgcm_encrypt, 10 | validate_aes128_key, 11 | validate_nonce, 12 | ) 13 | from ddht.exceptions import DecryptionError 14 | from ddht.typing import AES128Key, Nonce 15 | from ddht.v5.constants import NONCE_SIZE 16 | 17 | key_st = st.binary(min_size=AES128_KEY_SIZE, max_size=AES128_KEY_SIZE) 18 | nonce_st = st.binary(min_size=NONCE_SIZE, max_size=NONCE_SIZE) 19 | plain_text_st = st.binary(min_size=0, max_size=10) 20 | aad_st = st.binary(min_size=0, max_size=10) 21 | 22 | 23 | def test_key_validation_invalid(): 24 | for length in (0, 12, 15, 17, 32): 25 | with pytest.raises(ValidationError): 26 | validate_aes128_key(AES128Key(b"\x00" * length)) 27 | 28 | 29 | @given(key_st) 30 | def test_key_validation_valid(key): 31 | validate_aes128_key(AES128Key(key)) 32 | 33 | 34 | def test_nonce_validation_invalid(): 35 | for length in (0, 11, 13, 16): 36 | with pytest.raises(ValidationError): 37 | validate_nonce(Nonce(b"\x00" * length)) 38 | 39 | 40 | @given(nonce_st) 41 | def test_nonce_validation_valid(key): 42 | validate_nonce(Nonce(key)) 43 | 44 | 45 | def test_decryption_with_wrong_inputs(): 46 | key = AES128Key(b"\x00" * 16) 47 | nonce = Nonce(b"\x11" * 12) 48 | plain_text = b"\x33" * 5 49 | aad = b"\x44" * 5 50 | cipher_text = aesgcm_encrypt(key, nonce, plain_text, aad) 51 | 52 | assert aesgcm_decrypt(key, nonce, cipher_text, aad) == plain_text 53 | with pytest.raises(ValidationError): 54 | aesgcm_decrypt(b"", 
nonce, cipher_text, aad) 55 | with pytest.raises(ValidationError): 56 | aesgcm_decrypt(key, b"", cipher_text, aad) 57 | with pytest.raises(DecryptionError): 58 | aesgcm_decrypt(key, nonce, b"", aad) 59 | with pytest.raises(DecryptionError): 60 | aesgcm_decrypt(key, nonce, cipher_text, b"") 61 | 62 | 63 | @given(key=key_st, nonce=nonce_st, plain_text=plain_text_st, aad=aad_st) 64 | def test_roundtrip(key, nonce, plain_text, aad): 65 | cipher_text = aesgcm_encrypt(key, nonce, plain_text, aad) 66 | plain_text_recovered = aesgcm_decrypt(key, nonce, cipher_text, aad) 67 | assert plain_text_recovered == plain_text 68 | 69 | 70 | @pytest.mark.parametrize( 71 | ["key", "nonce", "plain_text", "aad", "cipher_text"], 72 | [ 73 | [ 74 | decode_hex("0x9f2d77db7004bf8a1a85107ac686990b"), 75 | decode_hex("0x27b5af763c446acd2749fe8e"), 76 | decode_hex("0x01c20101"), 77 | decode_hex( 78 | "0x93a7400fa0d6a694ebc24d5cf570f65d04215b6ac00757875e3f3a5f42107903" 79 | ), 80 | decode_hex("0xa5d12a2d94b8ccb3ba55558229867dc13bfa3648"), 81 | ] 82 | ], 83 | ) 84 | def test_encryption_official(key, nonce, plain_text, aad, cipher_text): 85 | encrypted = aesgcm_encrypt(key, nonce, plain_text, aad) 86 | assert encrypted == cipher_text 87 | assert aesgcm_decrypt(key, nonce, cipher_text, aad) == plain_text 88 | -------------------------------------------------------------------------------- /tests/core/test_cli.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pexpect 4 | import pytest 5 | 6 | from ddht import __version__ 7 | from ddht import logging as ddht_logging 8 | 9 | 10 | def test_ddht_version(): 11 | child = pexpect.spawn("ddht --version") 12 | child.expect(__version__) 13 | 14 | 15 | def test_ddht_help(): 16 | child = pexpect.spawn("ddht --help") 17 | child.expect("Discovery V5 DHT") 18 | child.expect("core:") 19 | child.expect("logging:") 20 | child.expect("network:") 21 | 22 | 23 | @pytest.mark.parametrize( 24 | 
"env,expected", 25 | ( 26 | (None, dict()), 27 | ("debug", {None: logging.DEBUG}), 28 | ("DEBUG,INFO:root", {None: logging.DEBUG, "root": logging.INFO}), 29 | ), 30 | ) 31 | def test_loglevel_parsing(env, expected): 32 | parsed = ddht_logging.environment_to_log_levels(env) 33 | assert parsed == expected 34 | 35 | 36 | def test_bad_log_level(): 37 | with pytest.raises(Exception): 38 | ddht_logging.environment_to_log_levels("debu") 39 | -------------------------------------------------------------------------------- /tests/core/test_cli_parsing_to_boot_info.py: -------------------------------------------------------------------------------- 1 | import ipaddress 2 | import pathlib 3 | 4 | from eth_keys import keys 5 | import pytest 6 | 7 | from ddht.boot_info import BootInfo 8 | from ddht.constants import ProtocolVersion 9 | from ddht.tools.factories.boot_info import BOOTNODES_V5, BootInfoFactory 10 | from ddht.tools.factories.discovery import ENRFactory 11 | 12 | KEY_RAW = b"unicornsrainbowsunicornsrainbows" 13 | KEY_HEX = KEY_RAW.hex() 14 | KEY_HEX_PREFIXED = "0x" + KEY_HEX 15 | KEY = keys.PrivateKey(KEY_RAW) 16 | 17 | 18 | ENR_A = ENRFactory() 19 | ENR_B = ENRFactory() 20 | 21 | 22 | @pytest.mark.parametrize( 23 | "args,factory_kwargs", 24 | ( 25 | ((), {}), 26 | (("--port", "12345"), dict(port=12345)), 27 | ( 28 | ("--listen-address", "192.168.0.1"), 29 | dict(listen_on=ipaddress.ip_address("192.168.0.1")), 30 | ), 31 | ( 32 | ("--base-dir", "~/test-home-gets-resolved"), 33 | dict(base_dir=pathlib.Path("~/test-home-gets-resolved").expanduser()), 34 | ), 35 | ( 36 | ("--base-dir", "./../test-relative-gets-resolved"), 37 | dict(base_dir=pathlib.Path("./../test-relative-gets-resolved").resolve()), 38 | ), 39 | (("--private-key", KEY_HEX), dict(private_key=KEY)), 40 | (("--bootnode", repr(ENR_A)), (dict(bootnodes=(ENR_A,))),), 41 | ( 42 | ("--bootnode", repr(ENR_A), "--bootnode", repr(ENR_B)), 43 | (dict(bootnodes=(ENR_A, ENR_B))), 44 | ), 45 | (("--disable-upnp",), 
(dict(is_upnp_enabled=False)),), 46 | # protocol version 47 | ( 48 | ("--protocol-version", "v5"), 49 | dict(protocol_version=ProtocolVersion.v5, bootnodes=BOOTNODES_V5), 50 | ), 51 | (("--protocol-version", "v5.1"), {}), 52 | ), 53 | ) 54 | def test_cli_args_to_boot_info(args, factory_kwargs): 55 | expected = BootInfoFactory(**factory_kwargs) 56 | actual = BootInfo.from_cli_args(args) 57 | assert actual == expected 58 | -------------------------------------------------------------------------------- /tests/core/test_datagram_services.py: -------------------------------------------------------------------------------- 1 | from socket import inet_aton 2 | 3 | from async_service import background_trio_service 4 | import pytest 5 | import trio 6 | 7 | from ddht.datagram import DatagramReceiver, DatagramSender, OutboundDatagram 8 | from ddht.endpoint import Endpoint 9 | 10 | 11 | @pytest.mark.trio 12 | async def test_datagram_receiver(socket_pair): 13 | sending_socket, receiving_socket = socket_pair 14 | receiver_address = receiving_socket.getsockname() 15 | sender_address = sending_socket.getsockname() 16 | 17 | send_channel, receive_channel = trio.open_memory_channel(1) 18 | async with background_trio_service( 19 | DatagramReceiver(receiving_socket, send_channel) 20 | ): 21 | data = b"some packet" 22 | 23 | await sending_socket.sendto(data, receiver_address) 24 | with trio.fail_after(0.5): 25 | received_datagram = await receive_channel.receive() 26 | 27 | assert received_datagram.datagram == data 28 | assert received_datagram.sender_endpoint.ip_address == inet_aton( 29 | sender_address[0] 30 | ) 31 | assert received_datagram.sender_endpoint.port == sender_address[1] 32 | 33 | 34 | @pytest.mark.trio 35 | async def test_datagram_sender(socket_pair): 36 | sending_socket, receiving_socket = socket_pair 37 | receiver_endpoint = receiving_socket.getsockname() 38 | sender_endpoint = sending_socket.getsockname() 39 | 40 | send_channel, receive_channel = 
trio.open_memory_channel(1) 41 | async with background_trio_service(DatagramSender(receive_channel, sending_socket)): 42 | outbound_datagram = OutboundDatagram( 43 | b"some packet", 44 | Endpoint(inet_aton(receiver_endpoint[0]), receiver_endpoint[1]), 45 | ) 46 | await send_channel.send(outbound_datagram) 47 | 48 | with trio.fail_after(0.5): 49 | data, sender = await receiving_socket.recvfrom(1024) 50 | assert data == outbound_datagram.datagram 51 | assert sender == sender_endpoint 52 | -------------------------------------------------------------------------------- /tests/core/test_enr_partitioning.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | 3 | from eth_enr.constants import MAX_ENR_SIZE 4 | from eth_enr.sedes import ENRSedes 5 | from eth_enr.tools.factories import ENRFactory 6 | from eth_utils.toolz import sliding_window 7 | from hypothesis import given, settings 8 | from hypothesis import strategies as st 9 | import rlp 10 | 11 | from ddht.constants import DISCOVERY_MAX_PACKET_SIZE 12 | from ddht.enr import partition_enrs 13 | 14 | 15 | @settings(max_examples=50, deadline=1000) 16 | @given( 17 | num_enr_records=st.integers(min_value=0, max_value=100), 18 | max_payload_size=st.integers( 19 | min_value=MAX_ENR_SIZE, max_value=DISCOVERY_MAX_PACKET_SIZE 20 | ), 21 | ) 22 | def test_enr_partitioning_fuzzy(num_enr_records, max_payload_size): 23 | enrs = ENRFactory.create_batch(num_enr_records) 24 | batches = partition_enrs(enrs, max_payload_size) 25 | 26 | assert sum(len(batch) for batch in batches) == len(enrs) 27 | assert set(itertools.chain(*batches)) == set(enrs) 28 | 29 | if num_enr_records == 0: 30 | assert batches == ((),) 31 | 32 | for batch in batches: 33 | encoded_batch = rlp.encode(batch, sedes=rlp.sedes.CountableList(ENRSedes)) 34 | assert len(encoded_batch) <= max_payload_size 35 | 36 | for batch, next_batch in sliding_window(2, batches): 37 | overfull_batch = tuple(batch) + 
(next_batch[0],) 38 | encoded_batch = rlp.encode( 39 | overfull_batch, sedes=rlp.sedes.CountableList(ENRSedes) 40 | ) 41 | assert len(encoded_batch) > max_payload_size 42 | -------------------------------------------------------------------------------- /tests/core/test_event.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import trio 3 | 4 | from ddht.event import Event 5 | 6 | 7 | @pytest.mark.trio 8 | async def test_event_trigger_with_no_subscriptions(): 9 | event = Event("test") 10 | 11 | await event.trigger(None) 12 | 13 | 14 | @pytest.mark.trio 15 | async def test_event_trigger_nowait_with_no_subscriptions(): 16 | event = Event("test") 17 | 18 | await event.trigger(None) 19 | 20 | 21 | @pytest.mark.trio 22 | async def test_event_trigger_with_single_subscription(): 23 | event = Event("test") 24 | 25 | async with event.subscribe() as subscription: 26 | with pytest.raises(trio.WouldBlock): 27 | subscription.receive_nowait() 28 | 29 | await event.trigger(1234) 30 | 31 | result = await subscription.receive() 32 | assert result == 1234 33 | 34 | with pytest.raises(trio.ClosedResourceError): 35 | subscription.receive_nowait() 36 | with pytest.raises(trio.ClosedResourceError): 37 | await subscription.receive() 38 | 39 | 40 | @pytest.mark.trio 41 | async def test_event_trigger_nowait_with_single_subscription(): 42 | event = Event("test") 43 | 44 | async with event.subscribe() as subscription: 45 | with pytest.raises(trio.WouldBlock): 46 | subscription.receive_nowait() 47 | 48 | event.trigger_nowait(1234) 49 | 50 | result = await subscription.receive() 51 | assert result == 1234 52 | 53 | with pytest.raises(trio.ClosedResourceError): 54 | subscription.receive_nowait() 55 | with pytest.raises(trio.ClosedResourceError): 56 | await subscription.receive() 57 | 58 | 59 | @pytest.mark.trio 60 | async def test_event_trigger_with_multiple_subscriptions(): 61 | event = Event("test") 62 | 63 | async with 
event.subscribe() as subscription_a: 64 | await event.trigger(1234) 65 | 66 | async with event.subscribe() as subscription_b: 67 | await event.trigger(4321) 68 | 69 | result_a_1 = subscription_a.receive_nowait() 70 | result_a_2 = subscription_a.receive_nowait() 71 | result_b_1 = subscription_b.receive_nowait() 72 | 73 | assert result_a_1 == 1234 74 | assert result_a_2 == 4321 75 | assert result_b_1 == 4321 76 | 77 | with pytest.raises(trio.WouldBlock): 78 | subscription_a.receive_nowait() 79 | with pytest.raises(trio.WouldBlock): 80 | subscription_b.receive_nowait() 81 | 82 | 83 | @pytest.mark.trio 84 | async def test_event_trigger_nowait_with_multiple_subscriptions(): 85 | event = Event("test") 86 | 87 | async with event.subscribe() as subscription_a: 88 | event.trigger_nowait(1234) 89 | 90 | async with event.subscribe() as subscription_b: 91 | event.trigger_nowait(4321) 92 | 93 | result_a_1 = subscription_a.receive_nowait() 94 | result_a_2 = subscription_a.receive_nowait() 95 | result_b_1 = subscription_b.receive_nowait() 96 | 97 | assert result_a_1 == 1234 98 | assert result_a_2 == 4321 99 | assert result_b_1 == 4321 100 | 101 | with pytest.raises(trio.WouldBlock): 102 | subscription_a.receive_nowait() 103 | with pytest.raises(trio.WouldBlock): 104 | subscription_b.receive_nowait() 105 | 106 | 107 | @pytest.mark.trio 108 | async def test_event_wait_without_explicit_subscription(): 109 | event = Event("test") 110 | 111 | async with trio.open_nursery() as nursery: 112 | got_it = trio.Event() 113 | 114 | async def wait_for_it(): 115 | result = await event.wait() 116 | assert result == 1234 117 | got_it.set() 118 | 119 | nursery.start_soon(wait_for_it) 120 | # give the wait_for_it task a moment to initiate the subscription 121 | await trio.lowlevel.checkpoint() 122 | await trio.lowlevel.checkpoint() 123 | await trio.lowlevel.checkpoint() 124 | # trigger a few times just in case the subscription isn't setup yet.... 
125 | await event.trigger(1234) 126 | await event.trigger(1234) 127 | await event.trigger(1234) 128 | await event.trigger(1234) 129 | 130 | with trio.fail_after(1): 131 | await got_it.wait() 132 | 133 | 134 | @pytest.mark.trio 135 | async def test_event_subscribe_and_wait(): 136 | event = Event("test") 137 | 138 | with trio.fail_after(1): 139 | async with trio.open_nursery() as nursery: 140 | async with event.subscribe_and_wait(): 141 | nursery.start_soon(event.trigger, 1234) 142 | -------------------------------------------------------------------------------- /tests/core/test_every_and_gather_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import trio 3 | 4 | from ddht._utils import every, gather 5 | 6 | 7 | @pytest.mark.trio 8 | async def test_empty_gather(): 9 | result = await gather() 10 | assert result == () 11 | 12 | with trio.testing.assert_checkpoints(): 13 | await gather() 14 | 15 | 16 | @pytest.mark.trio 17 | async def test_gather_sorted(autojump_clock): 18 | async def f(return_value, sleep_time): 19 | await trio.sleep(sleep_time) 20 | return return_value 21 | 22 | results = await gather((f, 0, 0.1), (f, 1, 0.2), (f, 2, 0.05),) 23 | assert results == (0, 1, 2) 24 | 25 | 26 | @pytest.mark.trio 27 | async def test_gather_args(): 28 | async def return_args(*args): 29 | await trio.lowlevel.checkpoint() 30 | return args 31 | 32 | results = await gather(return_args, (return_args,), (return_args, 1, 2, 3)) 33 | assert results == ((), (), (1, 2, 3)) 34 | 35 | 36 | @pytest.mark.trio 37 | async def test_every(autojump_clock): 38 | start_time = trio.current_time() 39 | 40 | every_generator = every(2, initial_delay=1) 41 | 42 | first_time = await every_generator.__anext__() 43 | assert first_time == pytest.approx(trio.current_time()) 44 | assert first_time <= trio.current_time() 45 | assert first_time == pytest.approx(start_time + 1) 46 | 47 | second_time = await every_generator.__anext__() 48 | assert 
second_time == pytest.approx(trio.current_time()) 49 | assert second_time == pytest.approx(first_time + 2) 50 | 51 | third_time = await every_generator.__anext__() 52 | assert third_time == pytest.approx(trio.current_time()) 53 | assert third_time == pytest.approx(first_time + 4) 54 | 55 | 56 | @pytest.mark.trio 57 | async def test_every_send(autojump_clock): 58 | start_time = trio.current_time() 59 | 60 | every_generator = every(2, initial_delay=1) 61 | 62 | first_time = await every_generator.__anext__() 63 | assert first_time == pytest.approx(start_time + 1) 64 | 65 | second_time = await every_generator.asend(3) 66 | assert second_time == pytest.approx(first_time + 2 + 3) 67 | 68 | third_time = await every_generator.asend(1) 69 | assert third_time == pytest.approx(second_time + 2 + 1) 70 | 71 | 72 | @pytest.mark.trio 73 | async def test_every_late(autojump_clock): 74 | start_time = trio.current_time() 75 | 76 | every_generator = every(2, initial_delay=1) 77 | 78 | first_time = await every_generator.__anext__() 79 | await trio.sleep(3) 80 | 81 | second_time = await every_generator.__anext__() 82 | assert second_time == pytest.approx(first_time + 2) 83 | assert trio.current_time() == pytest.approx(start_time + 1 + 3) 84 | 85 | third_time = await every_generator.__anext__() 86 | assert third_time == pytest.approx(second_time + 2) 87 | assert trio.current_time() == pytest.approx(third_time) 88 | -------------------------------------------------------------------------------- /tests/core/test_humanize_bytes.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from ddht._utils import humanize_bytes 4 | 5 | KB = 1024 6 | MB = 1024 * 1024 7 | GB = 1024 * 1024 * 1024 8 | TB = 1024 * 1024 * 1024 * 1024 9 | PB = 1024 * 1024 * 1024 * 1024 * 1024 10 | 11 | 12 | @pytest.mark.parametrize( 13 | "num_bytes,expected", 14 | ( 15 | (0, "0B"), 16 | (1, "1B"), 17 | (10, "10B"), 18 | (125, "125B"), 19 | (1023, "1023B"), 20 | 
(KB, "1KB"), 21 | (MB, "1MB"), 22 | (GB, "1GB"), 23 | (TB, "1TB"), 24 | (PB, "1PB"), 25 | (int(KB * 1.25), "1.25KB"), 26 | (int(MB * 1.25), "1.25MB"), 27 | (int(GB * 1.25), "1.25GB"), 28 | (int(TB * 1.25), "1.25TB"), 29 | (int(PB * 1.25), "1.25PB"), 30 | (int(KB * 1.2), "1.2KB"), 31 | (int(MB * 1.2), "1.2MB"), 32 | (int(GB * 1.2), "1.2GB"), 33 | (int(TB * 1.2), "1.2TB"), 34 | (int(PB * 1.2), "1.2PB"), 35 | (int(KB * 125), "125KB"), 36 | (int(MB * 125), "125MB"), 37 | (int(GB * 125), "125GB"), 38 | (int(TB * 125), "125TB"), 39 | (int(PB * 125), "125PB"), 40 | ), 41 | ) 42 | def test_humanize_bytes(num_bytes, expected): 43 | actual = humanize_bytes(num_bytes) 44 | assert actual == expected 45 | -------------------------------------------------------------------------------- /tests/core/test_import.py: -------------------------------------------------------------------------------- 1 | def test_import(): 2 | import ddht # noqa: F401 3 | -------------------------------------------------------------------------------- /tests/core/test_kademlia.py: -------------------------------------------------------------------------------- 1 | from ddht.kademlia import Address, check_relayed_addr 2 | 3 | 4 | def test_check_relayed_addr(): 5 | public_host = Address("8.8.8.8", 80, 80) 6 | local_host = Address("127.0.0.1", 80, 80) 7 | assert check_relayed_addr(local_host, local_host) 8 | assert not check_relayed_addr(public_host, local_host) 9 | 10 | private = Address("192.168.1.1", 80, 80) 11 | assert check_relayed_addr(private, private) 12 | assert not check_relayed_addr(public_host, private) 13 | 14 | reserved = Address("240.0.0.1", 80, 80) 15 | assert not check_relayed_addr(local_host, reserved) 16 | assert not check_relayed_addr(public_host, reserved) 17 | 18 | unspecified = Address("0.0.0.0", 80, 80) 19 | assert not check_relayed_addr(local_host, unspecified) 20 | assert not check_relayed_addr(public_host, unspecified) 21 | 
-------------------------------------------------------------------------------- /tests/core/test_node_at_distance.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from ddht.kademlia import at_log_distance, compute_log_distance 4 | from ddht.tools.factories.node_id import NodeIDFactory 5 | 6 | 7 | def test_at_log_distance(): 8 | for i in range(10000): 9 | node = NodeIDFactory() 10 | distance = random.randint(1, 256) 11 | other = at_log_distance(node, distance) 12 | actual = compute_log_distance(node, other) 13 | assert actual == distance 14 | -------------------------------------------------------------------------------- /tests/core/test_request_tracker.py: -------------------------------------------------------------------------------- 1 | from ddht.request_tracker import RequestTracker 2 | from ddht.tools.factories.node_id import NodeIDFactory 3 | 4 | 5 | def test_request_tracker_reserve_request_id_generated(): 6 | tracker = RequestTracker() 7 | 8 | node_id = NodeIDFactory() 9 | 10 | with tracker.reserve_request_id(node_id) as request_id: 11 | assert tracker.is_request_id_active(node_id, request_id) 12 | assert not tracker.is_request_id_active(node_id, request_id) 13 | 14 | 15 | def test_request_tracker_reserve_request_id_provided(): 16 | tracker = RequestTracker() 17 | 18 | node_id = NodeIDFactory() 19 | 20 | request_id = b"\x01\x02\x03\04" 21 | 22 | assert not tracker.is_request_id_active(node_id, request_id) 23 | 24 | with tracker.reserve_request_id(node_id, request_id) as actual_request_id: 25 | assert actual_request_id == request_id 26 | assert tracker.is_request_id_active(node_id, request_id) 27 | assert not tracker.is_request_id_active(node_id, request_id) 28 | -------------------------------------------------------------------------------- /tests/core/test_resource_queue.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import pytest 
4 | import trio 5 | 6 | from ddht.resource_queue import ResourceQueue 7 | 8 | 9 | async def _yield(num: int = 10, base: int = 0): 10 | for _ in range(random.randint(0, num) + base): 11 | await trio.lowlevel.checkpoint() 12 | 13 | 14 | @pytest.mark.trio 15 | async def test_resource_queue_fuzzy(): 16 | known_resources = {"a", "b", "c", "d"} 17 | queue = ResourceQueue(known_resources) 18 | 19 | resources_in_use = set() 20 | seen_resources = set() 21 | 22 | async def worker(seen): 23 | """ 24 | Worker process intended to try and hit as many edge cases as possible 25 | about what could happen within the context block of 26 | `ResourceQueue.reserve` by yielding to trio at as many stages as 27 | possible. 28 | """ 29 | while True: 30 | async with queue.reserve() as resource: 31 | seen.add(resource) 32 | assert resource in queue 33 | 34 | await _yield() 35 | 36 | assert resource not in resources_in_use 37 | resources_in_use.add(resource) 38 | 39 | await _yield() 40 | 41 | resources_in_use.remove(resource) 42 | 43 | await _yield() 44 | 45 | assert resource not in resources_in_use 46 | 47 | async with trio.open_nursery() as nursery: 48 | for _ in range(10): 49 | nursery.start_soon(worker, seen_resources) 50 | 51 | await _yield(1, 500) 52 | 53 | assert seen_resources == queue.resources 54 | 55 | assert "e" not in queue 56 | assert "f" not in queue 57 | 58 | # Now add two more resources. They should get picked up by the new 59 | # workers. 
60 | await queue.add("e") 61 | await queue.add("f") 62 | 63 | assert "e" in queue 64 | assert "f" in queue 65 | 66 | await _yield(1, 500) 67 | 68 | seen_resources_after_add = set() 69 | 70 | for _ in range(10): 71 | nursery.start_soon(worker, seen_resources_after_add) 72 | 73 | await _yield(1, 500) 74 | 75 | assert seen_resources_after_add == queue.resources 76 | 77 | nursery.cancel_scope.cancel() 78 | 79 | 80 | @pytest.mark.trio 81 | async def test_resource_queue_add_idempotent(): 82 | queue = ResourceQueue(("a", "b", "c")) 83 | 84 | assert len(queue) == 3 85 | 86 | await queue.add("a") 87 | 88 | assert len(queue) == 3 89 | 90 | await queue.add("d") 91 | 92 | assert len(queue) == 4 93 | 94 | 95 | @pytest.mark.trio 96 | async def test_resource_queue_remove_idempotent(): 97 | queue = ResourceQueue(("a", "b", "c")) 98 | 99 | assert len(queue) == 3 100 | 101 | await queue.remove("a") 102 | 103 | assert len(queue) == 2 104 | 105 | await queue.remove("a") 106 | -------------------------------------------------------------------------------- /tests/core/test_validation_utils.py: -------------------------------------------------------------------------------- 1 | from eth_enr.tools.factories import ENRFactory 2 | from eth_utils import ValidationError 3 | import pytest 4 | 5 | from ddht.kademlia import compute_log_distance 6 | from ddht.validation import validate_found_nodes_distances 7 | 8 | 9 | @pytest.fixture 10 | def local_enr(): 11 | return ENRFactory() 12 | 13 | 14 | @pytest.fixture 15 | def enr_group(local_enr): 16 | enrs = tuple(ENRFactory() for _ in range(10)) 17 | distances = tuple( 18 | compute_log_distance(enr.node_id, local_enr.node_id) for enr in enrs 19 | ) 20 | return enrs, distances 21 | 22 | 23 | def test_validate_found_nodes_distances(local_enr, enr_group): 24 | enrs, distances = enr_group 25 | assert validate_found_nodes_distances(enrs, local_enr.node_id, distances) is None 26 | 27 | 28 | def 
test_validate_found_nodes_distances_catches_invalid_cases(local_enr, enr_group): 29 | enrs, distances = enr_group 30 | unique_distances = set(distances) 31 | unique_distances.remove(distances[0]) 32 | with pytest.raises(ValidationError, match="Invalid response: distance"): 33 | validate_found_nodes_distances(enrs, local_enr.node_id, unique_distances) 34 | 35 | 36 | def test_validate_found_nodes_distances_catches_self_reference(local_enr, enr_group): 37 | enrs, distances = enr_group 38 | invalid_enrs = enrs + (local_enr,) 39 | unique_distances = set(distances) 40 | assert 0 not in unique_distances 41 | with pytest.raises(ValidationError, match="Invalid response: distance=0"): 42 | validate_found_nodes_distances( 43 | invalid_enrs, local_enr.node_id, unique_distances 44 | ) 45 | -------------------------------------------------------------------------------- /tests/core/test_weighted_choice_util.py: -------------------------------------------------------------------------------- 1 | import collections 2 | 3 | from ddht._utils import weighted_choice 4 | 5 | 6 | def test_weighted_choice(): 7 | for _ in range(100): 8 | options = ("a", "b", "c") 9 | results = tuple(weighted_choice(options) for _ in range(10000)) 10 | counts = collections.Counter(results) 11 | count_a = counts["a"] 12 | count_b = counts["b"] 13 | count_c = counts["c"] 14 | 15 | assert count_a < count_b < count_c 16 | 17 | # `c` should be piced 3x as often as `a` 18 | # `b` should be piced 2x as often as `a` 19 | assert abs(3 - count_c / count_a) < 1.50 20 | assert abs(2 - count_b / count_a) < 1.50 21 | -------------------------------------------------------------------------------- /tests/core/v5/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ethereum/ddht/142911d134ff839f3f79ff8fe9e45d3fe5a58cd0/tests/core/v5/__init__.py -------------------------------------------------------------------------------- 
/tests/core/v5/test_channel_services.py:
--------------------------------------------------------------------------------
from async_service import background_trio_service
import pytest
import trio

from ddht.tools.factories.discovery import AuthTagPacketFactory
from ddht.tools.factories.endpoint import EndpointFactory
from ddht.v5.channel_services import (
    InboundDatagram,
    OutboundPacket,
    PacketDecoder,
    PacketEncoder,
)


@pytest.mark.trio
async def test_packet_decoder():
    """PacketDecoder turns an inbound datagram into a decoded packet, keeping the sender."""
    datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
    packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)

    service = PacketDecoder(datagram_receive_channel, packet_send_channel)
    async with background_trio_service(service):
        packet = AuthTagPacketFactory()
        sender_endpoint = EndpointFactory()
        await datagram_send_channel.send(
            InboundDatagram(
                datagram=packet.to_wire_bytes(), sender_endpoint=sender_endpoint,
            )
        )

        # fail_after bounds the test if the decoder never forwards the packet
        with trio.fail_after(0.5):
            inbound_packet = await packet_receive_channel.receive()

        assert inbound_packet.packet == packet
        assert inbound_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
        assert inbound_packet.sender_endpoint.port == sender_endpoint.port


@pytest.mark.trio
async def test_packet_decoder_error():
    """An undecodable datagram is dropped; the decoder keeps serving later packets."""
    datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)
    packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)

    service = PacketDecoder(datagram_receive_channel, packet_send_channel)
    async with background_trio_service(service):
        # send invalid packet
        await datagram_send_channel.send(
            InboundDatagram(
                datagram=b"not a valid packet", sender_endpoint=EndpointFactory(),
            )
        )

        # send valid packet
        packet = AuthTagPacketFactory()
        sender_endpoint = EndpointFactory()
        await datagram_send_channel.send(
            InboundDatagram(
                datagram=packet.to_wire_bytes(), sender_endpoint=sender_endpoint,
            )
        )

        # ignore the invalid one, only receive the valid one
        with trio.fail_after(0.5):
            inbound_packet = await packet_receive_channel.receive()

        assert inbound_packet.packet == packet
        assert inbound_packet.sender_endpoint.ip_address == sender_endpoint.ip_address
        assert inbound_packet.sender_endpoint.port == sender_endpoint.port


@pytest.mark.trio
async def test_packet_encoder():
    """PacketEncoder serializes an outbound packet into a datagram for the receiver."""
    packet_send_channel, packet_receive_channel = trio.open_memory_channel(1)
    datagram_send_channel, datagram_receive_channel = trio.open_memory_channel(1)

    service = PacketEncoder(packet_receive_channel, datagram_send_channel)
    async with background_trio_service(service):
        receiver_endpoint = EndpointFactory()
        outbound_packet = OutboundPacket(
            packet=AuthTagPacketFactory(), receiver_endpoint=receiver_endpoint,
        )
        await packet_send_channel.send(outbound_packet)

        with trio.fail_after(0.5):
            outbound_datagram = await datagram_receive_channel.receive()

        assert outbound_datagram.datagram == outbound_packet.packet.to_wire_bytes()
        assert (
            outbound_datagram.receiver_endpoint.ip_address
            == receiver_endpoint.ip_address
        )
        assert outbound_datagram.receiver_endpoint.port == receiver_endpoint.port

--------------------------------------------------------------------------------
/tests/core/v5/test_endpoint_tracker.py:
--------------------------------------------------------------------------------
from async_service import background_trio_service
from eth_enr import ENRDB, default_identity_scheme_registry
from eth_enr.tools.factories import ENRFactory
import pytest
import pytest_trio
import trio
from trio.testing import wait_all_tasks_blocked

from ddht.constants
import IP_V4_ADDRESS_ENR_KEY, UDP_PORT_ENR_KEY
from ddht.tools.factories.discovery import EndpointVoteFactory
from ddht.tools.factories.endpoint import EndpointFactory
from ddht.tools.factories.keys import PrivateKeyFactory
from ddht.v5.endpoint_tracker import EndpointTracker


@pytest.fixture
def private_key():
    # private key bytes shared by the ENR fixture and the tracker under test
    return PrivateKeyFactory().to_bytes()


@pytest.fixture
def initial_enr(private_key):
    return ENRFactory(private_key=private_key,)


@pytest_trio.trio_fixture
async def enr_db(initial_enr):
    # in-memory ENR database pre-seeded with the local node's record
    enr_db = ENRDB({})
    enr_db.set_enr(initial_enr)
    return enr_db


@pytest.fixture
def vote_channels():
    # (send, receive) memory channel pair carrying endpoint votes
    return trio.open_memory_channel(0)


@pytest.fixture
async def endpoint_tracker(private_key, initial_enr, enr_db, vote_channels):
    # run the tracker as a background service for the duration of the test
    endpoint_tracker = EndpointTracker(
        local_private_key=private_key,
        local_node_id=initial_enr.node_id,
        enr_db=enr_db,
        identity_scheme_registry=default_identity_scheme_registry,
        vote_receive_channel=vote_channels[1],
    )
    async with background_trio_service(endpoint_tracker):
        yield endpoint_tracker


@pytest.mark.trio
async def test_endpoint_tracker_updates_enr(
    endpoint_tracker, initial_enr, enr_db, vote_channels
):
    """An endpoint vote bumps the local ENR's sequence number and records the new endpoint."""
    endpoint = EndpointFactory()
    endpoint_vote = EndpointVoteFactory(endpoint=endpoint)
    await vote_channels[0].send(endpoint_vote)
    await wait_all_tasks_blocked()  # wait until vote has been processed

    updated_enr = enr_db.get_enr(initial_enr.node_id)
    assert updated_enr.sequence_number == initial_enr.sequence_number + 1
    assert updated_enr[IP_V4_ADDRESS_ENR_KEY] == endpoint.ip_address
    assert updated_enr[UDP_PORT_ENR_KEY] == endpoint.port

--------------------------------------------------------------------------------
/tests/core/v5/test_flat_routing_table.py:
--------------------------------------------------------------------------------
import pytest

from ddht.tools.factories.node_id import NodeIDFactory
from ddht.v5.routing_table import FlatRoutingTable


@pytest.fixture
def routing_table():
    return FlatRoutingTable()


def test_add(routing_table):
    """Adding makes a node present; adding it again raises ValueError."""
    node_id = NodeIDFactory()
    assert node_id not in routing_table
    routing_table.add(node_id)
    assert node_id in routing_table
    with pytest.raises(ValueError):
        routing_table.add(node_id)


def test_update(routing_table):
    """update() refreshes a known node (no longer oldest); unknown nodes raise KeyError."""
    first_node_id = NodeIDFactory()
    second_node_id = NodeIDFactory()

    with pytest.raises(KeyError):
        routing_table.update(first_node_id)
    routing_table.add(first_node_id)
    routing_table.add(second_node_id)

    assert routing_table.get_oldest_entry() == first_node_id
    routing_table.update(first_node_id)
    # after the update, the other node becomes the oldest entry
    assert routing_table.get_oldest_entry() == second_node_id


def test_add_or_update(routing_table):
    """add_or_update() inserts unknown nodes and refreshes known ones."""
    first_node_id = NodeIDFactory()
    second_node_id = NodeIDFactory()

    routing_table.add_or_update(first_node_id)
    assert first_node_id in routing_table

    routing_table.add(second_node_id)
    assert routing_table.get_oldest_entry() == first_node_id
    routing_table.add_or_update(first_node_id)
    assert routing_table.get_oldest_entry() == second_node_id


def test_remove(routing_table):
    """remove() deletes a present node; removing an absent node raises KeyError."""
    node_id = NodeIDFactory()

    with pytest.raises(KeyError):
        routing_table.remove(node_id)
    routing_table.add(node_id)
    routing_table.remove(node_id)
    assert node_id not in routing_table

--------------------------------------------------------------------------------
/tests/core/v5/test_tags.py:
--------------------------------------------------------------------------------
from eth_utils import decode_hex
from hypothesis import given
from hypothesis import strategies as st
import pytest

from ddht.v5.tags import compute_tag, recover_source_id_from_tag


@given(st.binary(min_size=32, max_size=32), st.binary(min_size=32, max_size=32))
def test_source_recovery(source, destination):
    """recover_source_id_from_tag inverts compute_tag for any 32-byte ids."""
    tag = compute_tag(source, destination)
    recovered_src = recover_source_id_from_tag(tag, destination)
    assert recovered_src == source


# Known-answer vectors: (source, destination, expected tag).
@pytest.mark.parametrize(
    ("source", "destination", "tag"),
    (
        (
            decode_hex(
                "0x0000000000000000000000000000000000000000000000000000000000000000"
            ),
            decode_hex(
                "0x0000000000000000000000000000000000000000000000000000000000000000"
            ),
            decode_hex(
                "66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925"
            ),
        ),
        (
            decode_hex(
                "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
            ),
            decode_hex(
                "0x0000000000000000000000000000000000000000000000000000000000000000"
            ),
            decode_hex(
                "0x99978552079d428893703e74716071dff768eb7a911dcc4c6fd5a6e2f2a0d6da"
            ),
        ),
        (
            decode_hex(
                "0xf72d359a057d2c4dbb4502edd4b9ca5f71fe7f93357e733c5f18cadd754e30de"
            ),
            decode_hex(
                "0x4a0f699062a9871bd8ef06f94f51d338bba02aaaedefde860093ad5f1e64dc25"
            ),
            decode_hex(
                "0xa979df942382a64ea3ace81dd5ceb9d95c05ef3ef8e2515b4ab5e45638029b0a"
            ),
        ),
    ),
)
def test_tags(source, destination, tag):
    """Tag computation and source recovery match the fixed test vectors."""
    assert compute_tag(source, destination) == tag
    assert recover_source_id_from_tag(tag, destination) == source

--------------------------------------------------------------------------------
/tests/core/v5/test_v5_bootnodes.py:
--------------------------------------------------------------------------------
from eth_enr import ENR
import pytest

from ddht.v5.constants import DEFAULT_BOOTNODES


@pytest.mark.parametrize(
    "enr_repr", DEFAULT_BOOTNODES,
)
def test_default_bootnodes_valid(enr_repr):
    """Each v5 default bootnode ENR parses and advertises an IP and a UDP port."""
    enr = ENR.from_repr(enr_repr)
    assert b"ip" in enr or b"ip6" in enr
    assert b"udp" in enr

--------------------------------------------------------------------------------
/tests/core/v5_1/alexandria/conftest.py:
--------------------------------------------------------------------------------
import pytest


@pytest.fixture
async def alice_alexandria_client(alice, alice_network):
    async with alice.alexandria.client(alice_network) as alice_alexandria_client:
        yield alice_alexandria_client


@pytest.fixture
async def bob_alexandria_client(bob, bob_network):
    async with bob.alexandria.client(bob_network) as bob_alexandria_client:
        yield bob_alexandria_client


@pytest.fixture
async def alice_alexandria_network(alice, alice_network):
    async with alice.alexandria.network(alice_network) as alice_alexandria:
        yield alice_alexandria


@pytest.fixture
async def bob_alexandria_network(bob, bob_network):
    async with bob.alexandria.network(bob_network) as bob_alexandria:
        yield bob_alexandria

--------------------------------------------------------------------------------
/tests/core/v5_1/alexandria/test_message_encoding.py:
--------------------------------------------------------------------------------
from eth_enr.tools.factories import ENRFactory
from hypothesis import given
from hypothesis import strategies as st
import rlp

from ddht.v5_1.alexandria.messages import (
    FindContentMessage,
    FindNodesMessage,
    FoundContentMessage,
    FoundNodesMessage,
    PingMessage,
    PongMessage,
    decode_message,
)
from ddht.v5_1.alexandria.payloads import (
    FindContentPayload,
    FindNodesPayload,
    FoundContentPayload,
    FoundNodesPayload,
    PingPayload,
    PongPayload,
)


@given(
    enr_seq=st.integers(min_value=0, max_value=2 ** 32 - 1),
    advertisement_radius=st.integers(min_value=0, max_value=2 ** 256 - 1),
)
def test_ping_message_encoding_round_trip(enr_seq, advertisement_radius):
    """Ping: encoding to wire bytes then decoding yields an equal message."""
    payload = PingPayload(enr_seq=enr_seq, advertisement_radius=advertisement_radius)
    message = PingMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result == message


@given(
    enr_seq=st.integers(min_value=0, max_value=2 ** 32 - 1),
    advertisement_radius=st.integers(min_value=0, max_value=2 ** 256 - 1),
)
def test_pong_message_encoding_round_trip(enr_seq, advertisement_radius):
    """Pong: encoding to wire bytes then decoding yields an equal message."""
    payload = PongPayload(enr_seq=enr_seq, advertisement_radius=advertisement_radius)
    message = PongMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result == message


@given(
    distances=st.lists(
        st.integers(min_value=0, max_value=256), min_size=1, max_size=32, unique=True,
    ).map(tuple)
)
def test_find_nodes_message_encoding_round_trip(distances):
    """FindNodes: round trip over arbitrary unique distance tuples."""
    payload = FindNodesPayload(distances)
    message = FindNodesMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result == message


@given(num_enr_records=st.integers(min_value=0, max_value=5))
def test_found_nodes_message_encoding_round_trip(num_enr_records):
    """FoundNodes: round trip with RLP-encoded ENR records; payloads compared directly."""
    enrs = tuple(ENRFactory() for _ in range(num_enr_records))
    encoded_enrs = tuple(rlp.encode(enr) for enr in enrs)
    payload = FoundNodesPayload(num_enr_records, encoded_enrs)
    message = FoundNodesMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result.payload == message.payload


@given(content_key=st.binary(min_size=1, max_size=16),)
def test_find_content_message_encoding_round_trip(content_key):
    """FindContent: round trip over arbitrary content keys."""
    payload = FindContentPayload(content_key)
    message =
FindContentMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result == message


@given(data=st.data())
def test_found_content_message_encoding_round_trip(data):
    """FoundContent: round trip carrying either raw content or a set of ENRs."""
    is_content = data.draw(st.booleans())
    if is_content:
        content = data.draw(st.binary(min_size=32, max_size=32))
        enrs = ()
    else:
        num_enrs = data.draw(st.integers(min_value=0, max_value=3))
        enrs = tuple(ENRFactory() for _ in range(num_enrs))
        content = b""

    encoded_enrs = tuple(rlp.encode(enr) for enr in enrs)
    payload = FoundContentPayload(encoded_enrs, content)
    message = FoundContentMessage(payload)
    encoded = message.to_wire_bytes()
    result = decode_message(encoded)
    assert result.payload == message.payload

--------------------------------------------------------------------------------
/tests/core/v5_1/alexandria/test_subscriptions.py:
--------------------------------------------------------------------------------
import pytest
import trio

from ddht.v5_1.alexandria.messages import PingMessage, PongMessage
from ddht.v5_1.alexandria.payloads import PingPayload, PongPayload


@pytest.mark.trio
async def test_alexandria_client_subscription_via_talk_request(
    alice, bob, alice_alexandria_client, bob_alexandria_client,
):
    """A ping sent via talk request is delivered to a matching subscription."""
    async with bob_alexandria_client.subscribe(PingMessage) as subscription:
        await alice_alexandria_client.send_ping(
            bob.node_id,
            bob.endpoint,
            enr_seq=alice.enr.sequence_number,
            advertisement_radius=1234,
        )

        with trio.fail_after(1):
            message = await subscription.receive()

        assert isinstance(message.message, PingMessage)
        assert message.message.payload.enr_seq == alice.enr.sequence_number
        assert message.message.payload.advertisement_radius == 1234


@pytest.mark.trio
async def test_alexandria_client_subscription_via_talk_response(
    alice, bob, alice_alexandria_client, bob_alexandria_client,
):
    """A pong for a reserved request id is delivered to a matching subscription."""
    async with bob_alexandria_client.subscribe(PongMessage) as subscription:
        # reserve the request id so bob's client treats the pong as expected
        with bob_alexandria_client.request_tracker.reserve_request_id(
            alice.node_id, b"\x01\x02"
        ):
            with trio.fail_after(1):
                await alice_alexandria_client.send_pong(
                    bob.node_id,
                    bob.endpoint,
                    enr_seq=alice.enr.sequence_number,
                    advertisement_radius=1234,
                    request_id=b"\x01\x02",
                )

                message = await subscription.receive()

        assert isinstance(message.message, PongMessage)
        assert message.message.payload.enr_seq == alice.enr.sequence_number
        assert message.message.payload.advertisement_radius == 1234


@pytest.mark.trio
async def test_alexandria_client_subscription_via_talk_request_protocol_mismatch(
    alice_network, alice, bob, bob_alexandria_client, autojump_clock
):
    """A talk request with the wrong protocol id must not reach the subscription."""
    async with bob_alexandria_client.subscribe(PingMessage) as subscription:
        message = PingMessage(PingPayload(alice.enr.sequence_number, 1234))
        data_payload = message.to_wire_bytes()
        await alice_network.client.send_talk_request(
            bob.node_id,
            bob.endpoint,
            protocol=b"wrong-protocol-id",
            payload=data_payload,
        )
        # the subscription should never fire; expect the timeout instead
        with pytest.raises(trio.TooSlowError):
            with trio.fail_after(1):
                message = await subscription.receive()


@pytest.mark.trio
async def test_alexandria_client_subscription_via_talk_response_unknown_request_id(
    alice_network, alice, bob, bob_alexandria_client, autojump_clock
):
    """A talk response with an unreserved request id must not reach the subscription."""
    async with bob_alexandria_client.subscribe(PongMessage) as subscription:
        message = PongMessage(PongPayload(alice.enr.sequence_number, 1234))
        data_payload = message.to_wire_bytes()
        await alice_network.client.send_talk_response(
            bob.node_id,
            bob.endpoint,
            payload=data_payload,
            request_id=b"\x01\x02\x03",  # unknown/unexpected request_id
        )
        with pytest.raises(trio.TooSlowError):
            with trio.fail_after(1):
                message = await subscription.receive()

--------------------------------------------------------------------------------
/tests/core/v5_1/conftest.py:
--------------------------------------------------------------------------------
from eth_enr import OldSequenceNumber
import pytest

from ddht.tools.driver import Tester


@pytest.fixture
def tester():
    return Tester()


#
# Nodes
#
@pytest.fixture
async def alice(tester, bob):
    # alice is pre-seeded with bob's ENR; ignore stale-sequence-number races
    node = tester.node(name="alice")
    try:
        node.enr_db.set_enr(bob.enr)
    except OldSequenceNumber:
        pass
    return node


@pytest.fixture
async def bob(tester):
    return tester.node(name="bob")


@pytest.fixture
async def carol(tester):
    return tester.node(name="carol")


@pytest.fixture
async def driver(tester, alice, bob):
    return tester.session_pair(alice, bob)


#
# Clients
#
@pytest.fixture
async def alice_client(alice, bob, carol):
    async with alice.client() as alice_client:
        yield alice_client


@pytest.fixture
async def bob_client(alice, bob):
    async with bob.client() as bob_client:
        yield bob_client


#
# Networks
#
@pytest.fixture
async def alice_network(alice, bob):
    async with alice.network() as alice_network:
        yield alice_network


@pytest.fixture
async def bob_network(alice, bob):
    async with bob.network() as bob_network:
        yield bob_network

--------------------------------------------------------------------------------
/tests/core/v5_1/test_packet_encoding.py:
--------------------------------------------------------------------------------
from eth_enr.tools.factories import ENRFactory
import pytest

from
ddht.v5_1.messages import PingMessage
from ddht.v5_1.packets import (
    HandshakeHeader,
    HandshakePacket,
    MessagePacket,
    Packet,
    WhoAreYouPacket,
    decode_packet,
)


def test_message_packet_encoding():
    """A message packet survives an encode/decode round trip."""
    # fixed dummy key/nonce/node-id material keeps the test deterministic
    initiator_key = b"\x01" * 16
    aes_gcm_nonce = b"\x02" * 12
    source_node_id = b"\x03" * 32
    dest_node_id = b"\x04" * 32
    message = PingMessage(b"\x01", 0)
    auth_data = MessagePacket(source_node_id)

    packet = Packet.prepare(
        aes_gcm_nonce=aes_gcm_nonce,
        initiator_key=initiator_key,
        message=message,
        auth_data=auth_data,
        dest_node_id=dest_node_id,
    )
    packet_wire_bytes = packet.to_wire_bytes()
    result = decode_packet(packet_wire_bytes, dest_node_id)

    assert result == packet


def test_who_are_you_packet_encoding():
    """A WhoAreYou packet survives an encode/decode round trip."""
    initiator_key = b"\x01" * 16
    aes_gcm_nonce = b"\x02" * 12
    dest_node_id = b"\x04" * 32
    message = PingMessage(b"\x01", 0)
    auth_data = WhoAreYouPacket(id_nonce=b"\x06" * 16, enr_sequence_number=0x07)

    packet = Packet.prepare(
        aes_gcm_nonce=aes_gcm_nonce,
        initiator_key=initiator_key,
        message=message,
        auth_data=auth_data,
        dest_node_id=dest_node_id,
    )
    packet_wire_bytes = packet.to_wire_bytes()
    result = decode_packet(packet_wire_bytes, dest_node_id)

    assert result == packet


# exercised both with and without an attached ENR record
@pytest.mark.parametrize(
    "enr", (None, ENRFactory(),),
)
def test_handshake_packet_encoding(enr):
    """A handshake packet (with or without an ENR record) survives a round trip."""
    initiator_key = b"\x01" * 16
    aes_gcm_nonce = b"\x02" * 12
    source_node_id = b"\x03" * 32
    dest_node_id = b"\x04" * 32
    message = PingMessage(b"\x01", 0)
    auth_data = HandshakePacket(
        auth_data_head=HandshakeHeader(
            source_node_id=source_node_id, signature_size=64, ephemeral_key_size=33,
        ),
        id_signature=b"\x05" * 64,
        ephemeral_public_key=b"\x06" * 33,
        record=enr,
    )

    packet = Packet.prepare(
        aes_gcm_nonce=aes_gcm_nonce,
        initiator_key=initiator_key,
        message=message,
        auth_data=auth_data,
        dest_node_id=dest_node_id,
    )
    packet_wire_bytes = packet.to_wire_bytes()
    result = decode_packet(packet_wire_bytes, dest_node_id)

    assert result == packet

--------------------------------------------------------------------------------
/tests/core/v5_1/test_v51_bootnodes.py:
--------------------------------------------------------------------------------
from eth_enr import ENR
import pytest

from ddht.v5_1.constants import DEFAULT_BOOTNODES


@pytest.mark.parametrize(
    "enr_repr", DEFAULT_BOOTNODES,
)
def test_default_bootnodes_valid(enr_repr):
    """Each v5.1 default bootnode ENR parses and advertises an IP and a UDP port."""
    enr = ENR.from_repr(enr_repr)
    assert b"ip" in enr or b"ip6" in enr
    assert b"udp" in enr

--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
envlist=
    py{38}-core
    lint
    docs

[isort]
force_sort_within_sections=True
known_third_party=hypothesis,pytest,eth_utils,eth_keys,trio_typing,pytest_trio,trio,factory,coincurve,async_service,eth_hash,rlp,cached_property,xdg
known_first_party=ddht
multi_line_output=3
include_trailing_comma=True
force_grid_wrap=0
use_parentheses=True
line_length=88

[flake8]
max-line-length= 100
exclude= venv*,.tox,docs,build
ignore=W503,E203

[testenv]
usedevelop=True
setenv =
    MYPYPATH = {toxinidir}/stubs
passenv =
    HOME
commands=
    core: pytest {posargs:tests/core}
    docs: make build-docs
basepython =
    docs: python
    py38: python3.8
extras=
    test
    web3
    alexandria
    benchmark
    docs: doc
whitelist_externals=make

[testenv:lint]
basepython=python
extras=
    lint
    web3
commands=
    mypy -p {toxinidir}/ddht --config-file {toxinidir}/mypy.ini
    flake8 {toxinidir}/ddht {toxinidir}/tests
    isort --check-only --diff {toxinidir}/ddht {toxinidir}/tests
    black --check --diff {toxinidir}/ddht/ --check --diff {toxinidir}/tests/
    pydocstyle {toxinidir}/ddht {toxinidir}/tests
--------------------------------------------------------------------------------