├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ └── question.md
├── actions
│ └── coverage
│ │ └── action.yml
├── pull_request_template.md
└── workflows
│ ├── coverage.yml
│ ├── mypy.yml
│ ├── pr-comment-validate.yml
│ ├── python-publish.yml
│ ├── ruff.yml
│ ├── unittests.yml
│ └── weekly_mutation_tests.yml
├── .gitignore
├── .gitmodules
├── .readthedocs.yaml
├── .ruff.toml
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── __init__.py
├── create_test_coverage_report.py
├── doc
├── .ruff.toml
├── Makefile
├── basics
│ ├── discoverystrategy_tutorial.rst
│ ├── discoverystrategy_tutorial_1.py
│ ├── discoverystrategy_tutorial_2.py
│ ├── identity_tutorial.rst
│ ├── identity_tutorial_1.py
│ ├── identity_tutorial_integration
│ │ ├── __init__.py
│ │ ├── attestation_tutorial_attest.py
│ │ ├── attestation_tutorial_common.py
│ │ ├── attestation_tutorial_verify.py
│ │ └── main.py
│ ├── overlay_tutorial.rst
│ ├── overlay_tutorial_1.py
│ ├── overlay_tutorial_2.py
│ ├── overlay_tutorial_3.py
│ ├── overlay_tutorial_4.py
│ ├── overlay_tutorial_5.py
│ ├── requestcache_tutorial.rst
│ ├── requestcache_tutorial_1.py
│ ├── requestcache_tutorial_2.py
│ ├── requestcache_tutorial_3.py
│ ├── tasks_tutorial.rst
│ ├── tasks_tutorial_1.py
│ ├── tasks_tutorial_2.py
│ ├── tasks_tutorial_3.py
│ ├── tasks_tutorial_4.py
│ ├── tasks_tutorial_5.py
│ ├── tasks_tutorial_6.py
│ ├── tasks_tutorial_7.py
│ ├── testbase_tutorial.rst
│ ├── testbase_tutorial_1.py
│ └── testbase_tutorial_2.py
├── conf.py
├── further-reading
│ ├── advanced_identity.rst
│ ├── advanced_identity_1.py
│ ├── advanced_identity_2.py
│ ├── advanced_peer_discovery.rst
│ ├── anonymization.rst
│ ├── dht.rst
│ └── dht_1.py
├── index.rst
├── make.bat
├── preliminaries
│ └── install_libsodium.rst
├── reference
│ ├── bootstrapping.rst
│ ├── community_best_practices.rst
│ ├── configuration.rst
│ ├── keys.rst
│ ├── peer_discovery.rst
│ ├── resources
│ │ └── ipv8_peer_discovery.png
│ ├── serialization.rst
│ ├── serialization_1.py
│ ├── serialization_2.py
│ ├── serialization_3.py
│ ├── serialization_4.py
│ ├── serialization_5.py
│ ├── serialization_6.py
│ └── serialization_7.py
├── requirements.txt
├── resources
│ └── healthy_IPv8_overlay_collection.png
└── validate_examples.py
├── github_increment_version.py
├── ipv8
├── REST
│ ├── __init__.py
│ ├── asyncio_endpoint.py
│ ├── base_endpoint.py
│ ├── dht_endpoint.py
│ ├── identity_endpoint.py
│ ├── isolation_endpoint.py
│ ├── network_endpoint.py
│ ├── noblock_dht_endpoint.py
│ ├── overlays_endpoint.py
│ ├── rest_manager.py
│ ├── root_endpoint.py
│ ├── schema.py
│ └── tunnel_endpoint.py
├── __init__.py
├── attestation
│ ├── __init__.py
│ ├── communication_manager.py
│ ├── default_identity_formats.py
│ ├── identity
│ │ ├── __init__.py
│ │ ├── attestation.py
│ │ ├── community.py
│ │ ├── database.py
│ │ ├── manager.py
│ │ ├── metadata.py
│ │ └── payload.py
│ ├── identity_formats.py
│ ├── schema
│ │ ├── __init__.py
│ │ └── manager.py
│ ├── signed_object.py
│ ├── tokentree
│ │ ├── __init__.py
│ │ ├── token.py
│ │ └── tree.py
│ └── wallet
│ │ ├── __init__.py
│ │ ├── bonehexact
│ │ ├── __init__.py
│ │ ├── algorithm.py
│ │ ├── attestation.py
│ │ └── structs.py
│ │ ├── caches.py
│ │ ├── community.py
│ │ ├── database.py
│ │ ├── payload.py
│ │ ├── pengbaorange
│ │ ├── __init__.py
│ │ ├── algorithm.py
│ │ ├── attestation.py
│ │ ├── boudot.py
│ │ └── structs.py
│ │ └── primitives
│ │ ├── __init__.py
│ │ ├── attestation.py
│ │ ├── boneh.py
│ │ ├── cryptography_wrapper.py
│ │ ├── ec.py
│ │ ├── structs.py
│ │ └── value.py
├── bootstrapping
│ ├── __init__.py
│ ├── bootstrapper_interface.py
│ ├── dispersy
│ │ ├── __init__.py
│ │ └── bootstrapper.py
│ └── udpbroadcast
│ │ ├── __init__.py
│ │ └── bootstrapper.py
├── community.py
├── configuration.py
├── database.py
├── dht
│ ├── __init__.py
│ ├── churn.py
│ ├── community.py
│ ├── discovery.py
│ ├── payload.py
│ ├── provider.py
│ ├── routing.py
│ ├── storage.py
│ └── trie.py
├── keyvault
│ ├── __init__.py
│ ├── crypto.py
│ ├── keys.py
│ ├── private
│ │ ├── __init__.py
│ │ ├── libnaclkey.py
│ │ └── m2crypto.py
│ └── public
│ │ ├── __init__.py
│ │ ├── libnaclkey.py
│ │ └── m2crypto.py
├── lazy_community.py
├── loader.py
├── messaging
│ ├── __init__.py
│ ├── anonymization
│ │ ├── __init__.py
│ │ ├── caches.py
│ │ ├── community.py
│ │ ├── crypto.py
│ │ ├── endpoint.py
│ │ ├── exit_socket.py
│ │ ├── hidden_services.py
│ │ ├── payload.py
│ │ ├── pex.py
│ │ ├── tunnel.py
│ │ └── utils.py
│ ├── interfaces
│ │ ├── __init__.py
│ │ ├── dispatcher
│ │ │ ├── __init__.py
│ │ │ └── endpoint.py
│ │ ├── endpoint.py
│ │ ├── lan_addresses
│ │ │ ├── __init__.py
│ │ │ ├── addressprovider.py
│ │ │ ├── any_os
│ │ │ │ ├── __init__.py
│ │ │ │ ├── getaddrinfo.py
│ │ │ │ ├── netifaces.py
│ │ │ │ └── testnet1.py
│ │ │ ├── importshield.py
│ │ │ ├── interfaces.py
│ │ │ ├── unix
│ │ │ │ ├── __init__.py
│ │ │ │ ├── getifaddrs.py
│ │ │ │ └── ioctl.py
│ │ │ └── windows
│ │ │ │ ├── GetAdaptersAddresses.py
│ │ │ │ └── __init__.py
│ │ ├── network_stats.py
│ │ ├── statistics_endpoint.py
│ │ └── udp
│ │ │ ├── __init__.py
│ │ │ └── endpoint.py
│ ├── lazy_payload.py
│ ├── payload.py
│ ├── payload_dataclass.py
│ ├── payload_headers.py
│ └── serialization.py
├── overlay.py
├── peer.py
├── peerdiscovery
│ ├── __init__.py
│ ├── churn.py
│ ├── community.py
│ ├── discovery.py
│ ├── network.py
│ └── payload.py
├── requestcache.py
├── taskmanager.py
├── test
│ ├── REST
│ │ ├── __init__.py
│ │ ├── rest_base.py
│ │ ├── test_identity_endpoint.py
│ │ ├── test_isolation_endpoint.py
│ │ ├── test_network_endpoint.py
│ │ └── test_overlays_endpoint.py
│ ├── __init__.py
│ ├── attestation
│ │ ├── __init__.py
│ │ ├── identity
│ │ │ ├── __init__.py
│ │ │ ├── test_identity.py
│ │ │ └── test_manager.py
│ │ ├── tokentree
│ │ │ ├── __init__.py
│ │ │ ├── test_token.py
│ │ │ └── test_tree.py
│ │ └── wallet
│ │ │ ├── __init__.py
│ │ │ ├── attestation.txt
│ │ │ ├── attestation_big.txt
│ │ │ ├── attestation_range.txt
│ │ │ ├── bonehexact
│ │ │ ├── __init__.py
│ │ │ ├── test_attestation.py
│ │ │ └── test_structs.py
│ │ │ ├── pengbaorange
│ │ │ ├── __init__.py
│ │ │ └── test_boudot.py
│ │ │ ├── primitives
│ │ │ ├── __init__.py
│ │ │ ├── test_boneh.py
│ │ │ ├── test_ec.py
│ │ │ └── test_value.py
│ │ │ └── test_attestation_community.py
│ ├── base.py
│ ├── bootstrapping
│ │ ├── __init__.py
│ │ └── dispersy
│ │ │ ├── __init__.py
│ │ │ └── test_bootstrapper.py
│ ├── dht
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── test_churn.py
│ │ ├── test_community.py
│ │ ├── test_discovery.py
│ │ ├── test_provider.py
│ │ ├── test_routing.py
│ │ ├── test_storage.py
│ │ └── test_trie.py
│ ├── keyvault
│ │ ├── __init__.py
│ │ ├── test_crypto.py
│ │ ├── test_serialization.py
│ │ └── test_signature.py
│ ├── messaging
│ │ ├── __init__.py
│ │ ├── anonymization
│ │ │ ├── __init__.py
│ │ │ ├── mock.py
│ │ │ ├── test_community.py
│ │ │ ├── test_datachecker.py
│ │ │ ├── test_exit_socket.py
│ │ │ └── test_hiddenservices.py
│ │ ├── interfaces
│ │ │ ├── __init__.py
│ │ │ ├── dispatcher
│ │ │ │ ├── __init__.py
│ │ │ │ └── test_endpoint.py
│ │ │ ├── lan_addresses
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_addressprovider.py
│ │ │ │ ├── test_importshield.py
│ │ │ │ └── test_interfaces.py
│ │ │ ├── test_network_stats.py
│ │ │ ├── test_statistics_endpoint.py
│ │ │ └── udp
│ │ │ │ ├── __init__.py
│ │ │ │ └── test_endpoint.py
│ │ ├── test_lazy_payload.py
│ │ ├── test_payload_dataclass.py
│ │ └── test_serialization.py
│ ├── mocking
│ │ ├── __init__.py
│ │ ├── community.py
│ │ ├── discovery.py
│ │ ├── endpoint.py
│ │ ├── exit_socket.py
│ │ └── ipv8.py
│ ├── peerdiscovery
│ │ ├── __init__.py
│ │ ├── test_churn.py
│ │ ├── test_community.py
│ │ ├── test_edge_discovery.py
│ │ ├── test_network.py
│ │ └── test_random_discovery.py
│ ├── test_community.py
│ ├── test_configuration.py
│ ├── test_database.py
│ ├── test_loader.py
│ ├── test_peer.py
│ ├── test_requestcache.py
│ └── test_taskmanager.py
├── types.py
└── util.py
├── ipv8_service.py
├── mypy.ini
├── requirements.txt
├── run_all_tests.py
├── scripts
├── __scriptpath__.py
├── exitnode_ipv8_only_plugin.py
├── ipv8_plugin.py
├── tracker_plugin.py
├── tracker_reporter_plugin.py
└── tracker_service.py
├── setup.py
├── stresstest
├── __init__.py
├── __scriptpath__.py
├── bootstrap_introductions.py
├── bootstrap_introductions.r
├── bootstrap_rtt.py
├── peer_discovery.py
└── peer_discovery_defaults.py
└── systemd
├── ipv8-exit-node@.service
└── ipv8-tracker@.service
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: 'priority: unknown'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Traceback and/or steps to reproduce**
11 |
12 | ```python
13 | Insert your traceback here!
14 | ```
15 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: 'priority: unknown'
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Ask for clarification of properties or the use of this project
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/actions/coverage/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Generate coverage report'
2 | inputs:
3 | html_report_name:
4 | description: 'Artifact name for storing HTML coverage report. When omitted no report is stored.'
5 | required: false
6 | runs:
7 | using: "composite"
8 | steps:
9 | - name: Generate coverage report
10 | shell: bash
11 | run: |
12 | pip install coverage
13 | python create_test_coverage_report.py
14 | echo -e "Coverage report
\n" >> $GITHUB_STEP_SUMMARY
15 | cat coverage.md >> $GITHUB_STEP_SUMMARY
16 | echo '</details>' >> $GITHUB_STEP_SUMMARY
17 | - name: Upload coverage report
18 | if: ${{ inputs.html_report_name }}
19 | uses: actions/upload-artifact@v4
20 | with:
21 | name: ${{ inputs.html_report_name }}
22 | path: coverage/
23 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | Fixes #(issue number)
2 |
3 | This PR:
4 |
5 | - Adds (a new feature description)
6 | - Fixes (a minor bug description)
7 | - Updates (a change in behavior description)
8 | - Removes (a feature description)
9 |
10 | [In case of a large architectural change: please add a detailed description and an image here]
11 |
12 |
--------------------------------------------------------------------------------
/.github/workflows/coverage.yml:
--------------------------------------------------------------------------------
1 | name: Coverage
2 | on:
3 | push:
4 | branches:
5 | - master
6 | schedule:
7 | - cron: "10 6 * * 1,3,5" # Mon, Wed, Fri @ 6:10 UTC
8 | workflow_dispatch:
9 | jobs:
10 | linux:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - uses: actions/setup-python@v5
15 | with:
16 | python-version: '3.9'
17 | cache: 'pip'
18 | - run: python -m pip install -r requirements.txt
19 | - uses: ./.github/actions/coverage
20 | with:
21 | html_report_name: coverage-linux
22 | windows:
23 | runs-on: windows-latest
24 | steps:
25 | - uses: actions/checkout@v4
26 | - uses: actions/setup-python@v5
27 | with:
28 | python-version: '3.9'
29 | cache: 'pip'
30 | - uses: actions/cache/restore@v4
31 | id: restore_cache
32 | with:
33 | path: libsodium.dll
34 | key: cache_libsodium_dll
35 | - run: python -m pip install -r requirements.txt
36 | - uses: ./.github/actions/coverage
37 | with:
38 | html_report_name: coverage-windows
39 | - uses: actions/cache/save@v4
40 | with:
41 | path: libsodium.dll
42 | key: cache_libsodium_dll
43 | macos:
44 | runs-on: macos-latest
45 | steps:
46 | - uses: actions/checkout@v4
47 | - uses: actions/setup-python@v5
48 | with:
49 | python-version: '3.12'
50 | cache: 'pip'
51 | - run: |
52 | cp /System/Volumes/Data/opt/homebrew/lib/libsodium.dylib libsodium.dylib
53 | python -m pip install -r requirements.txt
54 | - uses: ./.github/actions/coverage
55 | with:
56 | html_report_name: coverage-macos
57 |
--------------------------------------------------------------------------------
/.github/workflows/mypy.yml:
--------------------------------------------------------------------------------
1 | name: Mypy
2 | on: [pull_request, workflow_dispatch]
3 | jobs:
4 | mypy:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v4
8 | - name: Setup Python 3.9
9 | uses: actions/setup-python@v5
10 | with:
11 | python-version: 3.9
12 | - name: Install mypy
13 | run: pip install mypy
14 | - name: Run mypy
15 | run: mypy -p ipv8
16 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 | release:
8 | types: [created]
9 |
10 | jobs:
11 | deploy:
12 |
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Set up Python
18 | uses: actions/setup-python@v2
19 | with:
20 | python-version: '3.x'
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install setuptools wheel twine
25 | - name: Build and publish
26 | env:
27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 | run: |
30 | python setup.py sdist bdist_wheel
31 | twine upload dist/*
32 |
--------------------------------------------------------------------------------
/.github/workflows/ruff.yml:
--------------------------------------------------------------------------------
1 | name: Ruff
2 | on: [pull_request, workflow_dispatch]
3 | jobs:
4 | ruff:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v4
8 | - name: Setup Python 3.9
9 | uses: actions/setup-python@v5
10 | with:
11 | python-version: 3.9
12 | - name: Install ruff
13 | run: pip install ruff
14 | - name: Get changed Python files
15 | id: changed-py-files
16 | uses: tj-actions/changed-files@v46.0.1
17 | with:
18 | files: |
19 | *.py
20 | **/*.py
21 | - name: Run ruff
22 | if: steps.changed-py-files.outputs.any_changed == 'true'
23 | run: ruff check ${{ steps.changed-py-files.outputs.all_changed_files }}
24 |
--------------------------------------------------------------------------------
/.github/workflows/unittests.yml:
--------------------------------------------------------------------------------
1 | name: Unittests
2 | on: [pull_request, workflow_dispatch]
3 | jobs:
4 | linux:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - uses: actions/checkout@v4
8 | - uses: actions/setup-python@v5
9 | with:
10 | python-version: '3.9'
11 | cache: 'pip'
12 | - run: python -m pip install -r requirements.txt
13 | - name: Run unit tests
14 | run: python run_all_tests.py -a
15 | windows:
16 | runs-on: windows-latest
17 | steps:
18 | - uses: actions/checkout@v4
19 | - uses: actions/setup-python@v5
20 | with:
21 | python-version: '3.9'
22 | cache: 'pip'
23 | - uses: actions/cache/restore@v4
24 | id: restore_cache
25 | with:
26 | path: libsodium.dll
27 | key: cache_libsodium_dll
28 | - run: python -m pip install -r requirements.txt
29 | - name: Run unit tests
30 | run: python run_all_tests.py -a
31 | macos:
32 | runs-on: macos-latest
33 | timeout-minutes: 2
34 | steps:
35 | - uses: actions/checkout@v4
36 | - uses: actions/setup-python@v5
37 | with:
38 | python-version: '3.12'
39 | cache: 'pip'
40 | - run: python -m pip install -r requirements.txt
41 | - name: Run unit tests
42 | run: |
43 | cp /System/Volumes/Data/opt/homebrew/lib/libsodium.dylib libsodium.dylib
44 | python run_all_tests.py -a
45 |
--------------------------------------------------------------------------------
/.github/workflows/weekly_mutation_tests.yml:
--------------------------------------------------------------------------------
1 | name: Weekly mutation tests (Friday 00:00)
2 | on:
3 | schedule:
4 | - cron: '0 0 * * 5'
5 |
6 | jobs:
7 | mutationtests:
8 | runs-on: ubuntu-latest
9 | if: ${{ github.repository == 'Tribler/py-ipv8' }}
10 | steps:
11 | - name: Checkout Tribler/py-ipv8
12 | uses: actions/checkout@v4
13 | with:
14 | fetch-depth: 0
15 | path: 'py-ipv8'
16 | - name: Check for changes
17 | run: |
18 | cd py-ipv8
19 | echo "NUM_COMMITS=$(git log --oneline --since '7 days ago' | wc -l)" >> $GITHUB_ENV
20 | - name: Setup Python 3.10
21 | uses: actions/setup-python@v5
22 | if: ${{ env.NUM_COMMITS > 0 }}
23 | with:
24 | python-version: '3.10'
25 | cache: 'pip'
26 | - name: Install dependencies
27 | if: ${{ env.NUM_COMMITS > 0 }}
28 | run: |
29 | cd py-ipv8
30 | python -m pip install -r requirements.txt
31 | - name: Checkout Tribler/mutpy
32 | if: ${{ env.NUM_COMMITS > 0 }}
33 | uses: actions/checkout@v4
34 | with:
35 | repository: 'Tribler/mutpy'
36 | path: 'mutpy'
37 | - name: Run mutation tests
38 | if: ${{ env.NUM_COMMITS > 0 }}
39 | run: |
40 | cd mutpy
41 | python3 -m pip install .
42 | cd bin
43 | python3 github_report.py --codebase ipv8
44 | - name: Publish report
45 | if: ${{ env.NUM_COMMITS > 0 }}
46 | run: cat mutpy/bin/report.md >> $GITHUB_STEP_SUMMARY
47 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | #Trial tests temp dirs
50 | _trial_temp*/
51 | _trial_temp*
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # pyenv
78 | .python-version
79 |
80 | # celery beat schedule file
81 | celerybeat-schedule
82 |
83 | # SageMath parsed files
84 | *.sage.py
85 |
86 | # dotenv
87 | .env
88 |
89 | # virtualenv
90 | .venv
91 | venv/
92 | ENV/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
107 | # PyCharm
108 | .idea/
109 |
110 | # Coverage
111 | coverage/
112 |
113 | # Twistd
114 | twisted/plugins/dropin.cache
115 |
116 | # Key files
117 | *.pem
118 |
119 | # sqlite folder
120 | sqlite/
121 |
122 | # macOS DS_Store files
123 | **.DS_Store
124 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/.gitmodules
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | formats: all
4 |
5 | build:
6 | # Check https://docs.readthedocs.io/en/stable/config-file/v2.html#build-os
7 | os: ubuntu-22.04
8 | tools:
9 | # Check https://docs.readthedocs.io/en/stable/config-file/v2.html#build-tools-python
10 | python: "3.11"
11 |
12 | python:
13 | install:
14 | - requirements: doc/requirements.txt
15 | - requirements: requirements.txt
16 |
17 | sphinx:
18 | builder: html
19 | configuration: doc/conf.py
20 | fail_on_warning: false
21 |
--------------------------------------------------------------------------------
/.ruff.toml:
--------------------------------------------------------------------------------
1 | lint.select = ["ALL"]
2 | lint.fixable = ["ALL"]
3 | lint.unfixable = []
4 |
5 | lint.ignore = [
6 | "ANN003",
7 | "ARG001",
8 | "ARG002",
9 | "ARG005",
10 | "BLE001",
11 | "COM812",
12 | "COM819",
13 | "D100",
14 | "D104",
15 | "D200",
16 | "D203",
17 | "D205",
18 | "D212",
19 | "D400",
20 | "D401",
21 | "D404",
22 | "D405",
23 | "E501",
24 | "E722",
25 | "E731",
26 | "E741",
27 | "EM",
28 | "ERA001",
29 | "F403",
30 | "F405",
31 | "FBT",
32 | "INP001",
33 | "N818",
34 | "N999",
35 | "PD",
36 | "PERF203",
37 | "PLR2004",
38 | "PT",
39 | "PTH",
40 | "Q000",
41 | "Q003",
42 | "RUF012",
43 | "S101",
44 | "S104",
45 | "S105",
46 | "S106",
47 | "S107",
48 | "S303",
49 | "S311",
50 | "S324",
51 | "TID",
52 | "TRY002",
53 | "TRY300",
54 | "TRY301",
55 | "TRY401",
56 | "UP006", # Backward compatibility, remove after Python >= 3.9
57 | ]
58 |
59 | exclude = [
60 | ".direnv",
61 | ".eggs",
62 | ".git",
63 | ".git-rewrite",
64 | ".github",
65 | ".hg",
66 | ".idea",
67 | ".mypy_cache",
68 | ".nox",
69 | ".pants.d",
70 | ".pytest_cache",
71 | ".pytype",
72 | ".ruff_cache",
73 | ".svn",
74 | ".tox",
75 | ".venv",
76 | "__pycache__",
77 | "__pypackages__",
78 | "_build",
79 | "buck-out",
80 | "build",
81 | "dist",
82 | "node_modules",
83 | "venv",
84 | ]
85 |
86 | line-length = 120
87 |
88 | # Allow unused variables when underscore-prefixed.
89 | lint.dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
90 |
91 | target-version = "py39"
92 |
93 | [lint.pylint]
94 | max-args = 6
95 | max-returns = 8
96 | max-branches = 12
97 | max-statements = 50
98 |
99 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | Any contributor to the IPv8 project (whether posting an issue or contributing code) is expected to follow these 3 rules.
4 | In case of minor transgressions you will be warned.
5 | Major transgressions will result in banishment from this project.
6 | Administrators reserve the right to judge the severity of your transgression: it is not up for debate.
7 |
8 | ### 1. Abide by the Golden Rule.
9 |
10 | https://en.wikipedia.org/wiki/Golden_Rule
11 |
12 | Examples of violations include:
13 |
14 | - Posting non-constructive issues (e.g. posting an issue titled "I don't like you" without content).
15 | - Excessive trolling (e.g. to the point of violating rules 2 and 3).
16 | - Aggression (e.g. swearing at others or sexual intimidation).
17 | - Not respecting someone's time (e.g. posting exceptionally long issues or linking to unrelated resources).
18 |
19 | Violations of this nature will **usually result in a warning and an edit or removal of your original content and may be accompanied by a ban**.
20 |
21 | ### 2. Respect the Universal Declaration of Human Rights.
22 |
23 | https://www.un.org/en/universal-declaration-human-rights/
24 |
25 | Examples of violations include judging contributions based on:
26 |
27 | - Racism.
28 | - Sexism.
29 | - Political or theological views.
30 |
31 | Violations of this nature are usually criminalized by law and will **always result in a ban without warning**.
32 | The offending content will be removed.
33 |
34 | ### 3. Be professional and scientific.
35 |
36 | IPv8 should be a safe space for development, free from societal pressures and political games.
37 |
38 | Examples of violations include:
39 |
40 | - Changing code without fixing a bug or implementing a feature (e.g. including a new dependency because "everyone else is using it"). This leads to edit wars, see also https://en.wikipedia.org/wiki/Wikipedia:Lamest_edit_wars.
41 | - Gaming GitHub metrics (e.g. insisting on being the one to merge a Pull Request to get a larger amount of line changes).
42 | - Being pedantic or belittling when giving feedback (e.g. insulting someone's education when pointing out a flaw).
43 | - Ungracefully receiving feedback (e.g. closing an unrelated Pull Request if your changes did not get accepted).
44 | - Administrator rights abuse (e.g. not adhering to the contributing guidelines "because you're an administrator").
45 |
46 | Violations of this nature will **usually result in a warning and an edit of your original content**. But, in extreme cases, **may also result in a ban**.
47 |
--------------------------------------------------------------------------------
/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/__init__.py
--------------------------------------------------------------------------------
/doc/.ruff.toml:
--------------------------------------------------------------------------------
1 | extend = "../.ruff.toml"
2 |
3 | extend-ignore = [
4 | "ANN401", # We want ``Any`` in some cases.
5 | "D", # Documentation happens in the RST, not in the code.
6 | "T201" # We print things for clarity: this is not production code.
7 | ]
8 |
9 | [pylint]
10 | max-args = 7
11 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
20 |
--------------------------------------------------------------------------------
/doc/basics/discoverystrategy_tutorial.rst:
--------------------------------------------------------------------------------
1 | Network IO and the DiscoveryStrategy
2 | ====================================
3 |
4 | This document assumes you have a basic understanding of asyncio tasks, as documented in `the tasks tutorial <../basics/tasks_tutorial.html>`_.
5 | You will learn how to use IPv8's ``DiscoveryStrategy`` class to avoid network congestion.
6 |
7 | The DiscoveryStrategy
8 | ---------------------
9 |
10 | IPv8 only manages one socket (``Endpoint``), which is most likely using the UDP protocol.
11 | If every ``Community`` starts sending at exactly the same time, the UDP socket is overpowered and packets are dropped.
12 | To counter this, IPv8 has the ``DiscoveryStrategy`` class.
13 |
14 | An IPv8 instance will call each of its registered ``DiscoveryStrategy`` instances sequentially to avoid network I/O clashes.
15 | If you have an ``interval`` task in your ``TaskManager`` that leads to network I/O, you should consider converting it to a ``DiscoveryStrategy``.
16 | You can make your own subclass as follows:
17 |
18 | .. literalinclude:: discoverystrategy_tutorial_1.py
19 | :lines: 13-20
20 |
21 | Note that a ``DiscoveryStrategy`` should be thread-safe.
22 | You can use the ``walk_lock`` for thread safety.
23 |
24 | Using a DiscoveryStrategy
25 | -------------------------
26 |
27 | You can register your ``DiscoveryStrategy`` with a running ``IPv8`` instance as follows:
28 |
29 | .. literalinclude:: discoverystrategy_tutorial_1.py
30 | :lines: 23-28
31 |
32 | Note that we specify a ``target_peers`` argument.
33 | This argument specifies the number of peers after which the ``DiscoveryStrategy`` should no longer be called.
34 | Calls will be resumed when the number of peers in your ``Community`` dips below this value again.
35 | For example, the built-in ``RandomWalk`` strategy can be configured to stop finding new peers once an overlay already has ``20`` or more peers.
36 | In this example we have used the magic value ``-1``, which causes ``IPv8`` to never stop calling this strategy.
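
For example, a minimal sketch (assuming a running ``IPv8`` instance called ``ipv8_instance`` and an ``overlay`` obtained from it, as in the snippet above) that registers the built-in ``RandomWalk`` with a cut-off of 20 peers:

.. code-block:: python

    from ipv8.peerdiscovery.discovery import RandomWalk

    # Stop calling this strategy once the overlay knows 20 or more peers.
    ipv8_instance.add_strategy(overlay, RandomWalk(overlay), 20)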
37 |
38 | You can also load your strategy through the ``configuration`` or ``loader``.
39 | First, an example of how to do this with the ``configuration``:
40 |
41 | .. literalinclude:: discoverystrategy_tutorial_2.py
42 | :lines: 16-37
43 |
44 | Note that you can add as many strategies as you want to an overlay.
45 | Also note that for IPv8 to link the name ``"MyDiscoveryStrategy"`` to a class, you need to define it in your ``Community``'s ``get_available_strategies()`` dictionary.
46 |
47 | Lastly, you can also add your custom ``MyDiscoveryStrategy`` class to a ``CommunityLauncher``, as follows:
48 |
49 | .. code-block:: python
50 |
51 | @overlay('my_module.some_submodule', 'MyCommunity')
52 | @walk_strategy(MyDiscoveryStrategy)
53 | class MyLauncher(CommunityLauncher):
54 | pass
55 |
56 | This is the shortest way.
57 |
--------------------------------------------------------------------------------
/doc/basics/discoverystrategy_tutorial_1.py:
--------------------------------------------------------------------------------
1 | from os import urandom
2 | from random import choice
3 |
4 | from ipv8.community import Community
5 | from ipv8.peerdiscovery.discovery import DiscoveryStrategy
6 | from ipv8.types import IPv8
7 |
8 |
9 | class MyCommunity(Community):
10 | community_id = urandom(20)
11 |
12 |
13 | class MyDiscoveryStrategy(DiscoveryStrategy):
14 |
15 | def take_step(self) -> None:
16 | with self.walk_lock:
17 | # Insert your logic here. For example:
18 | if self.overlay.get_peers():
19 | peer = choice(self.overlay.get_peers())
20 | self.overlay.send_introduction_request(peer)
21 |
22 |
23 | def main(ipv8_instance: IPv8) -> None:
24 | overlay = ipv8_instance.get_overlay(MyCommunity)
25 | target_peers = -1
26 | ipv8_instance.add_strategy(overlay,
27 | MyDiscoveryStrategy(overlay),
28 | target_peers)
29 |
--------------------------------------------------------------------------------
/doc/basics/discoverystrategy_tutorial_2.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import os
4 |
5 | from ipv8.community import Community
6 | from ipv8.configuration import DISPERSY_BOOTSTRAPPER, get_default_configuration
7 | from ipv8.peerdiscovery.discovery import DiscoveryStrategy
8 |
9 |
10 | class MyDiscoveryStrategy(DiscoveryStrategy):
11 |
12 | def take_step(self) -> None:
13 | pass
14 |
15 |
16 | class MyCommunity(Community):
17 | community_id = os.urandom(20)
18 |
19 | def get_available_strategies(self) -> dict[str, type[DiscoveryStrategy]]:
20 | return {"MyDiscoveryStrategy": MyDiscoveryStrategy}
21 |
22 |
23 | definition = {
24 | 'strategy': "MyDiscoveryStrategy",
25 | 'peers': -1,
26 | 'init': {}
27 | }
28 |
29 | config = get_default_configuration()
30 | config['overlays'] = [{
31 | 'class': 'MyCommunity',
32 | 'key': "anonymous id",
33 | 'walkers': [definition],
34 | 'bootstrappers': [DISPERSY_BOOTSTRAPPER.copy()],
35 | 'initialize': {},
36 | 'on_start': []
37 | }]
38 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_1.py:
--------------------------------------------------------------------------------
1 | from asyncio import run
2 | from base64 import b64encode
3 |
4 | from ipv8.configuration import get_default_configuration
5 | from ipv8.REST.rest_manager import RESTManager
6 | from ipv8.util import run_forever
7 | from ipv8_service import IPv8
8 |
9 |
10 | async def start_community() -> None:
11 | for peer_id in [1, 2]:
12 | configuration = get_default_configuration()
13 | configuration['keys'] = [
14 | {'alias': "anonymous id", 'generation': "curve25519", 'file': f"keyfile_{peer_id}.pem"}]
15 | configuration['working_directory'] = f"state_{peer_id}"
16 | configuration['overlays'] = []
17 |
18 | # Start the IPv8 service
19 | ipv8 = IPv8(configuration)
20 | await ipv8.start()
21 | rest_manager = RESTManager(ipv8)
22 | await rest_manager.start(14410 + peer_id)
23 |
24 | # Print the peer for reference
25 | print("Starting peer", b64encode(ipv8.keys["anonymous id"].mid))
26 |
27 | await run_forever()
28 |
29 |
30 | run(start_community())
31 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_integration/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | 1. Run ``attestation_tutorial_attest``.
3 | 2. Run ``attestation_tutorial_verify``.
4 |
5 | May freeze in case of horrible error: be sure to kill scripts if they run longer than ~1 minute.
6 | """
7 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_integration/attestation_tutorial_attest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 |
4 | from attestation_tutorial_common import finish, http_get, http_post, start, urlstr, wait_for_list
5 |
6 | # Remove the output of previous experiments.
7 | if os.path.exists('./state_1'):
8 | shutil.rmtree('./state_1')
9 | if os.path.exists('./state_2'):
10 | shutil.rmtree('./state_2')
11 |
12 | start()
13 | print("Enrollment/Attestation flow")
14 |
15 | print("0. SANITY CHECK")
16 | http_get("http://localhost:14411/identity/pseudonym1/peers")
17 | http_get("http://localhost:14412/identity/pseudonym2/peers")
18 | peer1_neighborhood = wait_for_list("http://localhost:14411/identity/pseudonym1/peers", "peers")
19 | peer2_neighborhood = wait_for_list("http://localhost:14412/identity/pseudonym2/peers", "peers")
20 |
21 | peer1_id = urlstr(peer2_neighborhood[0])
22 | peer2_id = urlstr(peer1_neighborhood[0])
23 |
24 | print("Peer 1:", peer1_id)
25 | print("Peer 2:", peer2_id)
26 |
27 | print("Peer 1 attributes:", http_get("http://localhost:14411/identity/pseudonym1/credentials"))
28 | print("Peer 2 attributes:", http_get("http://localhost:14412/identity/pseudonym2/credentials"))
29 |
30 | print("1. ATTESTATION REQUEST")
31 | print("Request attestation from peer 2:",
32 | http_post(f"http://localhost:14411/identity/pseudonym1/request/{peer2_id}",
33 | {"Content-Type": "application/json"},
34 | b'{"name":"my_attribute","schema":"id_metadata","metadata":{}}'))
35 |
36 | print("2. ATTESTATION")
37 | peer2_outstanding_requests = wait_for_list("http://localhost:14412/identity/pseudonym2/outstanding/attestations",
38 | "requests")
39 | print("Peer 2 outstanding requests:", peer2_outstanding_requests)
40 |
41 | print("Peer 2 attesting to outstanding request:",
42 | http_post(f"http://localhost:14412/identity/pseudonym2/attest/{peer1_id}",
43 | {"Content-Type": "application/json"},
44 | b'{"name":"my_attribute","value":"dmFsdWU="}'))
45 |
46 | print("3. CHECK")
47 | peer1_attributes = http_get("http://localhost:14411/identity/pseudonym1/credentials")
48 | print("Peer 1 attributes:", peer1_attributes)
49 | print("Peer 2 attributes:", http_get("http://localhost:14412/identity/pseudonym2/credentials"))
50 |
51 | assert len(peer1_attributes['names']) > 0
52 |
53 | print("X. DONE!")
54 | finish()
55 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_integration/attestation_tutorial_common.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import base64
4 | import json
5 | import os
6 | import signal
7 | import subprocess
8 | import time
9 | import urllib.parse
10 | import urllib.request
11 | from typing import Any, cast
12 |
13 | PROCESS = None
14 | BASE_HEADERS = {"X-Rendezvous": base64.b64encode(os.urandom(20)).decode()}
15 |
16 |
17 | def http_get(url: str) -> Any:
18 | """
19 | Perform an HTTP GET request to the given URL.
20 | """
21 | return json.loads(urllib.request.urlopen(urllib.request.Request(url)).read().decode()) # noqa: S310
22 |
23 |
24 | def http_post(url: str, headers: dict[str, str] | None = None, data: bytes | None = None) -> Any:
25 | """
26 | Perform an HTTP POST request to the given URL.
27 | """
28 | if headers:
29 | headers.update(BASE_HEADERS)
30 | return json.loads(urllib.request.urlopen(urllib.request.Request(url, # noqa: S310
31 | method="PUT", headers=headers, data=data))
32 | .read().decode())
33 |
34 |
35 | def urlstr(s: str) -> str:
36 | """
37 | Make the given string URL safe.
38 | """
39 | return urllib.parse.quote(s, safe='')
40 |
41 |
42 | def wait_for_list(url: str, element: str | None = None) -> list:
43 | """
44 | Poll an endpoint until output (a list) is available.
45 | """
46 | out = []
47 | while not out:
48 | out = http_get(url)
49 | if element:
50 | out = out[element]
51 | time.sleep(0.5)
52 | return out
53 |
54 |
55 | def start() -> None:
56 | """
57 | Run the main.py script and wait for it to finish initializing.
58 | """
59 | global PROCESS # noqa: PLW0603
60 | PROCESS = subprocess.Popen('python3 main.py', shell=True, preexec_fn=os.setsid) # noqa: PLW1509,S602,S607
61 | os.waitpid(PROCESS.pid, os.P_NOWAITO)
62 | time.sleep(5.0)
63 |
64 |
65 | def finish() -> None:
66 | """
67 | Kill our two IPv8 instances (running in the same process).
68 | """
69 | process = cast(subprocess.Popen, PROCESS)
70 | os.killpg(os.getpgid(process.pid), signal.SIGTERM)
71 | process.communicate()
72 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_integration/attestation_tutorial_verify.py:
--------------------------------------------------------------------------------
1 | from attestation_tutorial_common import finish, http_get, http_post, start, urlstr, wait_for_list
2 |
3 | start()
4 | print("Attribute verification flow")
5 |
6 | print("0. SANITY CHECK")
7 | http_get("http://localhost:14411/identity/pseudonym1/peers")
8 | http_get("http://localhost:14412/identity/pseudonym2/peers")
9 | peer1_neighborhood = wait_for_list("http://localhost:14411/identity/pseudonym1/peers", "peers")
10 | peer2_neighborhood = wait_for_list("http://localhost:14412/identity/pseudonym2/peers", "peers")
11 |
12 | peer1_id = urlstr(peer2_neighborhood[0])
13 | peer2_id = urlstr(peer1_neighborhood[0])
14 |
15 | print("Peer 1:", peer1_id)
16 | print("Peer 2:", peer2_id)
17 |
18 | peer1_attributes = http_get("http://localhost:14411/identity/pseudonym1/credentials")['names']
19 | peer2_attributes = http_get("http://localhost:14412/identity/pseudonym2/credentials")['names']
20 |
21 | print("Peer 1 attributes:", peer1_attributes)
22 | print("Peer 2 attributes:", peer2_attributes)
23 |
24 | attribute_hash = peer1_attributes[-1]["hash"].encode()
25 |
26 | print("1. VERIFICATION REQUEST")
27 | print("Request verification from peer 1:",
28 | http_post(f"http://localhost:14412/identity/pseudonym2/verify/{peer1_id}",
29 | {"Content-Type": "application/json"},
30 | b'{"hash":"' + attribute_hash + b'","value":"dmFsdWU=","schema":"id_metadata"}'))
31 |
32 | print("2. VERIFICATION ")
33 | peer1_outstanding_requests = wait_for_list("http://localhost:14411/identity/pseudonym1/outstanding/verifications",
34 | "requests")
35 | print("Peer 1 outstanding verification requests:", peer1_outstanding_requests)
36 |
37 | print("Peer 1 allow verification of outstanding request:",
38 | http_post(f"http://localhost:14411/identity/pseudonym1/allow/{peer2_id}",
39 | {"Content-Type": "application/json"},
40 | b'{"name":"my_attribute"}'))
41 |
42 | print("3. CHECK")
43 | verification_output = wait_for_list("http://localhost:14412/identity/pseudonym2/verifications", 'outputs')
44 | print("Peer 2 verification output:", )
45 | assert verification_output[0]['match'] > 0.9
46 |
47 | print("X. DONE!")
48 | finish()
49 |
--------------------------------------------------------------------------------
/doc/basics/identity_tutorial_integration/main.py:
--------------------------------------------------------------------------------
1 | from asyncio import run, sleep
2 | from base64 import b64encode
3 |
4 | from ipv8.configuration import get_default_configuration
5 | from ipv8.REST.rest_manager import RESTManager
6 | from ipv8.util import run_forever
7 | from ipv8_service import IPv8
8 |
9 |
10 | async def start_community() -> None:
11 | for peer_id in [1, 2]:
12 | configuration = get_default_configuration()
13 | configuration['logger']['level'] = "ERROR"
14 | configuration['keys'] = [{'alias': "anonymous id",
15 | 'generation': "curve25519",
16 | 'file': f"keyfile_{peer_id}.pem"}]
17 | configuration['working_directory'] = f"state_{peer_id}"
18 | configuration['overlays'] = []
19 |
20 | # Start the IPv8 service
21 | ipv8 = IPv8(configuration)
22 | await ipv8.start()
23 | rest_manager = RESTManager(ipv8)
24 |
25 | # We REALLY want this particular port, keep trying
26 | keep_trying = True
27 | while keep_trying:
28 | try:
29 | await rest_manager.start(14410 + peer_id)
30 | keep_trying = False
31 | except OSError:
32 | await sleep(1.0)
33 |
34 | # Print the peer for reference
35 | print("Starting peer", b64encode(ipv8.keys["anonymous id"].mid))
36 |
37 | await run_forever()
38 |
39 |
40 | run(start_community())
41 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial.rst:
--------------------------------------------------------------------------------
1 |
2 | Creating your first overlay
3 | ===========================
4 |
5 | This document assumes you have installed all of the dependencies as instructed in the project ``README.md``.
6 | You will learn how to construct a *network overlay* using IPv8.
7 |
8 | Running the IPv8 service
9 | ------------------------
10 |
11 | Fill your ``main.py`` file with the following code:
12 |
13 | .. literalinclude:: overlay_tutorial_1.py
14 |
15 | You can now run this file using Python as follows:
16 |
17 | .. code-block:: bash
18 |
19 | python3 main.py
20 |
21 | You should see some debug information being printed to your terminal.
22 | If this step failed, you are probably missing dependencies.
23 |
24 | If everything is running correctly: congratulations!
25 | You have just run the IPv8 service for the first time.
26 |
27 | Running two IPv8 services
28 | -------------------------
29 |
30 | Now that we have managed to create an IPv8-service instance, we want to create a second instance.
31 | This way we can start testing the network overlay with multiple instances.
32 | To try this, fill your ``main.py`` file with the following code:
33 |
34 | .. literalinclude:: overlay_tutorial_2.py
35 |
36 | If you were successful, you should now see double the debug information being printed to your terminal.
37 |
38 | Loading a custom overlay
39 | ------------------------
40 |
41 | Now that we can launch two instances, let's create the actual network overlay.
42 | To do this, fill your ``main.py`` file with the following code:
43 |
44 | .. literalinclude:: overlay_tutorial_3.py
45 |
46 | As we replaced the default overlays, you should no longer see any debug information being printed to your terminal.
47 | Our overlay is now loaded twice, but it is still not doing anything.
48 |
49 | Printing the known peers
50 | ------------------------
51 |
52 | Like every DHT-based network overlay framework, IPv8 needs some time to find peers.
53 | We will now modify ``main.py`` again to print the current number of peers:
54 |
55 | .. literalinclude:: overlay_tutorial_4.py
56 |
57 | Running this should yield something like the following output:
58 |
59 | .. code-block:: bash
60 |
61 | $ python main.py
62 | I am: Peer<0.0.0.0:0:8090, dxGFpQ4awTMz826HOVCB5OoiPPI=> I found: Peer<0.0.0.0:0:8091, YfHrKJR4O72/k/FBYYxMIQwOb1U=>
63 | I am: Peer<0.0.0.0:0:8091, YfHrKJR4O72/k/FBYYxMIQwOb1U=> I found: Peer<0.0.0.0:0:8090, dxGFpQ4awTMz826HOVCB5OoiPPI=>
64 |
65 | .. warning::
66 | You should never use the ``address`` of a ``Peer`` as its identifier.
67 | A ``Peer``'s ``address`` can change over time!
68 | Instead, use the ``mid`` of a Peer (which is the ``SHA-1`` hash of its public key) or its ``public_key.key_to_bin()`` (the serialized form of the public key).
69 | The public key of a ``Peer`` never changes.
70 |
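For reference, a tiny sketch (assuming a ``Peer`` instance named ``peer``) of the stable identifiers mentioned in the warning:

.. code-block:: python

    # The 20-byte SHA-1 hash of the peer's public key: short and stable.
    short_id = peer.mid
    # The serialized public key: longer, but uniquely identifies the peer.
    full_id = peer.public_key.key_to_bin()
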
71 | Adding messages
72 | ---------------
73 |
74 | As an example for adding messages, we will now make a Lamport clock for three peers.
75 | Update your ``main.py`` once again to contain the following code:
76 |
77 | .. literalinclude:: overlay_tutorial_5.py
78 |
79 | If you run this, you should see the three peers actively trying to establish an ever-increasing global clock value.
80 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial_1.py:
--------------------------------------------------------------------------------
1 | from asyncio import run
2 |
3 | from ipv8.configuration import get_default_configuration
4 | from ipv8.util import run_forever
5 | from ipv8_service import IPv8
6 |
7 |
8 | async def start_ipv8() -> None:
9 | # Create an IPv8 object with the default settings.
10 | ipv8 = IPv8(get_default_configuration())
11 | await ipv8.start()
12 |
13 | # Wait forever (or until the user presses Ctrl+C)
14 | await run_forever()
15 |
16 | # Shutdown IPv8. To keep things simple, we won't stop IPv8 in the remainder of the tutorial.
17 | await ipv8.stop()
18 |
19 | # Create a new event loop and run a task that starts an IPv8 instance.
20 | run(start_ipv8())
21 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial_2.py:
--------------------------------------------------------------------------------
1 | from asyncio import run
2 |
3 | from ipv8.configuration import get_default_configuration
4 | from ipv8.util import run_forever
5 | from ipv8_service import IPv8
6 |
7 |
8 | async def start_ipv8() -> None:
9 | # The first IPv8 will attempt to claim a port.
10 | await IPv8(get_default_configuration()).start()
11 | # The second IPv8 will attempt to claim a port.
12 | # It cannot claim the same port and will end up claiming a different one.
13 | await IPv8(get_default_configuration()).start()
14 | await run_forever()
15 |
16 |
17 | run(start_ipv8())
18 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial_3.py:
--------------------------------------------------------------------------------
1 | import os
2 | from asyncio import run
3 |
4 | from ipv8.community import Community
5 | from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
6 | from ipv8.util import run_forever
7 | from ipv8_service import IPv8
8 |
9 |
10 | class MyCommunity(Community):
11 | # Register this community with a randomly generated community ID.
12 | # Other peers will connect to this community based on this identifier.
13 | community_id = os.urandom(20)
14 |
15 |
16 | async def start_communities() -> None:
17 | for i in [1, 2]:
18 | builder = ConfigBuilder().clear_keys().clear_overlays()
19 | # If we actually want to communicate between two different peers
20 | # we need to assign them different keys.
21 | # We will generate an EC key called 'my peer' which has 'medium'
22 | # security and will be stored in file 'ecI.pem' where 'I' is replaced
23 | # by the peer number (1 or 2).
24 | builder.add_key("my peer", "medium", f"ec{i}.pem")
25 | # Instruct IPv8 to load our custom overlay, registered in _COMMUNITIES.
26 | # We use the 'my peer' key, which we registered before.
27 | # We will attempt to find other peers in this overlay using the
28 | # RandomWalk strategy, until we find 10 peers.
29 | # We do not provide additional startup arguments or a function to run
30 | # once the overlay has been initialized.
31 | builder.add_overlay("MyCommunity", "my peer",
32 | [WalkerDefinition(Strategy.RandomWalk,
33 | 10, {'timeout': 3.0})],
34 | default_bootstrap_defs, {}, [])
35 | await IPv8(builder.finalize(),
36 | extra_communities={'MyCommunity': MyCommunity}).start()
37 | await run_forever()
38 |
39 |
40 | run(start_communities())
41 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial_4.py:
--------------------------------------------------------------------------------
1 | import os
2 | from asyncio import run
3 |
4 | from ipv8.community import Community
5 | from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
6 | from ipv8.peerdiscovery.network import PeerObserver
7 | from ipv8.types import Peer
8 | from ipv8.util import run_forever
9 | from ipv8_service import IPv8
10 |
11 |
12 | class MyCommunity(Community, PeerObserver):
13 | community_id = os.urandom(20)
14 |
15 | def on_peer_added(self, peer: Peer) -> None:
16 | print("I am:", self.my_peer, "I found:", peer)
17 |
18 | def on_peer_removed(self, peer: Peer) -> None:
19 | pass
20 |
21 | def started(self) -> None:
22 | self.network.add_peer_observer(self)
23 |
24 |
25 | async def start_communities() -> None:
26 | for i in [1, 2]:
27 | builder = ConfigBuilder().clear_keys().clear_overlays()
28 | builder.add_key("my peer", "medium", f"ec{i}.pem")
29 | # We provide the 'started' function to the 'on_start'.
30 | # We will call the overlay's 'started' function without any
31 | # arguments once IPv8 is initialized.
32 | builder.add_overlay("MyCommunity", "my peer",
33 | [WalkerDefinition(Strategy.RandomWalk,
34 | 10, {'timeout': 3.0})],
35 | default_bootstrap_defs, {}, [('started',)])
36 | await IPv8(builder.finalize(),
37 | extra_communities={'MyCommunity': MyCommunity}).start()
38 | await run_forever()
39 |
40 |
41 | run(start_communities())
42 |
--------------------------------------------------------------------------------
/doc/basics/overlay_tutorial_5.py:
--------------------------------------------------------------------------------
1 | import os
2 | from asyncio import run
3 | from dataclasses import dataclass
4 |
5 | from ipv8.community import Community, CommunitySettings
6 | from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
7 | from ipv8.lazy_community import lazy_wrapper
8 | from ipv8.messaging.payload_dataclass import DataClassPayload
9 | from ipv8.types import Peer
10 | from ipv8.util import run_forever
11 | from ipv8_service import IPv8
12 |
13 |
14 | @dataclass
15 | class MyMessage(DataClassPayload[1]): # The value 1 identifies this message and must be unique per community
16 | clock: int # We add an integer (technically a "long long") field "clock" to this message
17 |
18 |
19 | class MyCommunity(Community):
20 | community_id = os.urandom(20)
21 |
22 | def __init__(self, settings: CommunitySettings) -> None:
23 | super().__init__(settings)
24 | # Register the message handler for messages (with the identifier "1").
25 | self.add_message_handler(MyMessage, self.on_message)
26 | # The Lamport clock this peer maintains.
27 | # This is for the example of global clock synchronization.
28 | self.lamport_clock = 0
29 |
30 | def started(self) -> None:
31 | async def start_communication() -> None:
32 | if not self.lamport_clock:
33 | # If we have not started counting, try bootstrapping
34 | # communication with our other known peers.
35 | for p in self.get_peers():
36 | self.ez_send(p, MyMessage(self.lamport_clock))
37 | else:
38 | self.cancel_pending_task("start_communication")
39 |
40 | # We register an asyncio task with this overlay.
41 | # This makes sure that the task ends when this overlay is unloaded.
42 | # We call the 'start_communication' function every 5.0 seconds, starting now.
43 | self.register_task("start_communication", start_communication, interval=5.0, delay=0)
44 |
45 | @lazy_wrapper(MyMessage)
46 | def on_message(self, peer: Peer, payload: MyMessage) -> None:
47 | # Update our Lamport clock.
48 | self.lamport_clock = max(self.lamport_clock, payload.clock) + 1
49 | print(self.my_peer, "current clock:", self.lamport_clock)
50 | # Then synchronize with the rest of the network again.
51 | self.ez_send(peer, MyMessage(self.lamport_clock))
52 |
53 |
54 | async def start_communities() -> None:
55 | for i in [1, 2, 3]:
56 | builder = ConfigBuilder().clear_keys().clear_overlays()
57 | builder.add_key("my peer", "medium", f"ec{i}.pem")
58 | builder.add_overlay("MyCommunity", "my peer",
59 | [WalkerDefinition(Strategy.RandomWalk,
60 | 10, {'timeout': 3.0})],
61 | default_bootstrap_defs, {}, [('started',)])
62 | await IPv8(builder.finalize(),
63 | extra_communities={'MyCommunity': MyCommunity}).start()
64 | await run_forever()
65 |
66 |
67 | run(start_communities())
68 |
--------------------------------------------------------------------------------
/doc/basics/requestcache_tutorial.rst:
--------------------------------------------------------------------------------
1 | Storing states in IPv8
2 | ======================
3 |
4 | This document assumes you have a basic understanding of network overlays in IPv8, as documented in `the overlay tutorial <../basics/overlay_tutorial.html>`_.
5 | You will learn how to use IPv8's ``RequestCache`` class to store the state of message flows.
6 |
7 | When you need a state
8 | ---------------------
9 |
10 | More often than not, messages come in *flows*.
11 | For example, one peer sends out a *request* and another peer provides a *response*.
12 | Or, as another example, your message is too big to fit into a single UDP packet and you need to keep track of multiple smaller messages that belong together.
13 | In these cases you need to keep a state.
14 | The ``RequestCache`` class keeps track of states and also natively includes a timeout mechanism to make sure you don't get a memory leak.
15 |
16 | Typically, you will use one ``RequestCache`` per network overlay, to which you add the caches that store states.
17 |
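As a sketch of this pattern (a hypothetical ``MyCommunity`` in the style of the overlay tutorial; the overridden ``unload`` is one reasonable place to clean up), an overlay can own its ``RequestCache`` and shut it down together with the overlay:

.. code-block:: python

    from ipv8.community import Community, CommunitySettings
    from ipv8.requestcache import RequestCache


    class MyCommunity(Community):
        community_id = b"\x00" * 20  # Placeholder: use your own 20-byte identifier.

        def __init__(self, settings: CommunitySettings) -> None:
            super().__init__(settings)
            # One RequestCache holding all caches (states) of this overlay.
            self.request_cache = RequestCache()

        async def unload(self) -> None:
            # Shut down the cache (cancelling pending timeouts) before unloading the overlay.
            await self.request_cache.shutdown()
            await super().unload()
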
18 | The hard way
19 | ------------
20 |
21 | The most straightforward way of interacting with the ``RequestCache`` is by adding ``NumberCache`` instances to it directly.
22 | Normally, you will use ``add()`` and ``pop()`` to respectively add new caches and remove existing caches from the ``RequestCache``.
23 | This is a bare-bones example of how states can be stored and retrieved:
24 |
25 | .. literalinclude:: requestcache_tutorial_1.py
26 |
27 | In the previous example we have assumed that a cache would eventually arrive.
28 | This will almost never be the case in practice.
29 | You can overwrite the ``on_timeout`` method of your ``NumberCache`` instances to deal with cleaning up when a cache times out.
30 | In the following example, we shut down when the cache times out:
31 |
32 | .. literalinclude:: requestcache_tutorial_2.py
33 |
34 | You may notice some inconvenient properties of these caches.
35 | You need to generate a unique identifier and manually keep track of it.
36 | This is why we have an easier way to interact with the ``RequestCache``.
37 |
38 | The easier way
39 | --------------
40 |
41 | Let's look at the complete Community code for two peers that use each other to count to 10.
42 | For this toy example, we define two messages and a single cache.
43 | Unlike when doing things the hard way, we now use a ``RandomNumberCache`` to have IPv8 select a message identifier for us.
44 | Both the ``identifier`` fields for the messages and the ``name`` for the cache are required.
45 | Please attentively read through this code:
46 |
47 | .. literalinclude:: requestcache_tutorial_3.py
48 | :lines: 12-91
49 |
50 | You are encouraged to play around with this code.
51 | Also, take notice of the fact that this example includes a replay attack (try removing the cache and see what happens).
52 |
--------------------------------------------------------------------------------
/doc/basics/requestcache_tutorial_1.py:
--------------------------------------------------------------------------------
1 | from asyncio import run
2 |
3 | from ipv8.requestcache import NumberCacheWithName, RequestCache
4 |
5 |
6 | class MyState(NumberCacheWithName):
7 |
8 | name = "my-state"
9 |
10 | def __init__(self, request_cache: RequestCache,
11 | identifier: int, state: int) -> None:
12 | super().__init__(request_cache, self.name, identifier)
13 | self.state = state
14 |
15 |
16 | async def foo(request_cache: RequestCache) -> None:
17 | """
18 | Add a new MyState cache to the global request cache.
19 | The state variable is set to 42 and the identifier of this cache is 0.
20 | """
21 | cache = MyState(request_cache, 0, 42)
22 | request_cache.add(cache)
23 |
24 |
25 | async def bar() -> None:
26 | """
27 | Wait until a MyState cache with identifier 0 is added.
28 | Then, remove this cache from the global request cache and print its state.
29 | """
30 | # Normally, you would add this to a network overlay instance.
31 | request_cache = RequestCache()
32 | request_cache.register_anonymous_task("Add later", foo, request_cache, delay=1.23)
33 |
34 | cache = await request_cache.wait_for(MyState, 0)
35 |
36 | print("I found a cache with the state:", cache.state)
37 |
38 |
39 | run(bar())
40 |
--------------------------------------------------------------------------------
/doc/basics/requestcache_tutorial_2.py:
--------------------------------------------------------------------------------
1 | from asyncio import run, sleep
2 |
3 | from ipv8.requestcache import NumberCacheWithName, RequestCache
4 |
5 |
6 | class MyState(NumberCacheWithName):
7 |
8 | name = "my-state"
9 |
10 | def __init__(self, request_cache: RequestCache,
11 | identifier: int, state: int) -> None:
12 | super().__init__(request_cache, self.name, identifier)
13 | self.state = state
14 |
15 | def on_timeout(self) -> None:
16 | print("Oh no! I never received a response!")
17 |
18 | @property
19 | def timeout_delay(self) -> float:
20 | # We will timeout after 3 seconds (default is 10 seconds)
21 | return 3.0
22 |
23 |
24 | async def foo() -> None:
25 | request_cache = RequestCache()
26 | cache = MyState(request_cache, 0, 42)
27 | request_cache.add(cache)
28 | await sleep(4)
29 |
30 |
31 | run(foo())
32 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_1.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | COMPLETED = []
4 |
5 |
6 | async def execute_me_too(i: int) -> None:
7 | await asyncio.sleep(0.5)
8 | COMPLETED.append(i)
9 |
10 |
11 | async def execute_me() -> None:
12 | execute_me_too(1) # 1
13 | await execute_me_too(2) # 2
14 | COMPLETED.append(3) # 3
15 | _ = asyncio.ensure_future(execute_me_too(4)) # 4
16 | COMPLETED.append(5) # 5
17 | await asyncio.sleep(1) # 6
18 |
19 | asyncio.run(execute_me())
20 | print(COMPLETED)
21 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_2.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | COMPLETED = []
4 |
5 |
6 | async def execute_me_too(i: int) -> None:
7 | await asyncio.sleep(0.5)
8 | COMPLETED.append(i)
9 |
10 |
11 | async def execute_me() -> None:
12 | await execute_me_too(2)
13 | COMPLETED.append(3)
14 | fut = asyncio.ensure_future(execute_me_too(4)) # store future
15 | COMPLETED.append(5)
16 | await fut # await future before exiting
17 |
18 | asyncio.run(execute_me())
19 | print(COMPLETED)
20 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_3.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from ipv8.taskmanager import TaskManager
4 |
5 | COMPLETED = []
6 |
7 |
8 | async def execute_me_too(i: int) -> None:
9 | await asyncio.sleep(0.5)
10 | COMPLETED.append(i)
11 |
12 |
13 | async def main() -> None:
14 | task_manager = TaskManager()
15 |
16 | task_manager.register_task("execute_me_too1", execute_me_too, 1)
17 | task_manager.register_task("execute_me_too2", execute_me_too, 2)
18 | task_manager.cancel_pending_task("execute_me_too1")
19 | await task_manager.wait_for_tasks()
20 |
21 | await task_manager.shutdown_task_manager()
22 | print(COMPLETED)
23 |
24 | asyncio.run(main())
25 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_4.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from ipv8.taskmanager import TaskManager
4 |
5 | COMPLETED = []
6 |
7 |
8 | async def execute_me_too(i: int) -> None:
9 | await asyncio.sleep(0.5)
10 | COMPLETED.append(i)
11 |
12 |
13 | async def main() -> None:
14 | task_manager = TaskManager()
15 |
16 | for i in range(20):
17 | task_manager.register_anonymous_task("execute_me_too",
18 | execute_me_too, i)
19 | await task_manager.wait_for_tasks()
20 |
21 | await task_manager.shutdown_task_manager()
22 | print(COMPLETED)
23 |
24 | asyncio.run(main())
25 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_5.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from ipv8.taskmanager import TaskManager
4 |
5 | COMPLETED = []
6 |
7 |
8 | async def execute_me_too(i: int, task_manager: TaskManager) -> None:
9 | if len(COMPLETED) == 20:
10 | task_manager.cancel_pending_task("keep adding 1")
11 | return
12 | COMPLETED.append(i)
13 |
14 |
15 | async def main() -> None:
16 | task_manager = TaskManager()
17 |
18 | task_manager.register_task("keep adding 1", execute_me_too,
19 | 1, task_manager, interval=0.1)
20 | task_manager.register_task("sneaky inject", execute_me_too,
21 | 2, task_manager, delay=0.5)
22 | await task_manager.wait_for_tasks()
23 |
24 | await task_manager.shutdown_task_manager()
25 | print(COMPLETED)
26 |
27 | asyncio.run(main())
28 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_6.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Any
3 |
4 | from ipv8.community import Community, CommunitySettings
5 | from ipv8.test.base import TestBase
6 | from ipv8.test.mocking.ipv8 import MockIPv8
7 |
8 |
9 | class MyCommunity(Community):
10 | community_id = os.urandom(20)
11 |
12 | def __init__(self, settings: CommunitySettings) -> None:
13 | super().__init__(settings)
14 | self.register_task("error out :-(", self.error)
15 |
16 | def error(self) -> None:
17 | raise RuntimeError
18 |
19 |
20 | class TestMyCommunity(TestBase):
21 |
22 |     def create_node(self, *args: Any, **kwargs: Any) -> MockIPv8:
23 | mock_ipv8 = super().create_node(*args, **kwargs)
24 | mock_ipv8.overlay.cancel_all_pending_tasks()
25 | return mock_ipv8
26 |
27 | async def test_something(self) -> None:
28 | self.initialize(MyCommunity, 1) # Will not run tasks
29 |
30 |
31 | if __name__ == '__main__':
32 | import unittest
33 | unittest.main()
34 |
--------------------------------------------------------------------------------
/doc/basics/tasks_tutorial_7.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from ipv8.requestcache import NumberCacheWithName, RequestCache
4 |
5 |
6 | class MyCache(NumberCacheWithName):
7 |
8 | name = "my cache"
9 |
10 | def __init__(self,
11 | request_cache: RequestCache,
12 | number: int) -> None:
13 | super().__init__(request_cache, self.name, number)
14 |
15 | self.awaitable = asyncio.Future()
16 |
17 | self.register_future(self.awaitable, on_timeout=False)
18 |
19 | @property
20 | def timeout_delay(self) -> float:
21 | return 1.0
22 |
23 | def finish(self) -> None:
24 | self.awaitable.set_result(True)
25 |
26 |
27 | async def main() -> None:
28 | rq = RequestCache()
29 |
30 | rq.add(MyCache(rq, 0))
31 | with rq.passthrough():
32 | rq.add(MyCache(rq, 1)) # Overwritten timeout = 0.0
33 | rq.add(MyCache(rq, 2))
34 |
35 | future0 = rq.get(MyCache, 0).awaitable
36 | future1 = rq.get(MyCache, 1).awaitable
37 | future2 = rq.get(MyCache, 2).awaitable
38 |
39 | rq.get(MyCache, 0).finish()
40 | await future0
41 | print(f"future0.result()={future0.result()}")
42 | rq.pop(MyCache, 0)
43 |
44 | await future1
45 | print(f"future1.result()={future1.result()}")
46 |
47 | await rq.shutdown()
48 |
49 | print(f"future2.cancelled()={future2.cancelled()}")
50 |
51 |
52 | asyncio.run(main())
53 |
--------------------------------------------------------------------------------
/doc/further-reading/advanced_identity_1.py:
--------------------------------------------------------------------------------
1 | from asyncio import run, sleep
2 | from base64 import b64encode
3 |
4 | from ipv8.configuration import get_default_configuration
5 | from ipv8.REST.rest_manager import RESTManager
6 | from ipv8_service import IPv8
7 |
8 |
9 | async def start_community() -> None:
10 | for peer_id in [1, 2]:
11 | configuration = get_default_configuration()
12 | configuration['keys'] = [
13 | {'alias': "anonymous id", 'generation': "curve25519", 'file': f"keyfile_{peer_id}.pem"}]
14 | configuration['working_directory'] = f"state_{peer_id}"
15 | configuration['overlays'] = [overlay for overlay in configuration['overlays']
16 | if overlay['class'] == 'HiddenTunnelCommunity']
17 |
18 | # Start the IPv8 service
19 | ipv8 = IPv8(configuration)
20 | await ipv8.start()
21 | rest_manager = RESTManager(ipv8)
22 | await rest_manager.start(14410 + peer_id, api_key="my secret key")
23 |
24 | # Print the peer for reference
25 | print("Starting peer", b64encode(ipv8.keys["anonymous id"].mid))
26 |
27 | await sleep(1.0) # We run a 1 second test for this example
28 |
29 |
30 | run(start_community())
31 |
--------------------------------------------------------------------------------
/doc/further-reading/advanced_identity_2.py:
--------------------------------------------------------------------------------
1 | import os
2 | import ssl
3 | import sys
4 | from asyncio import run, sleep
5 | from base64 import b64encode
6 |
7 | from ipv8.configuration import get_default_configuration
8 | from ipv8.REST.rest_manager import RESTManager
9 | from ipv8_service import IPv8
10 |
11 | cert_file = os.path.join(os.path.dirname(sys.modules[IPv8.__module__].__file__),
12 | "doc", "further-reading", "certfile.pem")
13 |
14 |
15 | async def start_community() -> None:
16 | for peer_id in [1, 2]:
17 | configuration = get_default_configuration()
18 | configuration['keys'] = [
19 | {'alias': "anonymous id", 'generation': "curve25519", 'file': f"keyfile_{peer_id}.pem"}]
20 | configuration['working_directory'] = f"state_{peer_id}"
21 | configuration['overlays'] = [overlay for overlay in configuration['overlays']
22 | if overlay['class'] == 'HiddenTunnelCommunity']
23 |
24 | # Start the IPv8 service
25 | ipv8 = IPv8(configuration)
26 | await ipv8.start()
27 | ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
28 | ssl_context.load_cert_chain(cert_file)
29 | rest_manager = RESTManager(ipv8)
30 | await rest_manager.start(14410 + peer_id, ssl_context=ssl_context)
31 |
32 | # Print the peer for reference
33 | print("Starting peer", b64encode(ipv8.keys["anonymous id"].mid))
34 |
35 | await sleep(1.0) # We run a 1 second test for this example
36 |
37 |
38 | run(start_community())
39 |
--------------------------------------------------------------------------------
/doc/further-reading/dht.rst:
--------------------------------------------------------------------------------
1 | DHT(Discovery)Community
2 | =======================
3 |
4 | This document describes how to use the ``DHTCommunity`` class for distributed hash table (DHT) data storage and its ``DHTDiscoveryCommunity`` extension, which adds the ability to connect to peers given their public keys.
5 |
6 | In particular, this document will **not** discuss how distributed hash tables work; for this, we refer the reader to other resources on the Internet.
7 |
8 |
9 | Storing values and finding keys
10 | -------------------------------
11 |
12 | The ``DHTCommunity`` is the main overlay that allows for decentralized key-value storage.
13 | There are two main functions in this overlay: the ``store_value()`` function and the ``find_values()`` function.
14 |
15 | When you call ``store_value()``, you choose the globally unique ``key`` that your given value ``data`` is stored under.
16 | You can, but are not required to, sign this new stored value with your public key to provide it with authenticity.
17 | Note that this function may lead to an ``ipv8.dht.DHTError``: in this case, you will have to try again later.
18 | An example of a call that stores the signed value ``b"my value"`` under the key ``b"my key"`` is the following.
19 |
20 | .. literalinclude:: dht_1.py
21 | :lines: 43-47
22 |
23 | The value can later be retrieved from the network by calling ``find_values()`` with the key that the information was stored under.
24 | The following snippet retrieves the value that was stored in the previous snippet, under the ``b"my key"`` key.
25 |
26 | .. literalinclude:: dht_1.py
27 | :lines: 49-55
28 |
29 | Note that multiple peers may respond with answers and if (a) the original value is not signed or (b) multiple values are published under the same key, the reported values may be different.
30 | In this example, only one value is published and it is signed so only a single value is ever returned.
31 |
32 | Finding peers
33 | -------------
34 |
35 | The ``DHTDiscoveryCommunity`` allows for peers to be found by their public key.
36 | You can search for public keys by their SHA-1 hash (conveniently available as ``Peer.mid``).
37 | To do so, you can call ``connect_peer()`` with the hash/mid as shown in the following example.
38 |
39 |
40 | .. literalinclude:: dht_1.py
41 | :lines: 58-65
42 |
43 | Note that you may need a few attempts to find the peer you are looking for.
44 | Of course, if the peer you are looking for is not online, you may be waiting forever.
45 |
--------------------------------------------------------------------------------
/doc/further-reading/dht_1.py:
--------------------------------------------------------------------------------
1 | from asyncio import run, sleep
2 | from itertools import combinations
3 | from typing import cast
4 |
5 | from ipv8.configuration import ConfigBuilder
6 | from ipv8.dht import DHTError
7 | from ipv8.dht.community import DHTCommunity
8 | from ipv8.dht.discovery import DHTDiscoveryCommunity
9 | from ipv8.peer import Peer
10 | from ipv8_service import IPv8
11 |
12 |
13 | async def main() -> None:
14 | instances = []
15 |
16 | # Put some peers in the network
17 | for _ in range(10):
18 | config = ConfigBuilder().clear_keys()
19 | config.config["overlays"] = [o for o in config.config["overlays"] if o["class"] == "DHTDiscoveryCommunity"]
20 | config.add_ephemeral_key("anonymous id")
21 | config.set_address("127.0.0.1") # We don't want this test to connect to the actual network!
22 | ipv8 = IPv8(config.finalize())
23 | instances.append(ipv8)
24 | await ipv8.start()
25 |
26 | # Supercharge introductions, normally this takes longer
27 | for id1, id2 in combinations(range(10), 2):
28 | overlay1 = instances[id1].get_overlay(DHTCommunity)
29 | overlay2 = instances[id2].get_overlay(DHTCommunity)
30 | peer1 = Peer(overlay2.my_peer.public_key.key_to_bin(), ("127.0.0.1", overlay2.my_estimated_lan[1]))
31 | peer1.address_frozen = True
32 | peer2 = Peer(overlay1.my_peer.public_key.key_to_bin(), ("127.0.0.1", overlay1.my_estimated_lan[1]))
33 | peer2.address_frozen = True
34 | overlay1.network.add_verified_peer(peer2)
35 | overlay1.get_requesting_node(peer2)
36 | overlay2.network.add_verified_peer(peer1)
37 | overlay2.get_requesting_node(peer1)
38 | for i in range(10):
39 | await instances[i].get_overlay(DHTDiscoveryCommunity).store_peer()
40 | instances[i].get_overlay(DHTDiscoveryCommunity).ping_all()
41 |
42 | dht_community = cast(DHTCommunity, instances[0].get_overlay(DHTCommunity))
43 | try:
44 | await dht_community.store_value(b"my key", b"my value", True)
45 | print(dht_community.my_peer.public_key.key_to_bin(), "published b'my value' under b'my key'!")
46 | except DHTError as e:
47 | print("Failed to store my value under my key!", e)
48 |
49 | try:
50 | results = await dht_community.find_values(b"my key")
51 | print(f"We got results from {len(results)} peers!")
52 | for value, signer_key in results:
53 | print(f"The value {value} was found, signed by {signer_key}")
54 | except DHTError as e:
55 | print("Failed to find key!", e)
56 |
57 | dht_discovery_community = cast(DHTDiscoveryCommunity, instances[7].get_overlay(DHTDiscoveryCommunity))
58 | some_peer_mid = instances[2].keys["anonymous id"].mid
59 | while True:
60 | try:
61 | await sleep(0.5)
62 | await dht_discovery_community.connect_peer(some_peer_mid)
63 | break
64 | except DHTError as e:
65 | print("Failed to connect to peer!", e)
66 |
67 | run(main())
68 |
--------------------------------------------------------------------------------
/doc/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/doc/preliminaries/install_libsodium.rst:
--------------------------------------------------------------------------------
1 |
2 | Installing Libsodium (Windows and MacOS only)
3 | =============================================
4 |
5 | Running py-ipv8 on Windows or MacOS requires manual installation of Libsodium.
6 |
7 | Windows
8 | -------
9 |
10 |
11 | #. Libsodium can be downloaded from https://download.libsodium.org/libsodium/releases/
12 |
13 | .. code-block:: console
14 |
15 | For eg. https://download.libsodium.org/libsodium/releases/libsodium-1.0.17-msvc.zip
16 |
17 | #. Extract the files from the zip file
18 | #. There are two extracted directories: ``x64`` and ``Win32``. Select ``x64`` for 64-bit or ``Win32`` for 32-bit versions of Windows, and search for ``libsodium.dll``. You can find one inside ``Release/v141/dynamic/libsodium.dll``
19 | #. Copy this ``libsodium.dll`` file and paste it in ``C:\Windows\system32``
20 |
21 | MacOS
22 | -----
23 |
24 | Homebrew can be used to install libsodium:
25 |
26 | .. code-block:: bash
27 |
28 | brew install libsodium
29 |
30 | For details, check the libsodium documentation.
31 |
--------------------------------------------------------------------------------
/doc/reference/keys.rst:
--------------------------------------------------------------------------------
1 | Key generation options
2 | ======================
3 |
4 | The ``ipv8/keyvault/crypto.py`` file contains the main public key cryptography class for IPv8: ``ECCrypto``.
5 | It allows you to generate the following keys:
6 |
7 |
8 | .. csv-table:: Available curves for key generation
9 | :header: "name", "curve", "backend"
10 | :widths: 20, 20, 20
11 |
12 | "very-low", "SECT163K1", "M2Crypto"
13 | "low", "SECT233K1", "M2Crypto"
14 | "medium", "SECT409K1", "M2Crypto"
15 | "high", "SECT571R1", "M2Crypto"
16 | "curve25519", "EC25519", "Libsodium"
17 |
18 |
19 | The ``M2Crypto`` backend keys do not actually use the ``M2Crypto`` backend, but use a ``python-cryptography`` backend.
20 | These ``M2Crypto`` curves are supported for backwards compatibility with the Dispersy project.
21 | For new applications, only the ``curve25519`` curve should be used.
22 |
23 | Generally, you will create either a new ``ECCrypto`` instance (if you wish to modify or extend the base cryptography) or use the default ``default_eccrypto`` instance.
24 | The following methods are most commonly used (see the sketch after this list):
25 |
26 | - ``generate_key()``: generate a new key from a given curve name.
27 | - ``key_to_bin()``: serialize a given key into a string.
28 | - ``key_from_private_bin()``: load a private key from a string.
29 | - ``key_from_public_bin()``: load a public key from a string.
30 |
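As a rough sketch of these calls, using the recommended ``curve25519`` curve:

.. code-block:: python

    from ipv8.keyvault.crypto import default_eccrypto

    # Generate a fresh key pair on the recommended curve.
    my_key = default_eccrypto.generate_key("curve25519")

    # Serialize the private and public parts to bytes.
    private_bin = default_eccrypto.key_to_bin(my_key)
    public_bin = default_eccrypto.key_to_bin(my_key.pub())

    # Load them back again.
    same_private_key = default_eccrypto.key_from_private_bin(private_bin)
    only_public_key = default_eccrypto.key_from_public_bin(public_bin)
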
31 | The following methods will usually be handled by IPv8 internally (see the second sketch below):
32 |
33 | - ``key_to_hash()``: convert a key into a ``sha1`` string (usually accessed through ``Peer.mid``).
34 | - ``create_signature()``: create a signature for some data (usually handled by the ``Community`` class).
35 | - ``is_valid_signature()``: check a signature for validity (usually handled by the ``Community`` class).
36 |
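Although IPv8 normally performs these calls for you, a small sketch of manual use looks like this:

.. code-block:: python

    from ipv8.keyvault.crypto import default_eccrypto

    my_key = default_eccrypto.generate_key("curve25519")

    # Sign a message and check the signature against the public key.
    signature = default_eccrypto.create_signature(my_key, b"my message")
    valid = default_eccrypto.is_valid_signature(my_key.pub(), b"my message", signature)

    # The sha1 identifier that is normally exposed as Peer.mid.
    peer_mid = default_eccrypto.key_to_hash(my_key.pub())
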
--------------------------------------------------------------------------------
/doc/reference/peer_discovery.rst:
--------------------------------------------------------------------------------
1 |
2 | Peer discovery basics
3 | =====================
4 | All IPv8 overlays have 4 messages in common: introduction-request, introduction-response, puncture-request, and puncture. These 4 messages are used for peer discovery and NAT puncturing.
5 |
6 | The peer discovery protocol runs the following steps in a loop until enough peers have been found:
7 |
8 | 1. Peer A sends an introduction-request to peer B. Peer B is chosen from an existing pool of neighboring peers.
9 | 2. Peer B sends an introduction-response to peer A containing the address of peer C.
10 | 3. Peer B sends a puncture-request to peer C containing the address of peer A.
11 | 4. Peer C sends a puncture to peer A, puncturing its NAT.
12 |
13 | .. image:: ./resources/ipv8_peer_discovery.png
14 | :target: ./resources/ipv8_peer_discovery.png
15 | :alt: The IPv8 peer discovery protocol
16 | :align: center
17 |
18 | When a peer doesn't yet have a list of neighboring peers, it will select a bootstrap server as peer B. IPv8 bootstrap servers implement the same peer discovery protocol as ordinary peers, except that they respond to introduction-requests for *any* overlay. Once a peer sends an introduction-request to a bootstrap server, the bootstrap server will keep track of the sender and the overlay within which the introduction-request was sent. When sending introduction-responses, the bootstrap server will pick a peer from this list as an introduction candidate (peer C in the image above).
19 | Periodically, the bootstrap server will send an introduction-request for a random peer in the list. If the peer doesn't respond with an introduction-response, the bootstrap server will assume that the unresponsive peer is no longer interested in new peers and update its list accordingly.
20 |
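The loop above is driven by a discovery strategy, such as ``RandomWalk``. As a sketch, reusing the configuration helpers from the other documentation examples, two local IPv8 instances can be started with a walker attached to the bundled ``DiscoveryCommunity`` and their discovered peers printed:

.. code-block:: python

    from asyncio import run, sleep

    from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
    from ipv8_service import IPv8


    async def start_and_inspect() -> None:
        # Sketch only: start two IPv8 instances whose RandomWalk strategies
        # run the introduction-request/response loop described above.
        instances = []
        for i in [1, 2]:
            builder = ConfigBuilder().clear_keys().clear_overlays()
            builder.add_key("my peer", "medium", f"ec{i}.pem")
            builder.add_overlay("DiscoveryCommunity", "my peer",
                                [WalkerDefinition(Strategy.RandomWalk, 10, {'timeout': 3.0})],
                                default_bootstrap_defs, {}, [])
            ipv8 = IPv8(builder.finalize())
            await ipv8.start()
            instances.append(ipv8)

        await sleep(10.0)  # Give the walkers some time to do their work

        for instance in instances:
            for overlay in instance.overlays:
                print(overlay.__class__.__name__, "discovered",
                      len(overlay.get_peers()), "peers")


    run(start_and_inspect())
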
--------------------------------------------------------------------------------
/doc/reference/resources/ipv8_peer_discovery.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/doc/reference/resources/ipv8_peer_discovery.png
--------------------------------------------------------------------------------
/doc/reference/serialization_1.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass
4 |
5 | from ipv8.messaging.lazy_payload import VariablePayload, vp_compile
6 | from ipv8.messaging.payload import Payload
7 | from ipv8.messaging.payload_dataclass import DataClassPayload, type_from_format
8 | from ipv8.messaging.serialization import Serializable
9 |
10 |
11 | class MySerializable(Serializable):
12 | format_list = ['I', 'H']
13 |
14 | def __init__(self, field1: int, field2: int) -> None:
15 | self.field1 = field1
16 | self.field2 = field2
17 |
18 | def to_pack_list(self) -> list[tuple]:
19 | return [('I', self.field1),
20 | ('H', self.field2)]
21 |
22 | @classmethod
23 | def from_unpack_list(cls: type[MySerializable],
24 | field1: int, field2: int) -> MySerializable:
25 | return cls(field1, field2)
26 |
27 |
28 | class MyPayload(Payload):
29 | format_list = ['I', 'H']
30 |
31 | def __init__(self, field1: int, field2: int) -> None:
32 | self.field1 = field1
33 | self.field2 = field2
34 |
35 | def to_pack_list(self) -> list[tuple]:
36 | return [('I', self.field1),
37 | ('H', self.field2)]
38 |
39 | @classmethod
40 | def from_unpack_list(cls: type[MyPayload],
41 | field1: int, field2: int) -> MyPayload:
42 | return cls(field1, field2)
43 |
44 |
45 | class MyVariablePayload(VariablePayload):
46 | format_list = ['I', 'H']
47 | names = ['field1', 'field2']
48 |
49 |
50 | @vp_compile
51 | class MyCVariablePayload(VariablePayload):
52 | format_list = ['I', 'H']
53 | names = ['field1', 'field2']
54 |
55 |
56 | I = type_from_format('I')
57 | H = type_from_format('H')
58 |
59 |
60 | @dataclass
61 | class MyDataclassPayload(DataClassPayload):
62 | field1: I
63 | field2: H
64 |
65 |
66 | serializable1 = MySerializable(1, 2)
67 | serializable2 = MyPayload(1, 2)
68 | serializable3 = MyVariablePayload(1, 2)
69 | serializable4 = MyCVariablePayload(1, 2)
70 | serializable5 = MyDataclassPayload(1, 2)
71 |
72 | print("As string:")
73 | print(serializable1)
74 | print(serializable2)
75 | print(serializable3)
76 | print(serializable4)
77 | print(serializable5)
78 |
--------------------------------------------------------------------------------
/doc/reference/serialization_2.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from ipv8.community import Community, CommunitySettings
4 | from ipv8.lazy_community import lazy_wrapper
5 | from ipv8.messaging.lazy_payload import VariablePayload, vp_compile
6 | from ipv8.types import Peer
7 |
8 |
9 | @vp_compile
10 | class MyMessagePayload1(VariablePayload):
11 | format_list = []
12 | names = []
13 |
14 |
15 | @vp_compile
16 | class MyMessagePayload2(VariablePayload):
17 | format_list = []
18 | names = []
19 |
20 |
21 | COMMUNITY_ID = os.urandom(20)
22 |
23 |
24 | class MyCommunity(Community):
25 | community_id = COMMUNITY_ID
26 |
27 | def __init__(self, settings: CommunitySettings) -> None:
28 | super().__init__(settings)
29 |
30 | self.add_message_handler(1, self.on_message)
31 |
32 | @lazy_wrapper(MyMessagePayload1, MyMessagePayload2)
33 | def on_message(self, peer: Peer, payload1: MyMessagePayload1,
34 | payload2: MyMessagePayload2) -> None:
35 | print("Got a message from:", peer)
36 | print("The message includes the first payload:\n", payload1)
37 | print("The message includes the second payload:\n", payload2)
38 |
39 | def send_message(self, peer: Peer) -> None:
40 | packet = self.ezr_pack(1, MyMessagePayload1(), MyMessagePayload2())
41 | self.endpoint.send(peer.address, packet)
42 |
--------------------------------------------------------------------------------
/doc/reference/serialization_3.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from ipv8.community import Community, CommunitySettings
4 | from ipv8.lazy_community import lazy_wrapper
5 | from ipv8.messaging.lazy_payload import VariablePayload, vp_compile
6 | from ipv8.types import Peer
7 |
8 |
9 | @vp_compile
10 | class MyMessage1(VariablePayload):
11 | msg_id = 1
12 | format_list = ['I']
13 | names = ['unsigned_integer_field']
14 |
15 |
16 | @vp_compile
17 | class MyMessage2(VariablePayload):
18 | msg_id = 2
19 | format_list = ['I']
20 | names = ['unsigned_integer_field']
21 |
22 |
23 | COMMUNITY_ID = os.urandom(20)
24 |
25 |
26 | class MyCommunity(Community):
27 | community_id = COMMUNITY_ID
28 |
29 | def __init__(self, settings: CommunitySettings) -> None:
30 | super().__init__(settings)
31 |
32 | self.add_message_handler(MyMessage1, self.on_message1)
33 | self.add_message_handler(MyMessage2, self.on_message2)
34 |
35 | @lazy_wrapper(MyMessage1)
36 | def on_message1(self, peer: Peer, payload: MyMessage1) -> None:
37 | print("Got a message from:", peer)
38 | print("The message includes the first payload:\n", payload)
39 |
40 | @lazy_wrapper(MyMessage2)
41 | def on_message2(self, peer: Peer, payload: MyMessage2) -> None:
42 | print("Got a message from:", peer)
43 | print("The message includes the first payload:\n", payload)
44 |
45 | def send_message1(self, peer: Peer) -> None:
46 | packet = self.ezr_pack(MyMessage1.msg_id, MyMessage1(42))
47 | self.endpoint.send(peer.address, packet)
48 |
49 | def send_message2(self, peer: Peer) -> None:
50 | packet = self.ezr_pack(MyMessage2.msg_id, MyMessage2(7))
51 | self.endpoint.send(peer.address, packet)
52 |
53 | def better_send_message_1(self, peer: Peer) -> None:
54 | self.ez_send(peer, MyMessage1(42))
55 |
56 | def better_send_message_2(self, peer: Peer) -> None:
57 | self.ez_send(peer, MyMessage2(7))
58 |
--------------------------------------------------------------------------------
/doc/reference/serialization_4.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | from dataclasses import dataclass
5 | from typing import cast
6 |
7 | from ipv8.messaging.lazy_payload import VariablePayload, vp_compile
8 | from ipv8.messaging.payload_dataclass import DataClassPayload
9 | from ipv8.messaging.serialization import default_serializer
10 |
11 |
12 | @vp_compile
13 | class VPMessageKeepDict(VariablePayload):
14 | msg_id = 1
15 | format_list = ['varlenH']
16 | names = ["dictionary"]
17 |
18 | def fix_pack_dictionary(self, the_dictionary: dict) -> bytes:
19 | return json.dumps(the_dictionary).encode()
20 |
21 | @classmethod
22 | def fix_unpack_dictionary(cls: type[VPMessageKeepDict],
23 | serialized_dictionary: bytes) -> dict:
24 | return json.loads(serialized_dictionary.decode())
25 |
26 |
27 | @dataclass
28 | class DCMessageKeepDict(DataClassPayload[2]):
29 | dictionary: str
30 |
31 | def fix_pack_dictionary(self, the_dictionary: dict) -> str:
32 | return json.dumps(the_dictionary)
33 |
34 | @classmethod
35 | def fix_unpack_dictionary(cls: type[DCMessageKeepDict],
36 | serialized_dictionary: str) -> dict:
37 | return json.loads(serialized_dictionary)
38 |
39 |
40 | data = {"1": 1, "key": "value"}
41 |
42 | message1 = VPMessageKeepDict(data)
43 | message2 = DCMessageKeepDict(data)
44 |
45 | assert message1.dictionary["1"] == 1
46 | assert message1.dictionary["key"] == "value"
47 | assert cast(dict, message2.dictionary)["1"] == 1
48 | assert cast(dict, message2.dictionary)["key"] == "value"
49 |
50 | serialized1 = default_serializer.pack_serializable(message1)
51 | serialized2 = default_serializer.pack_serializable(message2)
52 |
53 | assert serialized1 == serialized2
54 |
55 | unserialized1, _ = default_serializer.unpack_serializable(VPMessageKeepDict, serialized1)
56 | unserialized2, _ = default_serializer.unpack_serializable(DCMessageKeepDict, serialized2)
57 |
58 | assert unserialized1.dictionary["1"] == 1
59 | assert unserialized1.dictionary["key"] == "value"
60 | assert unserialized2.dictionary["1"] == 1
61 | assert unserialized2.dictionary["key"] == "value"
62 |
--------------------------------------------------------------------------------
/doc/reference/serialization_5.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import struct
4 | from asyncio import Event, run
5 | from typing import Any
6 |
7 | from ipv8.community import Community, CommunitySettings
8 | from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
9 | from ipv8.lazy_community import lazy_wrapper
10 | from ipv8.messaging.lazy_payload import VariablePayload, vp_compile
11 | from ipv8.messaging.serialization import Packer, Serializer
12 | from ipv8.types import Peer
13 | from ipv8_service import IPv8
14 |
15 |
16 | @vp_compile
17 | class Message(VariablePayload):
18 | msg_id = 1
19 | format_list = ['json', 'json', 'json', 'json']
20 | names = ["d1", "d2", "d3", "d4"]
21 |
22 |
23 | class PackerJSON(Packer):
24 |
25 | def pack(self, data: Any) -> bytes:
26 | packed = json.dumps(data).encode()
27 | size = struct.pack(">H", len(packed))
28 | return size + packed
29 |
30 | def unpack(self, data: bytes, offset: int,
31 | unpack_list: list, *args: Any) -> int:
32 | size, = struct.unpack_from(">H", data, offset)
33 |
34 | json_data_start = offset + 2
35 | json_data_end = json_data_start + size
36 |
37 | serialized = data[json_data_start:json_data_end]
38 | unpack_list.append(json.loads(serialized))
39 |
40 | return json_data_end
41 |
42 |
43 | class MyCommunity(Community):
44 |
45 | def get_serializer(self) -> Serializer:
46 | serializer = super().get_serializer()
47 | serializer.add_packer('json', PackerJSON())
48 | return serializer
49 |
50 | community_id = os.urandom(20)
51 |
52 | def __init__(self, settings: CommunitySettings) -> None:
53 | super().__init__(settings)
54 | self.event = None
55 | self.add_message_handler(Message, self.on_message)
56 |
57 | @lazy_wrapper(Message)
58 | def on_message(self, peer: Peer, message: Message) -> None:
59 | self.logger.info(str(peer))
60 | self.logger.info(str(message))
61 |
62 | assert message.d4 == 1337 # Check d4 here to make sure this is not some magic temporary serialization.
63 |
64 | if self.event:
65 | self.event.set()
66 |
67 | def started(self, event: Event, peer_id: int) -> None:
68 | self.event = event
69 |
70 | async def send_message() -> None:
71 | for p in self.get_peers():
72 | message = Message(
73 | {"a": "b", "c": "d"},
74 | {"e": "f", "g": "h"},
75 | ["i", "j", "k", "l"],
76 | 42
77 | )
78 | message.d4 = 1337 # Overwrite 42 here to make sure this is not some magic temporary serialization.
79 | self.ez_send(p, message)
80 |
81 | if peer_id == 1:
82 | self.register_task("Start Sending Messages", send_message, interval=2.0, delay=0)
83 |
84 |
85 | async def start_communities() -> None:
86 | event = Event()
87 |
88 | for i in [1, 2]:
89 | builder = ConfigBuilder().clear_keys().clear_overlays()
90 | builder.add_key("my peer", "medium", f"ec{i}.pem")
91 | builder.add_overlay("MyCommunity", "my peer", [WalkerDefinition(Strategy.RandomWalk, 10, {'timeout': 3.0})],
92 | default_bootstrap_defs, {}, [("started", event, i)])
93 | ipv8 = IPv8(builder.finalize(), extra_communities={'MyCommunity': MyCommunity})
94 | await ipv8.start()
95 |
96 | await event.wait()
97 |
98 |
99 | run(start_communities())
100 |
--------------------------------------------------------------------------------
/doc/reference/serialization_6.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | from asyncio import Event, run
4 | from binascii import hexlify, unhexlify
5 |
6 | from ipv8.community import Community, CommunitySettings
7 | from ipv8.configuration import ConfigBuilder, Strategy, WalkerDefinition, default_bootstrap_defs
8 | from ipv8.types import Address, Peer
9 | from ipv8_service import IPv8
10 |
11 |
12 | def to_hex(bstr: bytes) -> str:
13 | return hexlify(bstr).decode()
14 |
15 |
16 | class MyCommunity(Community):
17 | community_id = os.urandom(20)
18 |
19 | def __init__(self, settings: CommunitySettings) -> None:
20 | super().__init__(settings)
21 | self.event = None
22 | self.add_message_handler(1, self.on_message)
23 |
24 | def send_message(self, peer: Peer) -> None:
25 | message = json.dumps({"key": "value", "key2": "value2"})
26 | public_key = to_hex(self.my_peer.public_key.key_to_bin())
27 | signature = to_hex(self.my_peer.key.signature(message.encode()))
28 |
29 | signed_message = json.dumps({"message": message,
30 | "public_key": public_key,
31 | "signature": signature}).encode()
32 | self.endpoint.send(peer.address,
33 | self.get_prefix() + b'\x01' + signed_message)
34 |
35 | def on_message(self, source_address: Address, data: bytes) -> None:
36 | # Account for 1 byte message id
37 | header_length = len(self.get_prefix()) + 1
38 | # Strip the IPv8 multiplexing data
39 | received = json.loads(data[header_length:])
40 |
41 | public_key = self.crypto.key_from_public_bin(unhexlify(received["public_key"]))
42 | valid = self.crypto.is_valid_signature(public_key,
43 | received["message"].encode(),
44 | unhexlify(received["signature"]))
45 | self.logger.info("Received message %s from %s, the signature is %s!",
46 | received['message'], source_address, valid)
47 |
48 | if self.event:
49 | self.event.set()
50 |
51 | def started(self, event: Event, peer_id: int) -> None:
52 | self.event = event
53 |
54 | async def send_message() -> None:
55 | for p in self.get_peers():
56 | self.send_message(p)
57 |
58 | if peer_id == 1:
59 | self.register_task("Start Sending Messages", send_message, interval=2.0, delay=0)
60 |
61 |
62 | async def start_communities() -> None:
63 | event = Event()
64 |
65 | for i in [1, 2]:
66 | builder = ConfigBuilder().clear_keys().clear_overlays()
67 | builder.add_key("my peer", "medium", f"ec{i}.pem")
68 | builder.add_overlay("MyCommunity", "my peer", [WalkerDefinition(Strategy.RandomWalk, 10, {'timeout': 3.0})],
69 | default_bootstrap_defs, {}, [("started", event, i)])
70 | ipv8 = IPv8(builder.finalize(), extra_communities={'MyCommunity': MyCommunity})
71 | await ipv8.start()
72 |
73 | await event.wait()
74 |
75 |
76 | run(start_communities())
77 |
--------------------------------------------------------------------------------
/doc/reference/serialization_7.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 |
3 | from ipv8.messaging.lazy_payload import VariablePayload
4 | from ipv8.messaging.payload_dataclass import DataClassPayload
5 |
6 |
7 | class A(VariablePayload):
8 | format_list = ['I', 'H']
9 | names = ["foo", "bar"]
10 |
11 |
12 | class B(VariablePayload):
13 | format_list = [A, 'H'] # Note that we pass the class A
14 | names = ["a", "baz"]
15 |
16 |
17 | @dataclass
18 | class Message(DataClassPayload[1]):
19 | @dataclass
20 | class Item:
21 | foo: int
22 | bar: int
23 |
24 | item: Item
25 | items: [Item] # Yes, you can even make this a list!
26 | baz: int
27 |
--------------------------------------------------------------------------------
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | astroid>=3.0.0
2 | sphinx-autoapi
3 | sphinx-rtd-theme>=1.3.0
4 |
--------------------------------------------------------------------------------
/doc/resources/healthy_IPv8_overlay_collection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/doc/resources/healthy_IPv8_overlay_collection.png
--------------------------------------------------------------------------------
/ipv8/REST/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/REST/__init__.py
--------------------------------------------------------------------------------
/ipv8/REST/base_endpoint.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import json
4 | import logging
5 | from collections.abc import Awaitable, Iterable
6 | from typing import Any, Callable, Generic, TypeVar
7 |
8 | from aiohttp import web
9 | from aiohttp.abc import Request, StreamResponse
10 | from aiohttp.typedefs import Handler, LooseHeaders
11 |
12 | HTTP_BAD_REQUEST = 400
13 | HTTP_UNAUTHORIZED = 401
14 | HTTP_NOT_FOUND = 404
15 | HTTP_CONFLICT = 409
16 | HTTP_PRECONDITION_FAILED = 412
17 | HTTP_INTERNAL_SERVER_ERROR = 500
18 |
19 | DEFAULT_HEADERS: dict[str, str] = {}
20 |
21 | T = TypeVar('T')
22 | MiddleWaresType = Iterable[Callable[[Request, Handler], Awaitable[StreamResponse]]]
23 |
24 |
25 | class BaseEndpoint(Generic[T]):
26 | """
27 | Base class for all REST endpoints.
28 | """
29 |
30 | def __init__(self, middlewares: MiddleWaresType = ()) -> None:
31 | """
32 | Create new unregistered and uninitialized REST endpoint.
33 | """
34 | self._logger = logging.getLogger(self.__class__.__name__)
35 | self.app = web.Application(middlewares=middlewares)
36 | self.session: T | None = None
37 | self.endpoints: dict[str, BaseEndpoint] = {}
38 | self.setup_routes()
39 |
40 | def setup_routes(self) -> None:
41 | """
42 | Register the names to make this endpoint callable.
43 | """
44 |
45 | def initialize(self, session: T) -> None:
46 | """
47 | Initialize this endpoint for the given session instance.
48 | """
49 | self.session = session
50 | for endpoint in self.endpoints.values():
51 | endpoint.initialize(session)
52 |
53 | def add_endpoint(self, prefix: str, endpoint: BaseEndpoint) -> None:
54 | """
55 | Add a new child endpoint to this endpoint.
56 | """
57 | self.endpoints[prefix] = endpoint
58 | self.app.add_subapp(prefix, endpoint.app)
59 |
60 |
61 | class Response(web.Response):
62 | """
63 | A convenience class to auto-encode response bodies in JSON format.
64 | """
65 |
66 | def __init__(self, body: Any = None, headers: LooseHeaders | None = None, # noqa: ANN401
67 | content_type: str | None = None, status: int = 200, **kwargs) -> None:
68 | """
69 | Create the response.
70 | """
71 | if isinstance(body, (dict, list)):
72 | body = json.dumps(body)
73 | content_type = 'application/json'
74 | super().__init__(body=body, headers=headers or DEFAULT_HEADERS, content_type=content_type, status=status,
75 | **kwargs)
76 |
--------------------------------------------------------------------------------
/ipv8/REST/network_endpoint.py:
--------------------------------------------------------------------------------
1 | from base64 import b64encode
2 | from typing import cast
3 |
4 | from aiohttp import web
5 | from aiohttp.abc import Request
6 | from aiohttp_apispec import docs
7 | from marshmallow.fields import Integer, List, String
8 |
9 | from ..types import IPv8
10 | from .base_endpoint import BaseEndpoint, Response
11 | from .schema import schema
12 |
13 |
14 | class NetworkEndpoint(BaseEndpoint[IPv8]):
15 | """
16 | This endpoint is responsible for handing all requests regarding the state of the network.
17 | """
18 |
19 | def setup_routes(self) -> None:
20 | """
21 | Register the names to make this endpoint callable.
22 | """
23 | self.app.add_routes([web.get('', self.retrieve_peers)])
24 |
25 | @docs(
26 | tags=["Network"],
27 | summary="Return a list of all known peers.",
28 | responses={
29 | 200: {
30 | "schema": schema(PeersResponse={
31 | "peers": [schema(Peer={
32 | "ip": String,
33 | "port": Integer,
34 | "public_key": String,
35 | "services": List(String),
36 | })]
37 | })
38 | }
39 | }
40 | )
41 | async def retrieve_peers(self, _: Request) -> Response:
42 | """
43 | Return a list of all known peers.
44 | """
45 | if self.session is None:
46 | return Response({"peers": {}})
47 | self.session = cast(IPv8, self.session)
48 |
49 | network = self.session.network
50 | peer_list = network.verified_peers
51 | return Response({"peers": {
52 | b64encode(peer.mid).decode('utf-8'): {
53 | "ip": peer.address[0],
54 | "port": peer.address[1],
55 | "public_key": b64encode(peer.public_key.key_to_bin()).decode('utf-8'),
56 | "services": [b64encode(s).decode('utf-8') for s in network.get_services_for_peer(peer)]
57 | }
58 | for peer in peer_list
59 | }})
60 |
--------------------------------------------------------------------------------
/ipv8/REST/noblock_dht_endpoint.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from asyncio import ensure_future
3 | from binascii import hexlify, unhexlify
4 |
5 | from aiohttp import web
6 | from aiohttp.abc import Request
7 | from aiohttp_apispec import docs
8 |
9 | from ..dht import DHTError
10 | from ..dht.community import DHTCommunity
11 | from ..types import IPv8
12 | from .base_endpoint import HTTP_NOT_FOUND, BaseEndpoint, Response
13 | from .schema import DefaultResponseSchema
14 |
15 |
16 | class NoBlockDHTEndpoint(BaseEndpoint):
17 | """
18 | This endpoint is responsible for handling requests for DHT data, non-blocking.
19 | """
20 |
21 | def __init__(self) -> None:
22 | """
23 | Create a REST endpoint for all non-blocking calls to the DHT overlay.
24 | """
25 | super().__init__()
26 | self.dht = None
27 |
28 | def setup_routes(self) -> None:
29 | """
30 | Register the names to make this endpoint callable.
31 | """
32 | self.app.add_routes([web.get('/{mid}', self.handle_get)])
33 |
34 | def initialize(self, session: IPv8) -> None:
35 | """
36 | Initialize this endpoint.
37 | """
38 | super().initialize(session)
39 | self.dht = session.get_overlay(DHTCommunity)
40 |
41 | @docs(
42 | tags=["DHT"],
43 | summary="Connect to a peer through the DHT.",
44 | parameters=[{
45 | 'in': 'path',
46 | 'name': 'mid',
47 | 'description': 'The mid (i.e., sha1(public_key)) of the peer to connect to.',
48 | 'type': 'string',
49 | 'required': True
50 | }],
51 | responses={
52 | 200: {
53 | "schema": DefaultResponseSchema,
54 | "examples": {'Success': {"success": True}}
55 | },
56 | HTTP_NOT_FOUND: {
57 | "schema": DefaultResponseSchema,
58 | "examples": {'DHT not loaded': {"success": False, "error": "DHT community not found"}}
59 | }
60 | }
61 | )
62 | async def handle_get(self, request: Request) -> Response:
63 | """
64 | Handle a GET request.
65 | """
66 | if not self.dht:
67 | return Response({"error": "DHT community not found"}, status=HTTP_NOT_FOUND)
68 |
69 | mid = unhexlify(request.match_info['mid'])
70 |
71 | async def connect_peer() -> None:
72 | try:
73 |                 await self.dht.connect_peer(mid)
74 |             except DHTError:
75 |                 logging.exception("DHT failed to connect to %s", hexlify(mid))
76 |             else:
77 |                 logging.info("DHT connected to %s", hexlify(mid))
78 |
79 | ensure_future(connect_peer()) # noqa: RUF006
80 | return Response({"success": True})
81 |
--------------------------------------------------------------------------------
/ipv8/REST/root_endpoint.py:
--------------------------------------------------------------------------------
1 | from .asyncio_endpoint import AsyncioEndpoint
2 | from .base_endpoint import BaseEndpoint
3 | from .dht_endpoint import DHTEndpoint
4 | from .identity_endpoint import IdentityEndpoint
5 | from .isolation_endpoint import IsolationEndpoint
6 | from .network_endpoint import NetworkEndpoint
7 | from .noblock_dht_endpoint import NoBlockDHTEndpoint
8 | from .overlays_endpoint import OverlaysEndpoint
9 | from .tunnel_endpoint import TunnelEndpoint
10 |
11 |
12 | class RootEndpoint(BaseEndpoint):
13 | """
14 | The root endpoint of the HTTP API is the root resource in the request tree.
15 | It will dispatch requests regarding torrents, channels, settings etc to the right child endpoint.
16 | """
17 |
18 | def setup_routes(self) -> None:
19 | """
20 | Register the names to make this endpoint callable.
21 | """
22 | endpoints = {'/asyncio': AsyncioEndpoint,
23 | '/dht': DHTEndpoint,
24 | '/identity': IdentityEndpoint,
25 | '/isolation': IsolationEndpoint,
26 | '/network': NetworkEndpoint,
27 | '/noblockdht': NoBlockDHTEndpoint,
28 | '/overlays': OverlaysEndpoint,
29 | '/tunnel': TunnelEndpoint}
30 | for path, ep_cls in endpoints.items():
31 | self.add_endpoint(path, ep_cls())
32 |
--------------------------------------------------------------------------------
/ipv8/REST/schema.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import cast
4 |
5 | from marshmallow import Schema
6 | from marshmallow.fields import Boolean, Integer, List, Nested, String
7 | from marshmallow.schema import SchemaMeta
8 |
9 |
10 | class DefaultResponseSchema(Schema):
11 | """
12 |     Every response contains its success status and optionally the error that occurred.
13 | """
14 |
15 | success = Boolean(metadata={"description": "Indicator of success/failure"}, required=True)
16 | error = String(metadata={"description": "Optional field describing any failures that may have occurred"},
17 | required=False)
18 |
19 |
20 | class Address(Schema):
21 | """
22 | The schema for address information.
23 | """
24 |
25 | ip = String()
26 | port = Integer()
27 |
28 |
29 | class AddressWithPK(Address):
30 | """
31 | The schema for addresses that have a public key.
32 | """
33 |
34 | public_key = String()
35 |
36 |
37 | class OverlayStatisticsSchema(Schema):
38 | """
39 | The schema for overlay statistics.
40 | """
41 |
42 | num_up = Integer()
43 | num_down = Integer()
44 | bytes_up = Integer()
45 | bytes_down = Integer()
46 | diff_time = Integer()
47 |
48 |
49 | class OverlayStrategySchema(Schema):
50 | """
51 | The schema describing discovery strategies for overlays.
52 | """
53 |
54 | name = String()
55 | target_peers = Integer()
56 |
57 |
58 | class OverlaySchema(Schema):
59 | """
60 | The schema to describe overlays.
61 | """
62 |
63 | id = String()
64 | my_peer = String()
65 | global_time = Integer()
66 | peers = List(Nested(cast("Schema", AddressWithPK)))
67 | overlay_name = String()
68 | max_peers = Integer()
69 | is_isolated = Boolean()
70 | my_estimated_wan = Nested(cast("Schema", Address))
71 | my_estimated_lan = Nested(cast("Schema", Address))
72 | strategies = List(Nested(cast("Schema", OverlayStrategySchema)))
73 | statistics = Nested(cast("Schema", OverlayStatisticsSchema))
74 |
75 |
76 | class DHTValueSchema(Schema):
77 | """
78 | The schema to describe values in the DHT.
79 | """
80 |
81 | public_key = String()
82 | key = String()
83 | value = String()
84 |
85 |
86 | def schema(**kwargs) -> Schema:
87 | """
88 | Create a schema. Mostly useful for creating single-use schemas on-the-fly.
89 | """
90 | items = list(kwargs.items())
91 | if len(items) != 1 or not isinstance(items[0][1], dict):
92 | msg = "schema required 1 keyword argument of type dict"
93 | raise RuntimeError(msg)
94 | name, spec = items[0]
95 |
96 | schema_dict: dict[str, Nested | List] = {}
97 | for key, value in spec.items():
98 | cls, description = value if isinstance(value, tuple) else (value, None)
99 | required = key.endswith('*')
100 | key = key.rstrip('*') # noqa: PLW2901
101 | kwargs = {'required': required, "metadata": {"description": description}}
102 |
103 | if isinstance(cls, SchemaMeta):
104 | schema_dict[key] = Nested(cast("Schema", cls), required=required)
105 | elif isinstance(cls, list) and len(cls) == 1:
106 | cls = cls[0]
107 | schema_dict[key] = List(Nested(cast("Schema", cls)), **kwargs) if isinstance(cls, SchemaMeta) else List(cls, **kwargs)
108 | else:
109 | schema_dict[key] = cls.__call__(**kwargs) if callable(cls) else cls
110 | return cast("Schema", type(name, (Schema,), schema_dict))
111 |
--------------------------------------------------------------------------------
/ipv8/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/default_identity_formats.py:
--------------------------------------------------------------------------------
1 | FORMATS = {
2 | "id_metadata": {
3 | "algorithm": "bonehexact",
4 | "key_size": 32, # Pairings over 1024 bit space
5 | "hash": "sha256_4" # 4 byte hash
6 | },
7 | "id_metadata_big": {
8 | "algorithm": "bonehexact",
9 | "key_size": 64, # Pairings over 4096 bit space
10 | "hash": "sha256" # 32 byte hash
11 | },
12 | "id_metadata_huge": {
13 | "algorithm": "bonehexact",
14 | "key_size": 96, # Pairings over 9216 bit space
15 | "hash": "sha512" # 64 byte hash
16 | },
17 | "id_metadata_range_18plus": {
18 | "algorithm": "pengbaorange",
19 | "key_size": 32, # Pairings over 1024 bit space
20 | "min": 18,
21 | "max": 200
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/ipv8/attestation/identity/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/identity/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/identity/attestation.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import binascii
4 | import struct
5 | from typing import TYPE_CHECKING
6 |
7 | from ..signed_object import AbstractSignedObject
8 |
9 | if TYPE_CHECKING:
10 | from ...types import Metadata, PrivateKey, PublicKey
11 |
12 |
13 | class Attestation(AbstractSignedObject):
14 | """
15 | A pointer to Metadata.
16 |
17 | An Attestation does not and should not contain an index.
18 | An Attestation does not and should not contain a reference to the public key (directly).
19 | """
20 |
21 | def __init__(self,
22 | metadata_pointer: bytes,
23 | private_key: PrivateKey | None = None,
24 | signature: bytes | None = None) -> None:
25 | """
26 | Create a new business-layer attestation (not the actual crypto attestation).
27 | """
28 | self.metadata_pointer = metadata_pointer
29 | super().__init__(private_key, signature)
30 |
31 | def get_plaintext(self) -> bytes:
32 | """
33 | Convert to bytes.
34 | """
35 | return self.metadata_pointer
36 |
37 | @classmethod
38 | def unserialize(cls: type[Attestation], data: bytes, public_key: PublicKey, offset: int = 0) -> Attestation:
39 | """
40 | Read from bytes.
41 | """
42 | sig_len = public_key.get_signature_length()
43 | metadata_pointer, signature = struct.unpack_from(f">32s{sig_len}s", data, offset=offset)
44 | return Attestation(metadata_pointer, signature=signature)
45 |
46 | @classmethod
47 | def create(cls: type[Attestation], metadata: Metadata, private_key: PrivateKey) -> Attestation:
48 | """
49 | Create an attestation for given metadata using our key.
50 | """
51 | return Attestation(metadata.get_hash(), private_key=private_key)
52 |
53 | def to_database_tuple(self) -> tuple[bytes, bytes]:
54 | """
55 | Get a representation of this Attestation as two byte strings (metadata hash and signature).
56 |
57 | :returns: the two byte strings for database insertion.
58 | """
59 | return self.metadata_pointer, self.signature
60 |
61 | @classmethod
62 | def from_database_tuple(cls: type[Attestation],
63 | metadata_pointer: bytes,
64 | signature: bytes) -> Attestation:
65 | """
66 |         Create an Attestation from a two-byte-string representation (metadata hash and signature).
67 |
68 | :param metadata_pointer: the hash of the Attestation.
69 | :param signature: the signature over the plaintext Attestation.
70 | """
71 | return Attestation(metadata_pointer, signature=signature)
72 |
73 | def __str__(self) -> str:
74 | """
75 | Convert this attestation to a human-readable string.
76 | """
77 | return f"Attestation({binascii.hexlify(self.metadata_pointer).decode()})"
78 |
--------------------------------------------------------------------------------
/ipv8/attestation/identity/payload.py:
--------------------------------------------------------------------------------
1 | from ...messaging.lazy_payload import VariablePayloadWID, vp_compile
2 |
3 |
4 | @vp_compile
5 | class DiclosePayload(VariablePayloadWID):
6 | """
7 | A payload used for disclosure of identity meta information.
8 | """
9 |
10 | msg_id = 1
11 | format_list = ['varlenH', 'varlenH', 'varlenH', 'varlenH']
12 | names = ['metadata', 'tokens', 'attestations', 'authorities']
13 |
14 | metadata: bytes
15 | tokens: bytes
16 | attestations: bytes
17 | authorities: bytes
18 |
19 |
20 | @vp_compile
21 | class AttestPayload(VariablePayloadWID):
22 | """
23 | A payload used for attestation.
24 | """
25 |
26 | msg_id = 2
27 | format_list = ['varlenH']
28 | names = ['attestation']
29 |
30 | attestation: bytes
31 |
32 |
33 | @vp_compile
34 | class RequestMissingPayload(VariablePayloadWID):
35 | """
36 | A payload used to request missing identity meta information.
37 | """
38 |
39 | msg_id = 3
40 | format_list = ['I']
41 | names = ['known']
42 |
43 | known: int
44 |
45 |
46 | @vp_compile
47 | class MissingResponsePayload(VariablePayloadWID):
48 | """
49 | A payload to respond with missing identity meta information.
50 | """
51 |
52 | msg_id = 4
53 | format_list = ['raw']
54 | names = ['tokens']
55 |
56 | tokens: bytes
57 |
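A small sketch of how these compiled payloads are used, assuming the default_serializer helpers (pack_serializable / unpack_serializable) from ipv8.messaging.serialization:

from ipv8.attestation.identity.payload import RequestMissingPayload
from ipv8.messaging.serialization import default_serializer

payload = RequestMissingPayload(42)                    # "I already know 42 tokens"
raw = default_serializer.pack_serializable(payload)    # 'I' format: 4-byte unsigned int
restored, _ = default_serializer.unpack_serializable(RequestMissingPayload, raw)
assert restored.known == 42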
--------------------------------------------------------------------------------
/ipv8/attestation/schema/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/schema/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/tokentree/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/tokentree/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/wallet/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/bonehexact/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/wallet/bonehexact/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/pengbaorange/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/wallet/pengbaorange/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/pengbaorange/attestation.py:
--------------------------------------------------------------------------------
1 | from binascii import hexlify
2 | from math import sqrt
3 | from os import urandom
4 |
5 | from ..primitives.structs import BonehPublicKey
6 | from .boudot import EL, SQR
7 | from .structs import PengBaoAttestation, PengBaoCommitment, PengBaoCommitmentPrivate, PengBaoPublicData
8 |
9 |
10 | def _random_number(bytelen: int) -> int:
11 | """
12 | Generate a random integer of a given number of bytes.
13 | """
14 | return int(hexlify(urandom(bytelen)), 16)
15 |
16 |
17 | def create_attest_pair(PK: BonehPublicKey, # noqa: N803
18 | value: int,
19 | a: int,
20 | b: int,
21 | bitspace: int) -> PengBaoAttestation:
22 | """
23 |     Create a proof that a <= value <= b, for a public key's value lying within a certain bitspace.
24 | """
25 | bytespace = bitspace // 8
26 | r = _random_number(bytespace)
27 | ra = _random_number(bytespace)
28 | raa = _random_number(bitspace // 16)
29 | raa = raa * raa
30 |
31 | w = _random_number(bytespace)
32 | w2 = w * w
33 |
34 | c = PK.g.intpow(value) * PK.h.intpow(r)
35 |
36 | c1 = c // (PK.g.intpow(a - 1))
37 | c2 = PK.g.intpow(b + 1) // c
38 | ca = c1.intpow(b - value + 1) * PK.h.intpow(ra)
39 | caa = ca.intpow(w2) * PK.h.intpow(raa)
40 |
41 | mst = w2 * (value - a + 1) * (b - value + 1)
42 | m4 = 0
43 | while not m4:
44 | m4 = _random_number(bytespace) % (int(sqrt(mst)) - 1)
45 | m3 = m4 * m4
46 | m1 = 0
47 | while not m1:
48 | m1 = _random_number(bytespace) % (mst - m4)
49 | m2 = mst - m1 - m3
50 |
51 | rst = w2 * ((b - value + 1) * r + ra) + raa
52 | r1 = 0
53 | while not r1:
54 | r1 = _random_number(bytespace * bytespace) % (rst // 2 - 1)
55 | r2 = 0
56 | while not r2:
57 | r2 = _random_number(bytespace * bytespace) % (rst // 2 - 1)
58 | r3 = rst - r1 - r2
59 |
60 | ca1 = PK.g.intpow(m1) * PK.h.intpow(r1)
61 | ca2 = PK.g.intpow(m2) * PK.h.intpow(r2)
62 | ca3 = caa // (ca1 * ca2)
63 |
64 | el = EL.create(b - value + 1, -r, ra, PK.g, PK.h, c1, PK.h, b, bitspace)
65 | sqr1 = SQR.create(w, raa, ca, PK.h, b, bitspace)
66 | sqr2 = SQR.create(m4, r3, PK.g, PK.h, b, bitspace)
67 |
68 | publicdata = PengBaoPublicData(PK, bitspace, PengBaoCommitment(c, c1, c2, ca, ca1, ca2, ca3, caa), el, sqr1, sqr2)
69 | privatedata = PengBaoCommitmentPrivate(m1, m2, m3, r1, r2, r3)
70 |
71 | return PengBaoAttestation(publicdata, privatedata)
72 |
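A sketch of generating a range proof with the function above, assuming generate_keypair() from ..primitives.boneh returns a (public key, private key) pair and using a small key size to keep prime generation quick:

from ipv8.attestation.wallet.pengbaorange.attestation import create_attest_pair
from ipv8.attestation.wallet.primitives.boneh import generate_keypair

public_key, _private_key = generate_keypair(32)   # small keys, demonstration only
attestation = create_attest_pair(public_key, value=21, a=18, b=65, bitspace=32)
# attestation.publicdata goes to the verifier; attestation.privatedata stays local.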
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/primitives/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/attestation/wallet/primitives/__init__.py
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/primitives/attestation.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from hashlib import sha256, sha512
4 |
5 | from ....util import int2byte
6 |
7 |
8 | def to_ascii(value: str | bytes) -> bytes:
9 | """
10 | Convert any string type to bytes.
11 | """
12 | if isinstance(value, str):
13 | return b''.join(int2byte(ord(c)) for c in value)
14 | return value
15 |
16 |
17 | def sha512_as_int(value: str | bytes) -> int:
18 | """
19 | Convert a SHA512 hash to an integer.
20 | """
21 | out = 0
22 | hashed = sha512(to_ascii(value)).digest()
23 | for i in range(len(hashed)):
24 | out <<= 8
25 | out |= hashed[i]
26 | return out
27 |
28 |
29 | def sha256_as_int(value: str | bytes) -> int:
30 | """
31 | Convert a SHA256 hash to an integer.
32 | """
33 | out = 0
34 | hashed = sha256(to_ascii(value)).digest()
35 | for i in range(len(hashed)):
36 | out <<= 8
37 | out |= hashed[i]
38 | return out
39 |
40 |
41 | def sha256_4_as_int(value: str | bytes) -> int:
42 | """
43 |     Convert the first 4 bytes of a SHA256 hash to an integer.
44 | """
45 | out = 0
46 | hashed = sha256(to_ascii(value)).digest()[:4]
47 | for i in range(len(hashed)):
48 | out <<= 8
49 | out |= hashed[i]
50 | return out
51 |
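The three helpers above are big-endian integer views of a digest; a quick sketch of the equivalence:

from hashlib import sha256

from ipv8.attestation.wallet.primitives.attestation import sha256_4_as_int, sha256_as_int

value = b"example"
assert sha256_as_int(value) == int.from_bytes(sha256(value).digest(), "big")
assert sha256_4_as_int(value) == int.from_bytes(sha256(value).digest()[:4], "big")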
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/primitives/cryptography_wrapper.py:
--------------------------------------------------------------------------------
1 | from cryptography.hazmat.backends import default_backend
2 | from cryptography.hazmat.backends.openssl.backend import Backend
3 |
4 | # ruff: noqa: SLF001
5 |
6 |
7 | def generate_safe_prime(bit_length: int, backend: Backend = default_backend()) -> int: # noqa: B008
8 | """
9 | Generate a 'safe' prime p ((p-1)/2 is also prime).
10 |
11 | :param bit_length: the length of the generated prime in bits
12 | :type bit_length: int
13 | :param backend: the cryptography backend to use
14 | :type backend: Backend
15 | :return: the generated prime
16 | :rtype: int
17 | """
18 | generated = backend._lib.BN_new()
19 | err = backend._lib.BN_generate_prime_ex(generated, bit_length, 1,
20 | backend._ffi.NULL, backend._ffi.NULL, backend._ffi.NULL)
21 | # If the return value is 0, the generation failed
22 | if err == 0:
23 | backend._lib.BN_clear_free(generated)
24 | msg = "Failed to generate prime!"
25 | raise RuntimeError(msg)
26 | # We cannot simply convert the output to int (too long), use the hex representation and port that to int
27 | generated_hex = backend._lib.BN_bn2hex(generated)
28 | out = int(backend._ffi.string(generated_hex), 16)
29 | # Cleanup the memory
30 | backend._lib.OPENSSL_free(generated_hex)
31 | backend._lib.BN_set_word(generated, 0)
32 | backend._lib.BN_free(generated)
33 | return out
34 |
35 |
36 | def is_prime(number: int, backend: Backend = default_backend()) -> bool: # noqa: B008
37 | """
38 | Check a number for primality.
39 |
40 | :param number: the number to check for primality
41 | :type number: int
42 | :param backend: the cryptography backend to use
43 | :type backend: Backend
44 |     :return: True if the number is expected to be prime, False otherwise
45 | :rtype: bool
46 | """
47 | # We cannot simply convert the output to int (too long), use the hex representation
48 | hex_n = hex(number)[2:]
49 | if hex_n.endswith('L'):
50 | hex_n = hex_n[:-1]
51 | bhex_n = hex_n.encode()
52 | generated = backend._lib.BN_new()
53 | bn_pp = backend._ffi.new("BIGNUM **", generated)
54 | err = backend._lib.BN_hex2bn(bn_pp, bhex_n)
55 | # If the return value is 0, the conversion to hex failed
56 | if err == 0:
57 | backend._lib.BN_clear_free(generated)
58 | msg = "Failed to read BIGNUM from hex string!"
59 | raise RuntimeError(msg)
60 | result = backend._lib.BN_is_prime_ex(generated, backend._lib.BN_prime_checks_for_size(int(len(bhex_n) * 8)),
61 | backend._ffi.NULL, backend._ffi.NULL)
62 | backend._lib.BN_set_word(generated, 0)
63 | backend._lib.BN_free(generated)
64 | return result == 1
65 |
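A short sketch of the two helpers above; a modest bit length keeps generation quick:

from ipv8.attestation.wallet.primitives.cryptography_wrapper import generate_safe_prime, is_prime

p = generate_safe_prime(128)     # p is prime and (p - 1) // 2 is prime as well
assert is_prime(p)
assert is_prime((p - 1) // 2)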
--------------------------------------------------------------------------------
/ipv8/attestation/wallet/primitives/ec.py:
--------------------------------------------------------------------------------
1 | """
2 | Ported from "The Weil Pairing on Elliptic Curves and Its Cryptographic Applications" - Appendix D by Alex Edward Aftuk.
3 | """
4 | from __future__ import annotations
5 |
6 | from typing import cast
7 |
8 | from .value import FP2Value
9 |
10 | # ruff: noqa: N802,N803,N806
11 |
12 |
13 | def esum(mod: int, p: str | tuple[FP2Value, FP2Value],
14 | q: str | tuple[FP2Value, FP2Value]) -> str | tuple[FP2Value, FP2Value]:
15 | """
16 | Perform Elliptic Curve addition of points P and Q over Fp^2.
17 | """
18 | if p == "O" and q == "O":
19 | return "O"
20 | if p == "O":
21 | return q
22 | if q == "O":
23 | return p
24 | x1, y1 = cast(tuple[FP2Value, FP2Value], p)
25 | x2, y2 = cast(tuple[FP2Value, FP2Value], q)
26 | if x1 == x2 and y1 == FP2Value(mod, -1) * y2:
27 | return "O"
28 | if x1 == x2:
29 | l = ((FP2Value(mod, 3) * x1 * x1) // (FP2Value(mod, 2) * y1)).normalize()
30 | else:
31 | l = ((y1 - y2) // (x1 - x2)).normalize()
32 | x3 = l * l - x1 - x2
33 | y3 = l * (x3 - x1) + y1
34 | return x3.normalize(), (FP2Value(mod, -1) * y3).normalize()
35 |
36 |
37 | def H(mod: int, p: str | tuple[FP2Value, FP2Value], q: str | tuple[FP2Value, FP2Value], x: FP2Value, y: FP2Value) -> FP2Value:
38 | """
39 | Perform the h_{T,T} function for the Miller calculation with divisors P and Q for coordinate (x,y).
40 | """
41 | x1, y1 = cast(tuple[FP2Value, FP2Value], p)
42 | x2, y2 = cast(tuple[FP2Value, FP2Value], q)
43 | if x1 == x2 and y1 == FP2Value(mod, -1) * y2:
44 | return (x - x1).normalize()
45 | if x1 == x2 and y1 == y2:
46 | l = (FP2Value(mod, 3) * x1 * x1) // (FP2Value(mod, 2) * y1)
47 | return ((y - y1 - l * (x - x1)) // (x + (x1 + x2) - l * l)).normalize()
48 | l = (y2 - y1) // (x2 - x1)
49 | return ((y - y1 - l * (x - x1)) // (x + (x1 + x2) - l * l)).normalize()
50 |
51 |
52 | def millercalc(mod: int, M: int, p: tuple[FP2Value, FP2Value], R: tuple[FP2Value, FP2Value]) -> FP2Value:
53 | """
54 |     Perform the Miller calculation for message M, point P, and coordinates given by R.
55 | """
56 | mlist = list(reversed([int(c) for c in str(bin(M))[2:]]))
57 | T: str | tuple[FP2Value, FP2Value] = p
58 | f = FP2Value(mod, 1)
59 | for i in reversed(list(range(len(mlist) - 1))):
60 | f = (f * f * H(mod, T, T, R[0], R[1])).normalize()
61 | T = esum(mod, T, T)
62 | if mlist[i] == 1:
63 | f = (f * H(mod, T, p, R[0], R[1])).normalize()
64 | T = esum(mod, T, p)
65 | return f
66 |
67 |
68 | def weilpairing(mod: int, m: int, P: tuple[FP2Value, FP2Value], Q: tuple[FP2Value, FP2Value],
69 | S: tuple[FP2Value, FP2Value]) -> FP2Value:
70 | """
71 | Create a Weil pairing for message m, points P and Q and DH secret S.
72 | """
73 | nS = (S[0], FP2Value(mod, -1) * S[1])
74 | A = millercalc(mod, m, P, cast(tuple[FP2Value, FP2Value], esum(mod, Q, S)))
75 | B = millercalc(mod, m, P, S)
76 | C = millercalc(mod, m, Q, cast(tuple[FP2Value, FP2Value], esum(mod, P, nS)))
77 | D = millercalc(mod, m, Q, nS)
78 | wp = ((A * D) // (B * C))
79 | return wp.wp_nominator() * wp.wp_denom_inverse()
80 |
--------------------------------------------------------------------------------
/ipv8/bootstrapping/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/bootstrapping/__init__.py
--------------------------------------------------------------------------------
/ipv8/bootstrapping/bootstrapper_interface.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from abc import ABC, abstractmethod
4 | from typing import TYPE_CHECKING
5 |
6 | if TYPE_CHECKING:
7 | from asyncio import Future
8 | from collections.abc import Coroutine, Iterable
9 |
10 | from ..types import Address, Community
11 |
12 |
13 | class Bootstrapper(ABC):
14 | """
15 |     An interface to connect to external bootstrapping resources, using external hardware.
16 |
17 | When not to crash:
18 | - Loading this module file when missing dependencies.
19 | - Initializing a subclass when the service is unavailable (return ``False``).
20 | - Failing to retrieve peers for a service (return an empty iterable, e.g. ``[]``).
21 |
22 | When to crash:
23 | - Initializing this module subclass when missing dependencies.
24 | """
25 |
26 | def __init__(self) -> None:
27 | """
28 | Create a new ``Bootstrapper``.
29 | """
30 | self.initialized = False
31 |
32 | @abstractmethod
33 | def initialize(self, overlay: Community) -> Future | Coroutine:
34 | """
35 | Start connecting to this bootstrapping service. Don't perform any network traffic in ``__init__``!
36 |
37 |         You are encouraged to implement this method as non-async for faster bootstrapper inclusion.
38 |
39 | :param overlay: the network overlay to initialize for.
40 | :returns: whether the initialization was successful.
41 | """
42 |
43 | @abstractmethod
44 | async def get_addresses(self, overlay: Community, timeout: float) -> Iterable[Address]:
45 | """
46 | Return some IPv8 addresses (if available) from this bootstrapping service.
47 | These addresses should be walkable (not blocked by a NAT or firewall).
48 |
49 | :param overlay: the network overlay to get peers for.
50 | :param timeout: the maximum time we wish to wait until we get any result (i.e. an empty list).
51 |         :returns: the addresses for the given overlay.
52 | """
53 |
54 | @abstractmethod
55 | def keep_alive(self, overlay: Community) -> None:
56 | """
57 | Periodically called to keep this bootstrap connection alive.
58 |
59 | :param overlay: the network overlay to keep alive.
60 | """
61 |
62 | @abstractmethod
63 | def blacklist(self) -> Iterable[Address]:
64 | """
65 | Returns the blacklisted addresses for this Bootstrapper.
66 | """
67 |
68 | @abstractmethod
69 | def unload(self) -> None:
70 | """
71 | Stop and unload all the resources used by this Bootstrapper.
72 | """
73 |
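A minimal sketch of a subclass that follows the crash/no-crash rules above; the StaticBootstrapper name and its fixed address list are purely illustrative:

from __future__ import annotations

from asyncio import Future

from ipv8.bootstrapping.bootstrapper_interface import Bootstrapper


class StaticBootstrapper(Bootstrapper):
    """Hand out a fixed set of addresses instead of contacting an external service."""

    def __init__(self, addresses) -> None:
        super().__init__()
        self.fixed_addresses = list(addresses)

    def initialize(self, overlay) -> Future:
        # Non-async and no network traffic: resolve immediately.
        self.initialized = True
        future: Future = Future()
        future.set_result(True)
        return future

    async def get_addresses(self, overlay, timeout: float):
        # Never crash: an empty iterable is a valid "no peers" answer.
        return list(self.fixed_addresses)

    def keep_alive(self, overlay) -> None:
        pass  # Nothing to keep alive for a static list.

    def blacklist(self):
        return []

    def unload(self) -> None:
        self.fixed_addresses = []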
--------------------------------------------------------------------------------
/ipv8/bootstrapping/dispersy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/bootstrapping/dispersy/__init__.py
--------------------------------------------------------------------------------
/ipv8/bootstrapping/udpbroadcast/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/bootstrapping/udpbroadcast/__init__.py
--------------------------------------------------------------------------------
/ipv8/dht/__init__.py:
--------------------------------------------------------------------------------
1 | class DHTError(Exception):
2 | """
3 | Generic error for DHT-related failures.
4 | """
5 |
--------------------------------------------------------------------------------
/ipv8/dht/churn.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from typing import cast
3 |
4 | from ..peer import Peer
5 | from ..peerdiscovery.discovery import DiscoveryStrategy
6 | from ..types import DHTCommunity
7 | from .routing import Node
8 |
9 |
10 | class PingChurn(DiscoveryStrategy):
11 | """
12 | Strategy to maintain the data structures of the DHT community.
13 | """
14 |
15 | def __init__(self, overlay: DHTCommunity, ping_interval: float = 25.0) -> None:
16 | """
17 | Create a new strategy that maintains the DHT Community and pings nodes at the given interval (in seconds).
18 | """
19 | super().__init__(overlay)
20 | self.ping_interval = ping_interval
21 |
22 | def take_step(self) -> None: # noqa: C901, PLR0912
23 | """
24 |         Every tick (half a second by default), perform maintenance.
25 |
26 | If routing tables are set up:
27 | - Remove all "bad" nodes from the routing table.
28 | - Remove all nodes that are not part of any routing table.
29 | - Inspect the routing tables to register Network services for its peers.
30 | - Send pings to peers at our configured ping_interval.
31 | """
32 | self.overlay = cast(DHTCommunity, self.overlay)
33 | with self.walk_lock:
34 | # Nothing is happening yet, skip this step
35 | if not self.overlay.routing_tables:
36 | return
37 |
38 | for routing_table in self.overlay.routing_tables.values():
39 | for node in routing_table.remove_bad_nodes():
40 | self.overlay.network.remove_peer(node)
41 |
42 | for peer in self.overlay.get_peers():
43 | if peer.address in self.overlay.network.blacklist:
44 | continue
45 |
46 | node = Node(peer.key, peer.address)
47 | routing_table = self.overlay.get_routing_table(node)
48 | if not routing_table.has(node.id) and not routing_table.add(node):
49 | self.overlay.network.remove_peer(peer)
50 |
51 | for routing_table in self.overlay.routing_tables.values():
52 | for bucket in routing_table.trie.values():
53 | for node in bucket.nodes.values():
54 | if node not in self.overlay.get_peers():
55 | peer = Peer(node.key, node.address)
56 | self.overlay.network.add_verified_peer(peer)
57 | self.overlay.network.discover_services(peer, [self.overlay.community_id])
58 |
59 | now = time()
60 | for routing_table in self.overlay.routing_tables.values():
61 | for bucket in routing_table.trie.values():
62 | for node in bucket.nodes.values():
63 | if node.last_ping_sent + self.ping_interval <= now:
64 | self.overlay.ping(node)
65 |
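A sketch of wiring the strategy above into a running IPv8 instance, assuming the usual (strategy, target_peers) entries in IPv8.strategies, where -1 means the strategy keeps stepping regardless of peer count; the ipv8_instance and dht_community parameters are placeholders:

from ipv8.dht.churn import PingChurn


def install_dht_churn(ipv8_instance, dht_community) -> None:
    """Register PingChurn so take_step() runs on every strategy tick."""
    ipv8_instance.strategies.append((PingChurn(dht_community, ping_interval=25.0), -1))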
--------------------------------------------------------------------------------
/ipv8/keyvault/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/keyvault/__init__.py
--------------------------------------------------------------------------------
/ipv8/keyvault/keys.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import abc
4 | from hashlib import sha1
5 |
6 |
7 | class Key(metaclass=abc.ABCMeta):
8 | """
9 | Interface for a public or private key.
10 | """
11 |
12 | @abc.abstractmethod
13 | def pub(self) -> PublicKey:
14 | """
15 | Return the public key for this key material.
16 | """
17 |
18 | @abc.abstractmethod
19 | def has_secret_key(self) -> bool:
20 | """
21 | Whether this key material includes a secret key.
22 |
23 | Public keys MAY also contain a private key but not the other way around.
24 | """
25 |
26 | @abc.abstractmethod
27 | def key_to_bin(self) -> bytes:
28 | """
29 | Convert this key material to bytes.
30 | """
31 |
32 | def key_to_hash(self) -> bytes:
33 | """
34 | Get the SHA-1 hash of this key.
35 | """
36 | if self.has_secret_key():
37 | return sha1(self.pub().key_to_bin()).digest()
38 | return sha1(self.key_to_bin()).digest()
39 |
40 |
41 | class PrivateKey(Key, metaclass=abc.ABCMeta):
42 | """
43 | Interface for a private key.
44 | """
45 |
46 | def has_secret_key(self) -> bool:
47 | """
48 |         A private key always includes secret key material, so this is always True.
49 | """
50 | return True
51 |
52 | @abc.abstractmethod
53 | def signature(self, msg: bytes) -> bytes:
54 | """
55 | Create a signature for the given data.
56 | """
57 |
58 |
59 | class PublicKey(Key, metaclass=abc.ABCMeta):
60 | """
61 | Interface for a public key.
62 | """
63 |
64 | def pub(self) -> PublicKey:
65 | """
66 | We are already the public key, return ourselves.
67 | """
68 | return self
69 |
70 | def has_secret_key(self) -> bool:
71 | """
72 | By default, a public key cannot be assumed to include private key material.
73 | """
74 | return False
75 |
76 | @abc.abstractmethod
77 | def verify(self, signature: bytes, msg: bytes) -> bool:
78 | """
79 | Verify that the given signature belongs to the given message for this public key.
80 | """
81 |
82 | @abc.abstractmethod
83 | def get_signature_length(self) -> int:
84 | """
85 | Get the length (in number of bytes) for signatures generated by this type of key.
86 | """
87 |
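A short sketch of the interfaces above in action, using the LibNaCL implementation found in ipv8/keyvault/private/libnaclkey.py:

from ipv8.keyvault.private.libnaclkey import LibNaCLSK

private_key = LibNaCLSK()                 # generates fresh Curve25519/Ed25519 material
public_key = private_key.pub()

message = b"hello overlay"
signature = private_key.signature(message)
assert public_key.verify(signature, message)
assert len(signature) == public_key.get_signature_length()
assert private_key.key_to_hash() == public_key.key_to_hash()   # same 20-byte SHA-1 id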
--------------------------------------------------------------------------------
/ipv8/keyvault/private/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/keyvault/private/__init__.py
--------------------------------------------------------------------------------
/ipv8/keyvault/private/libnaclkey.py:
--------------------------------------------------------------------------------
1 | import libnacl
2 | import libnacl.dual
3 | import libnacl.sign
4 |
5 | from ...keyvault.keys import PrivateKey
6 | from ...keyvault.public.libnaclkey import LibNaCLPK
7 |
8 |
9 | class LibNaCLSK(PrivateKey, LibNaCLPK):
10 | """
11 | A LibNaCL implementation of a secret key.
12 | """
13 |
14 | def __init__(self, binarykey: bytes = b"") -> None:
15 | """
16 | Create a new LibNaCL secret key. Optionally load it from a string representation.
17 | Otherwise generate it from the 25519 curve.
18 |
19 | :param binarykey: load the sk from this string (see key_to_bin())
20 | """
21 | # Load the key, if specified
22 | if binarykey:
23 | crypt, seed = (binarykey[:libnacl.crypto_box_SECRETKEYBYTES],
24 | binarykey[libnacl.crypto_box_SECRETKEYBYTES: libnacl.crypto_box_SECRETKEYBYTES
25 | + libnacl.crypto_sign_SEEDBYTES])
26 | key = libnacl.dual.DualSecret(crypt, seed)
27 | else:
28 | key = libnacl.dual.DualSecret()
29 |
30 | super().__init__(pk=key.pk, hex_vk=key.hex_vk())
31 | self.key = key
32 |
33 | def pub(self) -> LibNaCLPK:
34 | """
35 | Get the public key for this secret key.
36 | """
37 | return LibNaCLPK(pk=self.key.pk, hex_vk=self.veri.hex_vk())
38 |
39 | def signature(self, msg: bytes) -> bytes:
40 | """
41 | Create a signature for a message.
42 |
43 | :param msg: the message to sign
44 | :return: the signature for the message
45 | """
46 | return self.key.signature(msg)
47 |
48 | def key_to_bin(self) -> bytes:
49 | """
50 | Get the string representation of this key.
51 | """
52 | return b"LibNaCLSK:" + self.key.sk + self.key.seed
53 |
--------------------------------------------------------------------------------
/ipv8/keyvault/public/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/keyvault/public/__init__.py
--------------------------------------------------------------------------------
/ipv8/keyvault/public/libnaclkey.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import libnacl
4 | import libnacl.encode
5 | import libnacl.public
6 | import libnacl.sign
7 |
8 | from ...keyvault.keys import PublicKey
9 |
10 |
11 | class LibNaCLPK(PublicKey):
12 | """
13 | A LibNaCL implementation of a public key.
14 | """
15 |
16 | def __init__(self, binarykey: bytes = b"", pk: bytes | None = None, hex_vk: bytes | None = None) -> None:
17 | """
18 | Create a new LibNaCL public key. Optionally load it from a string representation or
19 | using a public key and verification key.
20 |
21 | :param binarykey: load the pk from this string (see key_to_bin())
22 | :param pk: the libnacl public key to use in byte format
23 | :param hex_vk: a verification key in hex format
24 | """
25 | # Load the key, if specified
26 | if binarykey:
27 | pk, vk = (binarykey[:libnacl.crypto_box_SECRETKEYBYTES],
28 | binarykey[libnacl.crypto_box_SECRETKEYBYTES: libnacl.crypto_box_SECRETKEYBYTES
29 | + libnacl.crypto_sign_SEEDBYTES])
30 | hex_vk = libnacl.encode.hex_encode(vk)
31 | # Construct the public key and verifier objects
32 | self.key = libnacl.public.PublicKey(pk)
33 | self.veri = libnacl.sign.Verifier(hex_vk)
34 |
35 | def verify(self, signature: bytes, msg: bytes) -> bool:
36 | """
37 | Verify whether a given signature is correct for a message.
38 |
39 | :param signature: the given signature
40 | :param msg: the given message
41 | """
42 | return self.veri.verify(signature + msg)
43 |
44 | def key_to_bin(self) -> bytes:
45 | """
46 | Get the string representation of this key.
47 | """
48 | return b"LibNaCLPK:" + self.key.pk + self.veri.vk
49 |
50 | def get_signature_length(self) -> int:
51 | """
52 | Returns the length, in bytes, of each signature made using EC.
53 | """
54 | return libnacl.crypto_sign_BYTES
55 |
--------------------------------------------------------------------------------
/ipv8/messaging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/anonymization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/anonymization/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/anonymization/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from asyncio import FIRST_COMPLETED, Future, wait
4 | from statistics import mean, median
5 | from timeit import default_timer
6 | from typing import TYPE_CHECKING
7 |
8 | from .tunnel import CIRCUIT_STATE_CLOSING, Circuit
9 |
10 | if TYPE_CHECKING:
11 | from .community import TunnelCommunity
12 |
13 |
14 | async def run_speed_test(tc: TunnelCommunity, circuit: Circuit, request_size: int, response_size: int,
15 | num_requests: int, window: int = 50) -> dict[str, int | float]:
16 | """
17 | Test a circuit's speed.
18 | """
19 | num_sent = 0
20 | num_ack = 0
21 | outstanding: set[Future[tuple[bytes, float]]] = set()
22 | start = default_timer()
23 | rtts = []
24 |
25 | while True:
26 | while num_sent < num_requests and len(outstanding) < window and circuit.state != CIRCUIT_STATE_CLOSING:
27 | outstanding.add(tc.send_test_request(circuit, request_size, response_size))
28 | num_sent += 1
29 | if not outstanding:
30 | break
31 | done, outstanding = await wait(outstanding, return_when=FIRST_COMPLETED, timeout=10)
32 | if not done:
33 |             # We have received nothing for the past 10s. Any pending messages are considered lost.
34 | break
35 | # Make sure to only count futures that haven't been set by on_timeout.
36 | results = [f.result() for f in done if f.result() is not None]
37 | num_ack += len(results)
38 | rtts.extend([rtt for _, rtt in results])
39 |
40 | return {'speed_up': (num_ack * request_size / 1024) / (default_timer() - start),
41 | 'speed_down': (num_ack * response_size / 1024) / (default_timer() - start),
42 | 'messages_sent': num_ack + len(outstanding),
43 | 'messages_received': num_ack,
44 | 'rtt_mean': mean(rtts) if rtts else -1,
45 | 'rtt_median': median(rtts) if rtts else -1}
46 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/dispatcher/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/dispatcher/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/lan_addresses/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/addressprovider.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import traceback
4 | from abc import ABC, abstractmethod
5 | from time import time
6 |
7 |
8 | class AddressProvider(ABC):
9 | """
10 | Interface for OS-specific methods of finding local interfaces addresses.
11 | """
12 |
13 | def __init__(self, verbose: bool = False) -> None:
14 | """
15 | Create a new ``AddressProvider``.
16 |
17 | :param verbose: Log any errors that are encountered while fetching addresses.
18 | """
19 | self.verbose = verbose
20 | self.addresses: set[str] = set()
21 | self.addresses_ts = 0.0
22 |
23 | def on_exception(self) -> None:
24 | """
25 | Called by provider implementations that encounter an ``Exception``.
26 | """
27 | if self.verbose:
28 | traceback.print_exc()
29 |
30 | def discover_addresses(self, min_interval: float = 10.0) -> None:
31 | """
32 | Discovers the LAN addresses using this provider. The addresses are only discovered if
33 | the previous call was more than ``min_interval`` seconds ago. The most recent results
34 | can be retrieved through ``get_addresses_buffered()``.
35 |
36 | :param min_interval: Minimum time in seconds between discoveries.
37 | """
38 | if time() - self.addresses_ts > min_interval:
39 | # Set the timestamp immediately to avoid concurrent calls
40 | self.addresses_ts = time()
41 | self.addresses = self.get_addresses()
42 | # Since get_addresses may take a while, we set the timestamp for a second time
43 | self.addresses_ts = time()
44 |
45 | @abstractmethod
46 | def get_addresses(self) -> set[str]:
47 | """
48 | Get a set of LAN addresses using this provider.
49 | """
50 |
51 | def get_addresses_buffered(self) -> set[str]:
52 | """
53 | Return the known addresses from when ``discover_addresses()`` was last successfully called.
54 | If discovery hasn't been performed yet, do so now.
55 | """
56 | if not self.addresses:
57 | self.discover_addresses()
58 | return self.addresses
59 |
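A minimal sketch of the buffering contract above; the FixedProvider class and its hard-coded address are hypothetical:

from ipv8.messaging.interfaces.lan_addresses.addressprovider import AddressProvider


class FixedProvider(AddressProvider):
    """Always report the same (made-up) LAN address."""

    def get_addresses(self) -> set:
        return {"192.168.1.2"}


provider = FixedProvider(verbose=True)
print(provider.get_addresses_buffered())          # first call triggers discovery
provider.discover_addresses(min_interval=10.0)    # no-op when called again within 10 s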
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/any_os/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/lan_addresses/any_os/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/any_os/getaddrinfo.py:
--------------------------------------------------------------------------------
1 | import socket
2 | from typing import cast
3 |
4 | from ..addressprovider import AddressProvider
5 |
6 |
7 | class SocketGetAddrInfo(AddressProvider):
8 | """
9 | Use the ``socket`` library to discover interface addresses.
10 | """
11 |
12 | def get_addresses(self) -> set:
13 | """
14 | Attempt to use ``getaddrinfo()`` to retrieve addresses.
15 | """
16 | interface_specifications = []
17 |
18 | try:
19 | interface_specifications.extend(socket.getaddrinfo(socket.getfqdn(), 0))
20 | except OSError:
21 | self.on_exception()
22 |
23 | try:
24 | interface_specifications.extend(socket.getaddrinfo(socket.gethostname(), None))
25 | except OSError:
26 | self.on_exception()
27 |
28 | try:
29 | interface_specifications.extend(socket.getaddrinfo(None, 0))
30 | except OSError:
31 | self.on_exception()
32 |
33 | return {i[4][0] for i in interface_specifications if cast(str, i[4][0]).find(".") != -1}
34 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/any_os/netifaces.py:
--------------------------------------------------------------------------------
1 | import ipaddress
2 | import socket
3 |
4 | import netifaces
5 |
6 | from ..addressprovider import AddressProvider
7 |
8 | NETMASK0_V6 = socket.inet_ntop(socket.AF_INET6, b'\x00' * 16)
9 |
10 |
11 | class Netifaces(AddressProvider):
12 | """
13 | Use the ``netifaces`` library to discover local interface addresses.
14 | """
15 |
16 | def get_addresses(self) -> set: # noqa: C901
17 | """
18 | Use ``netifaces.ifaddresses`` to retrieve addresses.
19 | """
20 | out_addresses = []
21 |
22 | try:
23 | for interface in netifaces.interfaces():
24 | try:
25 | addresses = netifaces.ifaddresses(interface)
26 | except ValueError:
27 |                     # Some reported interfaces are invalid; we encountered one called ppp0.
28 | continue
29 |
30 | # IPv4 addresses
31 | for option in addresses.get(netifaces.AF_INET, []):
32 | if option.get("netmask") == "0.0.0.0":
33 | continue # The network interface isn't bound to any network, so we skip it
34 | address = option.get("addr")
35 | if address is not None:
36 | out_addresses.append(address)
37 |
38 | # IPv6 addresses
39 | for option in addresses.get(netifaces.AF_INET6, []):
40 | netmask = option.get("netmask")
41 | if (netmask is not None
42 | and ipaddress.IPv6Network(netmask, strict=False).network_address == NETMASK0_V6):
43 | continue # The network interface isn't bound to any network, so we skip it
44 | address = option.get("addr")
45 | if address is not None:
46 | selector = address.find("%")
47 | if selector != -1:
48 | address = address[:selector]
49 | out_addresses.append(address)
50 | except OSError:
51 | self.on_exception()
52 |
53 | return set(out_addresses)
54 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/any_os/testnet1.py:
--------------------------------------------------------------------------------
1 | import socket
2 |
3 | from ..addressprovider import AddressProvider
4 |
5 |
6 | class TestNet1(AddressProvider):
7 | """
8 | Use the ``TEST-NET-1`` address to discover local interface addresses.
9 | """
10 |
11 | def get_addresses(self) -> set:
12 | """
13 | Contact ``TEST-NET-1`` to retrieve addresses.
14 | """
15 | interface_specifications = []
16 |
17 | s = None
18 | try:
19 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
20 | s.connect(("192.0.2.0", 80))
21 | local_ip = s.getsockname()[0]
22 | s.close()
23 | s = None
24 | interface_specifications.append(local_ip)
25 | except OSError:
26 | self.on_exception()
27 | finally:
28 | if s is not None:
29 | try:
30 | s.close()
31 | s = None
32 | except OSError:
33 | self.on_exception()
34 |
35 | s = None
36 | try:
37 | s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
38 | s.connect(("::ffff:0:192.0.2.0", 80))
39 | local_ip = s.getsockname()[0]
40 | s.close()
41 | s = None
42 | interface_specifications.append(local_ip)
43 | except OSError:
44 | self.on_exception()
45 | finally:
46 | if s is not None:
47 | try:
48 | s.close()
49 | s = None
50 | except OSError:
51 | self.on_exception()
52 |
53 | return set(interface_specifications)
54 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/importshield.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import inspect
4 | import platform
5 | import sys
6 | import traceback
7 | from contextlib import AbstractContextManager
8 | from enum import Enum
9 | from typing import TYPE_CHECKING
10 |
11 | if TYPE_CHECKING:
12 | from types import TracebackType
13 |
14 |
15 | class Platform(Enum):
16 | """
17 | Platform identifier to select where providers should be run.
18 | """
19 |
20 | ANY = None
21 | NONE = 0 # For stubbing and testing.
22 | WINDOWS = "Windows"
23 | LINUX = "Linux"
24 |
25 |
26 | class conditional_import_shield(AbstractContextManager): # noqa: N801
27 | """
28 | Protect against imports in a context that could segfault when imported in the wrong OS.
29 |
30 | This context manager provides two things:
31 |
32 | 1. Conditional imports based on platform (``platform.system()``).
33 | 2. Exception handling and logging.
34 | """
35 |
36 | def __init__(self, platf: Platform = Platform.ANY, verbose: bool = False) -> None:
37 | """
38 | Create a new ``conditional_import_shield`` context manager.
39 |
40 | :param verbose: Log any errors that are encountered while fetching addresses.
41 | :param platf: The platform conditional (or ``None`` to run on all platforms).
42 | """
43 | self.right_platform = platf.value is None or platform.system() == platf.value
44 | f_current = inspect.currentframe()
45 | if f_current is None:
46 | msg = "Could not determine current frame!"
47 | raise RuntimeError(msg)
48 | f_back = f_current.f_back
49 | if f_back is None:
50 | msg = "Could not determine calling frame!"
51 | raise RuntimeError(msg)
52 | self.module_name = f_back.f_globals["__name__"]
53 | self.package_backup = sys.modules[self.module_name].__package__
54 | self.package_overwritten = False
55 | self.verbose = verbose
56 |
57 | def __enter__(self) -> conditional_import_shield: # noqa: PYI034
58 | """
59 | When we enter the context, check if we are running on the right platform.
60 |
61 | If we are not on the right platform, we temporarily sabotage the module's import system.
62 | """
63 | if self.right_platform:
64 | return self
65 | self.package_overwritten = True
66 | sys.modules[self.module_name].__package__ = ""
67 | return self
68 |
69 | def __exit__(self, exctype: type[BaseException] | None, excinst: BaseException | None,
70 | exctb: TracebackType | None) -> bool:
71 | """
72 | When we exit the context, unsabotage the import system and log any exceptions.
73 | """
74 | if self.package_overwritten:
75 | sys.modules[self.module_name].__package__ = self.package_backup
76 | # Should be an ImportError due to our sabotage. Otherwise, log the exception:
77 | if self.verbose and exctype is not ImportError:
78 | traceback.print_exception(exctype, excinst, exctb)
79 | return True
80 | # Should have finished without exception:
81 | if self.verbose and exctype is not None:
82 | traceback.print_exception(exctype, excinst, exctb)
83 | return True
84 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/interfaces.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import lru_cache
4 | from typing import TYPE_CHECKING
5 |
6 | from .importshield import Platform, conditional_import_shield
7 |
8 | if TYPE_CHECKING:
9 | from .addressprovider import AddressProvider
10 |
11 | try:
12 | import netifaces
13 | except ImportError:
14 | netifaces = None
15 | BLACKLIST = {"127.0.0.1", "127.0.1.1", "0.0.0.0", "255.255.255.255", "::1"}
16 | VERBOSE = False
17 |
18 |
19 | @lru_cache(maxsize=1)
20 | def get_providers() -> list[AddressProvider]:
21 | """
22 | Construct the ``AddressProvider``s that are applicable for this platform.
23 | """
24 | providers: list[AddressProvider] = []
25 | if netifaces is not None:
26 | # Netifaces is faster but archived since 2021 and unsupported >= Python 3.11
27 | with conditional_import_shield(Platform.ANY, VERBOSE):
28 | from .any_os.netifaces import Netifaces
29 | providers.append(Netifaces(VERBOSE))
30 | else:
31 | # Attempt to mimic netifaces with (slower) ctypes and other OS calls.
32 | with conditional_import_shield(Platform.ANY, VERBOSE):
33 | from .any_os.getaddrinfo import SocketGetAddrInfo
34 | providers.append(SocketGetAddrInfo(VERBOSE))
35 | with conditional_import_shield(Platform.ANY, VERBOSE):
36 | from .any_os.testnet1 import TestNet1
37 | providers.append(TestNet1(VERBOSE))
38 | with conditional_import_shield(Platform.WINDOWS, VERBOSE):
39 | from .windows.GetAdaptersAddresses import GetAdaptersAddresses
40 | providers.append(GetAdaptersAddresses(VERBOSE))
41 | with conditional_import_shield(Platform.LINUX, VERBOSE):
42 | from .unix.getifaddrs import GetIfAddrs
43 | providers.append(GetIfAddrs(VERBOSE))
44 | with conditional_import_shield(Platform.LINUX, VERBOSE):
45 | from .unix.ioctl import Ioctl
46 | providers.append(Ioctl(VERBOSE))
47 | return providers
48 |
49 |
50 | def get_lan_addresses() -> list[str]:
51 | """
52 | Attempt to find the LAN addresses of this machine using whatever means available.
53 | """
54 | votes: dict[str, int] = {}
55 | for provider in get_providers():
56 | for found in (provider.get_addresses_buffered() - BLACKLIST):
57 | votes[found] = votes.get(found, 0) + 1
58 | return sorted(votes.keys(), key=lambda key: votes[key], reverse=True)
59 |
60 |
61 | __all__ = ["get_lan_addresses", "get_providers"]
62 |
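A quick sketch of the voting behaviour above; addresses reported by more providers sort to the front:

from ipv8.messaging.interfaces.lan_addresses.interfaces import get_lan_addresses

addresses = get_lan_addresses()
print(addresses)   # e.g. ['192.168.1.2', ...] with the most agreed-upon address first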
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/unix/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/lan_addresses/unix/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/unix/ioctl.py:
--------------------------------------------------------------------------------
1 | import socket
2 | import struct
3 | import typing
4 |
5 | if typing.TYPE_CHECKING:
6 | def ioctl(__fd: int, __request: int, __arg: bytes, __mutate_flag: bool = ...) -> bytes:
7 | """
8 | Stub for the ioctl call's types.
9 | """
10 | else:
11 | from fcntl import ioctl
12 |
13 | from ..addressprovider import AddressProvider
14 |
15 | SIOCGIFADDR = 0x8915
16 | FMT_SOCKADDR = '16sH14s'
17 | FMT_FAMILY = 'H'
18 |
19 |
20 | class Ioctl(AddressProvider):
21 | """
22 | Attempt to find local addresses using the ``ioctl`` system call.
23 | """
24 |
25 | def get_addresses(self) -> set:
26 | """
27 | Attempt to use ``ioctl()`` to retrieve addresses.
28 |
29 | Note: SIOCGIFADDR only supports AF_INET.
30 | """
31 | out_addresses = []
32 |
33 | s = None
34 | try:
35 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
36 | for ifspec in socket.if_nameindex():
37 | ifreq = ioctl(s.fileno(), SIOCGIFADDR,
38 | struct.pack(FMT_SOCKADDR, ifspec[1].encode(), socket.AF_INET, b'\x00' * 14))
39 | family, = struct.unpack(FMT_FAMILY, ifreq[16:18])
40 | if family == socket.AF_INET:
41 | out_addresses.append(socket.inet_ntop(socket.AF_INET, ifreq[20:24]))
42 | s.close()
43 | s = None
44 | except OSError:
45 | self.on_exception()
46 | finally:
47 | if s is not None:
48 | try:
49 | s.close()
50 | s = None
51 | except OSError:
52 | self.on_exception()
53 |
54 | return set(out_addresses)
55 |
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/lan_addresses/windows/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/lan_addresses/windows/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/network_stats.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 |
4 | class NetworkStat:
5 | """
6 | Represents an individual network statistic. This is used to compose the overall community statistics.
7 |
8 | Includes the following properties for the given statistic:
9 | - identifier: char
10 | - num_up: integer
11 | - num_down: integer
12 | - bytes_up: integer
13 | - bytes_down: integer
14 | - first_measured_up: float
15 | - first_measured_down: float
16 | - last_measured_up: float
17 | - last_measured_down: float
18 | """
19 |
20 | def __init__(self, identifier: int) -> None:
21 | """
22 | Create a new statistic for a single message identifier.
23 | """
24 | self.identifier: int = identifier
25 | """Message identifier."""
26 |
27 | self.num_up: int = 0
28 | """Number of messages sent."""
29 |
30 | self.num_down: int = 0
31 | """Number of messages received."""
32 |
33 | self.bytes_up: int = 0
34 | """Number of bytes sent."""
35 |
36 | self.bytes_down: int = 0
37 | """Number of bytes received."""
38 |
39 | self.first_measured_up: float = 0
40 | """Timestamp of the first message sent."""
41 |
42 | self.first_measured_down: float = 0
43 | """Timestamp of the first message received."""
44 |
45 | self.last_measured_up: float = 0
46 | """Timestamp of the most recent message sent."""
47 |
48 | self.last_measured_down: float = 0
49 | """Timestamp of the most recent message received."""
50 |
51 | def add_sent_stat(self, timestamp: float, num_bytes: int) -> None:
52 | """
53 | Callback for when a message of a given number of bytes is sent at a given timestamp.
54 | """
55 | self.num_up += 1
56 | self.bytes_up += num_bytes
57 | self.last_measured_up = timestamp
58 |
59 | if not self.first_measured_up:
60 | self.first_measured_up = timestamp
61 |
62 | def add_received_stat(self, timestamp: float, num_bytes: int) -> None:
63 | """
64 | Callback for when a message of a given number of bytes is received at a given timestamp.
65 | """
66 | self.num_down += 1
67 | self.bytes_down += num_bytes
68 | self.last_measured_down = timestamp
69 |
70 | if not self.first_measured_down:
71 | self.first_measured_down = timestamp
72 |
73 | def to_dict(self) -> dict[str, int | float]:
74 | """
75 | Convert this statistic to a plain dictionary.
76 | """
77 | return {
78 | "identifier": self.identifier,
79 | "num_up": self.num_up,
80 | "num_down": self.num_down,
81 | "bytes_up": self.bytes_up,
82 | "bytes_down": self.bytes_down,
83 | "first_measured_up": self.first_measured_up,
84 | "first_measured_down": self.first_measured_down,
85 | "last_measured_up": self.last_measured_up,
86 | "last_measured_down": self.last_measured_down
87 | }
88 |
89 | def __str__(self) -> str:
90 | """
91 | Create a short string representation of this statistic for debugging.
92 | """
93 | return (f"NetworkStat{{num_up:{self.num_up}, num_down:{self.num_down}, "
94 | f"bytes_up:{self.bytes_up}, bytes_down:{self.bytes_down}, ...}}")
95 |
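A small sketch of tracking traffic for a single message identifier with the class above:

from time import time

from ipv8.messaging.interfaces.network_stats import NetworkStat

stat = NetworkStat(1)                  # statistics for message id 1
stat.add_sent_stat(time(), 124)        # we sent a 124-byte message
stat.add_received_stat(time(), 88)     # and received an 88-byte reply
summary = stat.to_dict()
assert summary["bytes_up"] == 124 and summary["bytes_down"] == 88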
--------------------------------------------------------------------------------
/ipv8/messaging/interfaces/udp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/messaging/interfaces/udp/__init__.py
--------------------------------------------------------------------------------
/ipv8/messaging/payload_headers.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from .payload import Payload
4 |
5 |
6 | class BinMemberAuthenticationPayload(Payload):
7 | """
8 | Public key (bytes) storage payload.
9 | """
10 |
11 | format_list = ['varlenH', ]
12 |
13 | def __init__(self, public_key_bin: bytes) -> None:
14 | """
15 | Create a new payload.
16 | """
17 | super().__init__()
18 | self.public_key_bin = public_key_bin
19 |
20 | def to_pack_list(self) -> list[tuple]:
21 | """
22 | Convert this payload to a pack list.
23 | """
24 | return [('varlenH', self.public_key_bin)]
25 |
26 | @classmethod
27 | def from_unpack_list(cls: type[BinMemberAuthenticationPayload],
28 | public_key_bin: bytes) -> BinMemberAuthenticationPayload:
29 | """
30 | Read the serialized key material into a payload.
31 | """
32 | return BinMemberAuthenticationPayload(public_key_bin)
33 |
34 |
35 | class GlobalTimeDistributionPayload(Payload):
36 | """
37 | Payload to communicate (and synchronize) Lamport timestamps.
38 | """
39 |
40 | format_list = ['Q', ]
41 |
42 | def __init__(self, global_time: int) -> None:
43 | """
44 | Create a new payload.
45 | """
46 | super().__init__()
47 | self.global_time = global_time
48 |
49 | def to_pack_list(self) -> list[tuple]:
50 | """
51 | Convert this payload to a pack list.
52 | """
53 | return [('Q', self.global_time)]
54 |
55 | @classmethod
56 | def from_unpack_list(cls: type[GlobalTimeDistributionPayload], global_time: int) -> GlobalTimeDistributionPayload:
57 | """
58 | Read the serialized time into a payload.
59 | """
60 | return GlobalTimeDistributionPayload(global_time)
61 |
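A brief sketch of the two header payloads above; only the pack lists they produce are shown here, as the surrounding Community machinery normally handles the actual (de)serialization:

from ipv8.messaging.payload_headers import BinMemberAuthenticationPayload, GlobalTimeDistributionPayload

auth = BinMemberAuthenticationPayload(b"LibNaCLPK:" + bytes(64))    # made-up key bytes
dist = GlobalTimeDistributionPayload(42)                            # Lamport clock value
assert auth.to_pack_list() == [('varlenH', auth.public_key_bin)]
assert dist.to_pack_list() == [('Q', 42)]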
--------------------------------------------------------------------------------
/ipv8/peerdiscovery/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/peerdiscovery/__init__.py
--------------------------------------------------------------------------------
/ipv8/peerdiscovery/churn.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from random import sample
4 | from time import time
5 | from typing import TYPE_CHECKING, cast
6 |
7 | from ..types import Overlay
8 | from .discovery import DiscoveryStrategy
9 |
10 | if TYPE_CHECKING:
11 | from ..types import Address, Peer
12 |
13 |
14 | class RandomChurn(DiscoveryStrategy[Overlay]):
15 | """
16 | Select random peers, ping them if inactive, remove them if unresponsive.
17 | """
18 |
19 | def __init__(self, overlay: Overlay, sample_size: int = 8,
20 | ping_interval: float = 10.0, inactive_time: float = 27.5, drop_time: float = 57.5) -> None:
21 | """
22 | Random peer removal strategy.
23 |
24 | :param overlay: the overlay to sample peers from
25 | :param sample_size: the amount of peers to check at once
26 | :param ping_interval: time between pings in the range of inactive_time to drop_time
27 | :param inactive_time: time before pings are sent to check liveness
28 | :param drop_time: time after which a peer is dropped
29 | """
30 | super().__init__(overlay)
31 | self._pinged: dict[Address, float] = {}
32 | self.sample_size = sample_size
33 | self.ping_interval = ping_interval
34 | self.inactive_time = inactive_time
35 | self.drop_time = drop_time
36 |
37 | def should_drop(self, peer: Peer) -> bool:
38 | """
39 |         Whether enough time has passed since this peer's last response to consider it unreachable.
40 | """
41 | if peer.last_response == 0:
42 | return False
43 | return time() > (peer.last_response + self.drop_time)
44 |
45 | def is_inactive(self, peer: Peer) -> bool:
46 | """
47 |         Whether enough time has passed since this peer's last response to consider it inactive.
48 | """
49 | if peer.last_response == 0:
50 | return False
51 | return time() > (peer.last_response + self.inactive_time)
52 |
53 | def take_step(self) -> None:
54 | """
55 | Select a new (set of) peer(s) to investigate liveness for.
56 | """
57 | with self.walk_lock:
58 | # Find an inactive or droppable peer
59 | sample_size = min(len(self.overlay.network.verified_peers), self.sample_size)
60 | if sample_size:
61 | window = sample(list(self.overlay.network.verified_peers), sample_size)
62 |
63 | for peer in window:
64 | if self.should_drop(peer) and peer.address in self._pinged:
65 | self.overlay.network.remove_peer(peer)
66 | self._pinged.pop(peer.address)
67 | elif self.is_inactive(peer) or len(peer.pings) < cast(int, peer.pings.maxlen):
68 | if ((peer.address in self._pinged)
69 | and (time() > (self._pinged[peer.address] + self.ping_interval))):
70 | self._pinged.pop(peer.address)
71 | if peer.address not in self._pinged:
72 | self._pinged[peer.address] = time()
73 | self.overlay.send_ping(peer) # type: ignore[attr-defined]
74 |
--------------------------------------------------------------------------------
/ipv8/test/REST/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/REST/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/identity/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/identity/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/tokentree/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/tokentree/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/wallet/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/attestation.txt:
--------------------------------------------------------------------------------
1 | 01064c65dcb113f901064228da3ea57101064793a4f9c77901062b083e8690fb0106408293c67e9f010602eb9211e296010649440755f28301061e45e0a37dd3010609628f13507b010610116d37c48a01061fc38e532ea6010636bc724b9a0201061c64a9e3a9ed010605075a18347b010634a604333b15010644fed33153b301064363e691db3a010624bbcbb64a3601063a7e415126c501064a7e7dcf6b44010634be06f6a8f701060f75b0eb9c6501060aad5da0562d01064765de6ae2b8010624290e55922001061ddfddeb7a4a01060eadc893726d0106183aff08cecd010647a8c97ba14101060974201f23680106068f510e549a0106347d99e55ab601060b30906d3ee301062584033e6ea401062df3f9dd771a01061b48445c46e90106450bcaa2b2a8010642359deab2be01063bd71322126d0106292118c2d733010602f84784c9af01060a4d3f27bfe9010602bdafe3b45801063cedfd2b7929010615710d5318bd010615617d373004010616b7f30f154301063ced4e713b4e0106249411a8b74c01060609ac808852010603b97515cf1401062554117dc030010644464a9f71a301062df5cb58f18301061cdb20b13e7801060a8a8001fe7e01063e3302dab41701062ee7f53311a201061644abc740f90106109f8944351f01063c317678e07301060880f1d076f0010647922909eafa01062d539a0d805c010631161ee5fade01062368d43e6a2f01060688fc2c3deb01062b0cfc40f8fe010617f38f74ce5801062490ca02374b01061497e64c10ac0106442424d897ea0106226efd19fe83010622f6b31caabe01063df6f61dfa6901062384c957956e01063acf00edf75b01061472e2bd878b01061e89c2c507220106110f58e8603f0106336ad4171b780106281623a0e743010621412657de300106336b58ff0ec501060cc94f6f4d52010642c6d37ab968010647d4d1ed543001060ce76801b6d401061dd8505a17f501063f48fa77d6fc01060759c5e292f20106294312524d6a01060678a2daefa301062c1e9c7065cc010642bf2a5eb1d5010607e529f46952010618ce0c98756d010645e1b5056490010631266efab8a401063be1cb6867ef01060bb142e56412
2 |
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/attestation_range.txt:
--------------------------------------------------------------------------------
1 | 01064c65dcb113f901064228da3ea57101064793a4f9c77901062b083e8690fb0106408293c67e9f01012001064c65dcb113f901060f3b65671cba010620fc847e16e701060d4eae4cd05f010631f3e26bef91010648d212207edb0106303606f4b0030106133c18dcddbb01064817d0aabcee0106302596ef9173010642d56a9baef10106036e3123912501060b033d742ded010648bca63cc60201062663e5c84c020106136ab99294bc010614fef69fc0490001242ae57ee97572913a0af42c21076af6248917ac8159142d7114cac707c60a85e376df21d00126190ae6dcf18144209fab5edc474b958fb60b63e40ea098fc8bc5b7af6967e52bf86a38f0c43101213ba93e958cb20e142b400c961883fb5a23f9a853fa05d88d039dcbc347f1620cfb012053eb3eb6393df7abd78f8aabbd356ccd8f700bb6d30568dcdae394c3e96a082001064c65dcb113f90105689fc56fba01061d0bd5613a9602012a03dedfc2a8a70e35cb12e981ffd6a54722e232fda86cd08f129d4f3e1cdde858afb209667c196390763e012705d43dab12db4e89df6b56495011159ce5d1c5b52ae3380591367b0e168a7fff6efe2e3ded83360123a6825da2687743346a7eccab02a4d47b93a822c468c3210d89031a4c7ef0d4753226260120fac2a46a1721597b233aab0492170e0e69948ac30000576d3d84ef085a33e5a001064c65dcb113f901062a18adf25d9d010606ec7eca9b4300012b0595e7baaa675c0990186a2a56ca7ea1549f7afd620cc974b885731196c33972db6502adc53b269263b1a201270149e0bca0161879507718987832c8ed59c6a69c35378aa06e4bc6281e565b18f046e94f0e7a2201232fe5ec3a1c44152e8cbe7afbbc666fb9c9f4fd602517c1cb6d1e8d3d314eb7677d8ad80120249859cdea4e0bb54faf89d319b35defcd153e233a647e2d80eec71bbf25aebd3f01062755fcd2d86d01064a3f49e3fdd5010625b1e76a527c010620c49ac8873501064a859082f2e301054ac262724a0106130ddd7b61df01062b281928e0480106167ce61b6417010614022d89eba8010629548caf46af01064b6c34b05d0e01062019f9577d1a010636538fe5c31101062a41d4a5750f01061b5eb4062f8501060789736291e30106402050a73437010622d2d19b76bd010603ce36e5f37701063089d9b8224601060f20aee6c81001063a19b4b33ea00106446d0ae71a7901063d015e69f7bb0106024c9171f9640106329a962446820106101299959524010611ab1a9df9eb01060fa140fd002a010614d0b9c819300106118451b9844501064205bdea5e200106494baadda91c01061cd22322d93701061b8e73661b9a01063e3611e5c4ff010624210c93308e01060270a8667e58010646576d47966f010617cc860e2b20010631ede9ede7c10106449aab2127ec01060747d152864b010646ea6d30fffb0106284760bd83a301059f3305b06801064486afc32fa101064375d526298f01061be45bddb9d40106481837c6a16c01062c6941d3c7af01064164b251c8e60106106b52279954010638a3ad27fe3a010602b8ea9f8677010610b5dc01d251010640065584c572010615b3320f31dd01064afc197e4ee701062d42b89dea6501062918374cba1c01061790e418e24d010613ecc7595a0a01062ba76e74ee4101063fc5f6f49b9a010621fafebecb800105b45171235501063d6fd59ed971010626b0538e9e5f01064648b133cab701050e4ba5b7bd01062d2a3d564eff0106026359fbebd901062d2a260c412e01060d0d6406792c01060492c320955301061ed9e75f651a010633edd1d694e1010647c399a9bd140106061ceed1ed910106221b4b3ed00201063ec0a8f4163601061af31c79cde701061518949cd5e9010607a62cac59c501062f4f8570ef8e0106063ac38ce7fe01063e9ead11641a010646d579b0bfd5010623e46c218e8c0106071320e16e0f0106232e8607d3e601061528e03987bf010607259ff11c350106335a91802f4001064bb7a51e19f8010606a21c29b303010649d6376445ce01061538bdffb3f30106346e45435af501061e52ffeb6f5501060b8033d111a6010635f271a61b8e01062123a6c87ddf010605c61d3f03c8010604db24a4743a010634f0d3a5dd210106222376368db501063138c4df46f5010603ac2eb4420401060794d9321fee010641daa213078001060414f0a00d2d01064bc796c1d73701063031b8e8c165010641cf392fe7240106185f06fc1c42010608b1caa541fd010623956c47039501063db3468b3dea010646b91cdb793c0106115e478055c8010624a1683fa0a6010627e7c86aa52601061c73dae11bad
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/bonehexact/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/wallet/bonehexact/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/pengbaorange/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/wallet/pengbaorange/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/primitives/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/attestation/wallet/primitives/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/attestation/wallet/primitives/test_ec.py:
--------------------------------------------------------------------------------
1 | from .....attestation.wallet.primitives.ec import esum, weilpairing
2 | from .....attestation.wallet.primitives.value import FP2Value
3 | from ....base import TestBase
4 |
5 |
6 | class TestPairing(TestBase):
7 | """
8 | Tests related to creating Weil pairings.
9 | """
10 |
11 | def test_small_weilpairing(self) -> None:
12 | """
13 | Check if Weil pairing in E[4] mod 11 of (5, 4) and (5x, 4) with S=(7, 6) equals 9 + 7x.
14 | """
15 | mod = 11
16 | wp = weilpairing(mod,
17 | 4,
18 | (FP2Value(mod, 5), FP2Value(mod, 4)),
19 | (FP2Value(mod, b=5), FP2Value(mod, 4)),
20 | (FP2Value(mod, 7), FP2Value(mod, 6)))
21 |
22 | self.assertEqual(wp.a, 9)
23 | self.assertEqual(wp.b, 7)
24 | self.assertEqual(wp.c, 0)
25 | self.assertEqual(wp.aC, 1)
26 | self.assertEqual(wp.bC, 0)
27 | self.assertEqual(wp.cC, 0)
28 |
29 | def test_medium_weilpairing(self) -> None:
30 | """
31 | Check if Weil pairing in E[408] mod 1223 of (764, 140) and (18x, 84) with S=(0, 1222) equals 438 + 50x.
32 | """
33 | mod = 1223
34 | wp = weilpairing(mod,
35 | 408,
36 | (FP2Value(mod, 764), FP2Value(mod, 140)),
37 | (FP2Value(mod, b=18), FP2Value(mod, 84)),
38 | (FP2Value(mod, 0), FP2Value(mod, 1222)))
39 |
40 | self.assertEqual(wp.a, 438)
41 | self.assertEqual(wp.b, 50)
42 | self.assertEqual(wp.c, 0)
43 | self.assertEqual(wp.aC, 1)
44 | self.assertEqual(wp.bC, 0)
45 | self.assertEqual(wp.cC, 0)
46 |
47 | def test_oob_esum(self) -> None:
48 | """
49 | Check if the EC sum of the point at infinity with itself is the point at infinity.
50 | """
51 | self.assertEqual(esum(11, "O", "O"), "O")
52 |
53 | def test_spob_esum(self) -> None:
54 | """
55 | Check if the EC sum of the point at infinity with another point equals the other point.
56 | """
57 | p = (FP2Value(11, 1), FP2Value(11, 2))
58 |
59 | self.assertEqual(esum(11, p, "O"), p)
60 | self.assertEqual(esum(11, "O", p), p)
61 |
--------------------------------------------------------------------------------
/ipv8/test/bootstrapping/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/bootstrapping/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/bootstrapping/dispersy/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/bootstrapping/dispersy/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/dht/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/dht/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/dht/base.py:
--------------------------------------------------------------------------------
1 | from typing import TypeVar
2 |
3 | from ...dht.routing import Node, RoutingTable
4 | from ...dht.storage import Storage
5 | from ...types import DHTCommunity
6 | from ..base import TestBase
7 |
8 | OT = TypeVar("OT", bound=DHTCommunity)
9 |
10 |
11 | class TestDHTBase(TestBase[OT]):
12 | """
13 | Extension to TestBase that provides common DHT shortcuts.
14 | """
15 |
16 | def dht_node(self, i: int) -> Node:
17 | """
18 | Get the node instance of node i.
19 | """
20 | address_cls = self.overlay(i).get_address_class(self.my_peer(i))
21 | address = self.my_peer(i).addresses.get(address_cls, self.overlay(i).my_estimated_wan)
22 | return Node(self.private_key(i), address)
23 |
24 | def routing_table(self, i: int) -> RoutingTable:
25 | """
26 | Get the routing table of node i.
27 | """
28 | return self.overlay(i).get_routing_table(self.dht_node(i))
29 |
30 | def storage(self, i: int) -> Storage:
31 | """
32 | Get the storage of node i.
33 | """
34 | return self.overlay(i).get_storage(self.dht_node(i))
35 |
36 | def my_node_id(self, i: int) -> bytes:
37 | """
38 | Get the DHT node id of node i.
39 | """
40 | return self.overlay(i).get_my_node_id(self.my_peer(i))
41 |
--------------------------------------------------------------------------------
/ipv8/test/dht/test_churn.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from ...dht.churn import PingChurn
4 | from ...dht.community import DHTCommunity
5 | from .base import TestDHTBase
6 |
7 |
8 | class TestPingChurn(TestDHTBase[DHTCommunity]):
9 | """
10 | Tests for pinging nodes in the DHT community.
11 | """
12 |
13 | def setUp(self) -> None:
14 | """
15 | Create two nodes with a ping churn strategy.
16 | """
17 | super().setUp()
18 | self.initialize(DHTCommunity, 2)
19 | self.strategies = [PingChurn(self.overlay(i), ping_interval=0.0) for i in range(2)]
20 |
21 | async def test_ping_all(self) -> None:
22 | """
23 | Check if a failed node without a previous response is pinged and if it responds.
24 | """
25 | await self.introduce_nodes()
26 | bucket = self.routing_table(0).trie['']
27 |
28 | node1 = bucket.get(self.my_node_id(1))
29 | node1.failed = 1
30 | node1.last_response = 0
31 |
32 | self.strategies[0].take_step()
33 | await self.deliver_messages()
34 |
35 | self.assertTrue(node1.failed == 0)
36 | self.assertNotEqual(node1.last_response, 0)
37 |
38 | async def test_ping_all_skip(self) -> None:
39 | """
40 | Check if a failed node that recently responded is not spammed with a ping.
41 | """
42 | await self.introduce_nodes()
43 | bucket = self.routing_table(0).trie['']
44 | node1 = bucket.get(self.my_node_id(1))
45 | node1.failed = 1
46 | node1.last_response = time.time() + 5
47 |
48 | self.strategies[0].take_step()
49 | self.assertTrue(node1.failed == 1)
50 |
--------------------------------------------------------------------------------
/ipv8/test/dht/test_provider.py:
--------------------------------------------------------------------------------
1 | from typing import cast
2 | from unittest.mock import Mock
3 |
4 | from ...dht.provider import DHTCommunityProvider
5 | from ...keyvault.crypto import ECCrypto
6 | from ...messaging.anonymization.tunnel import IntroductionPoint
7 | from ...peer import Peer
8 | from ...util import succeed
9 | from ..base import TestBase
10 |
11 |
12 | class TestNode(TestBase):
13 | """
14 | Tests related to announce and lookup functionality.
15 | """
16 |
17 | def setUp(self) -> None:
18 | """
19 | Create a DHTCommunityProvider around a mocked DHT community, together with
20 | a preset introduction point and its expected serialized DHT value.
21 | """
22 | super().setUp()
23 | self.crypto = ECCrypto()
24 |
25 | self.ip_pk = self.crypto.key_from_public_bin(b'LibNaCLPK:\xc8\xf38};U\xe4\xd5\xf7\xfd\xbc+J!\xbe\xba'
26 | b'\x81M\xda\xef\xb7\x8c\xacL\x1eZ\x9d\xaf\xaaX+&\xac\xe2'
27 | b'\xd2\xdd\x86\xa9\x97\xb8T\x9b\x82\xc1>\xa2\r\x11?\xef'
28 | b'\x137\xf1\xdc!\x7f\x9fW\xe7\x11.\xe2\xc8)')
29 | self.seeder_pk = self.crypto.key_from_public_bin(b'LibNaCLPK:/N\xc5\xd1#\xd4\xc5\x02\xca\xb4\xa4\xd4vKD'
30 | b'\xf1"\xf01,\\\xde\x14\x87\xa9\xf6T\x90\xd9\xb0qk\xdbPS'
31 | b'\xfbqm\xc1,i\xca\x88\x7fm\xe8\\\x0f\xe9\xee\xec\xce\xbeN'
32 | b'\xdc\x94\xc4\x84\'\x8b\xb8\x8e\x1b\xc4')
33 |
34 | self.intro_point = IntroductionPoint(Peer(self.ip_pk, ('1.2.3.4', 567)),
35 | self.seeder_pk.key_to_bin(), last_seen=0)
36 | self.info_hash = bytes(range(20))
37 | self.provider = DHTCommunityProvider(Mock(), self.intro_point.peer.address[1])
38 | self.dht_value = (b'\x01\x01\x02\x03\x04\x027\x00\x00\x00\x00\x00@\xc8\xf38};U\xe4\xd5\xf7\xfd\xbc+J!\xbe'
39 | b'\xba\x81M\xda\xef\xb7\x8c\xacL\x1eZ\x9d\xaf\xaaX+&\xac\xe2\xd2\xdd\x86\xa9\x97\xb8T\x9b'
40 | b'\x82\xc1>\xa2\r\x11?\xef\x137\xf1\xdc!\x7f\x9fW\xe7\x11.\xe2\xc8)\x00@/N\xc5\xd1#\xd4\xc5'
41 | b'\x02\xca\xb4\xa4\xd4vKD\xf1"\xf01,\\\xde\x14\x87\xa9\xf6T\x90\xd9\xb0qk\xdbPS\xfbqm\xc1,i'
42 | b'\xca\x88\x7fm\xe8\\\x0f\xe9\xee\xec\xce\xbeN\xdc\x94\xc4\x84\'\x8b\xb8\x8e\x1b\xc4')
43 |
44 | async def test_announce(self) -> None:
45 | """
46 | Check if the DHT value is stored after an announce.
47 | """
48 | mock_store_value = cast(Mock, self.provider.dht_community.store_value)
49 | mock_store_value.return_value = succeed(None)
50 | await self.provider.announce(self.info_hash, self.intro_point)
51 | mock_store_value.assert_called_once_with(self.info_hash, self.dht_value)
52 |
53 | async def test_lookup(self) -> None:
54 | """
55 | Check if an introduction point is properly created after a lookup.
56 | """
57 | self.provider.dht_community.find_values = lambda _: succeed([(self.dht_value, None)])
58 | info_hash, intro_points = await self.provider.lookup(self.info_hash)
59 | assert info_hash == self.info_hash
60 | assert intro_points[0].peer.address == self.intro_point.peer.address
61 | assert intro_points[0].peer.public_key.key_to_bin() == self.intro_point.peer.public_key.key_to_bin()
62 | assert intro_points[0].seeder_pk == self.intro_point.seeder_pk
63 |
--------------------------------------------------------------------------------
/ipv8/test/dht/test_storage.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 | from ...dht.storage import Storage
4 | from ..base import TestBase
5 |
6 |
7 | class TestStorage(TestBase):
8 | """
9 | Tests related to Storage objects.
10 | """
11 |
12 | def test_get_and_put(self) -> None:
13 | """
14 | Check that unique values can be added to a storage key.
15 | """
16 | storage = Storage()
17 |
18 | storage.put(b'key', b'value1')
19 | self.assertEqual(storage.get(b'key'), [b'value1'])
20 |
21 | storage.put(b'key', b'value2')
22 | self.assertEqual(storage.get(b'key'), [b'value2', b'value1'])
23 |
24 | storage.put(b'key', b'value1')
25 | self.assertEqual(storage.get(b'key'), [b'value1', b'value2'])
26 |
27 | def test_items_older_than(self) -> None:
28 | """
29 | Check that inserted values can be filtered based on their age.
30 | """
31 | storage = Storage()
32 | storage.put(b'key', b'value')
33 | storage.items[b'key'][0].last_update = time.time() - 1
34 | self.assertEqual(storage.items_older_than(0), [(b'key', b'value')])
35 | self.assertEqual(storage.items_older_than(10), [])
36 |
37 | def test_clean(self) -> None:
38 | """
39 | Check that expired values are removed when cleaning a storage.
40 | """
41 | storage = Storage()
42 |
43 | storage.put(b'key', b'value', max_age=60)
44 | storage.items[b'key'][0].last_update = time.time() - 120
45 | storage.clean()
46 | self.assertEqual(storage.get(b'key'), [])
47 |
48 | storage.put(b'key', b'value', 60)
49 | storage.items[b'key'][0].last_update = time.time()
50 | storage.clean()
51 | self.assertEqual(storage.get(b'key'), [b'value'])
52 |
--------------------------------------------------------------------------------
/ipv8/test/keyvault/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/keyvault/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/keyvault/test_serialization.py:
--------------------------------------------------------------------------------
1 | from base64 import decodebytes
2 | from typing import cast
3 |
4 | from ...keyvault.crypto import default_eccrypto
5 | from ...keyvault.private.libnaclkey import LibNaCLSK
6 | from ...keyvault.private.m2crypto import M2CryptoSK
7 | from ...keyvault.public.m2crypto import M2CryptoPK
8 | from ..base import TestBase
9 |
10 |
11 | class TestSerialization(TestBase):
12 | """
13 | Test whether keys can be serialized and unserialized correctly.
14 | """
15 |
16 | def setUp(self) -> None:
17 | """
18 | Create an M2Crypto private key and a LibNaCL private key.
19 | """
20 | super().setUp()
21 | self.ec = default_eccrypto
22 | self.key = cast(M2CryptoSK, self.ec.generate_key("very-low"))
23 | self.key_nacl = cast(LibNaCLSK, self.ec.generate_key("curve25519"))
24 |
25 | def test_private_to_bin(self) -> None:
26 | """
27 | Check if M2Crypto derived key bins are valid.
28 | """
29 | private_bin = self.key.key_to_bin()
30 |
31 | self.assertTrue(self.ec.is_valid_private_bin(private_bin))
32 |
33 | def test_private_nacl_to_bin(self) -> None:
34 | """
35 | Check if libnacl derived key bins are valid.
36 | """
37 | private_bin = self.key_nacl.key_to_bin()
38 |
39 | self.assertTrue(self.ec.is_valid_private_bin(private_bin))
40 |
41 | def test_private_to_pem(self) -> None:
42 | """
43 | Check if keys can be serialized and loaded correctly in PEM format.
44 | """
45 | private_pem = self.key.key_to_pem()
46 |
47 | # Convert the PEM to a DER keystring
48 | prefix = "-----BEGIN EC PRIVATE KEY-----\n"
49 | postfix = "-----END EC PRIVATE KEY-----\n"
50 | keystring = decodebytes(private_pem[len(prefix):-len(postfix)])
51 |
52 | # Reconstruct a key with this keystring
53 | key = M2CryptoSK(keystring=keystring)
54 |
55 | self.assertEqual(private_pem, key.key_to_pem())
56 |
57 | def test_public_to_bin(self) -> None:
58 | """
59 | Check if M2Crypto derived public key bins are valid.
60 | """
61 | public_bin = self.key.pub().key_to_bin()
62 |
63 | self.assertTrue(self.ec.is_valid_public_bin(public_bin))
64 |
65 | def test_public_nacl_to_bin(self) -> None:
66 | """
67 | Check if libnacl derived public key bins are valid.
68 | """
69 | public_bin = self.key_nacl.pub().key_to_bin()
70 |
71 | self.assertTrue(self.ec.is_valid_public_bin(public_bin))
72 |
73 | def test_public_to_pem(self) -> None:
74 | """
75 | Check if public keys can be serialized and loaded correctly in PEM format.
76 | """
77 | public_pem = self.key.pub().key_to_pem()
78 |
79 | # Convert the PEM to a DER keystring
80 | prefix = "-----BEGIN PUBLIC KEY-----\n"
81 | postfix = "-----END PUBLIC KEY-----\n"
82 | keystring = decodebytes(public_pem[len(prefix):-len(postfix)])
83 |
84 | # Reconstruct a key with this keystring
85 | key = M2CryptoPK(keystring=keystring)
86 |
87 | self.assertEqual(public_pem, key.key_to_pem())
88 |
--------------------------------------------------------------------------------
/ipv8/test/keyvault/test_signature.py:
--------------------------------------------------------------------------------
1 | from ...keyvault.crypto import default_eccrypto
2 | from ..base import TestBase
3 |
4 |
5 | class TestSignatures(TestBase):
6 | """
7 | Test whether signatures can be created and then decoded correctly.
8 | """
9 |
10 | def setUp(self) -> None:
11 | """
12 | Generate fake data to test with.
13 | """
14 | super().setUp()
15 | self.ec = default_eccrypto
16 | self.data = bytes(range(256))
17 |
18 | def test_vlow(self) -> None:
19 | """
20 | Check if very-low security keys generate a valid signature.
21 | """
22 | key = self.ec.generate_key("very-low")
23 |
24 | signature = key.signature(self.data)
25 |
26 | self.assertTrue(self.ec.is_valid_signature(key.pub(), self.data, signature))
27 |
28 | def test_low(self) -> None:
29 | """
30 | Check if low security keys generate a valid signature.
31 | """
32 | key = self.ec.generate_key("low")
33 |
34 | signature = key.signature(self.data)
35 |
36 | self.assertTrue(self.ec.is_valid_signature(key.pub(), self.data, signature))
37 |
38 | def test_medium(self) -> None:
39 | """
40 | Check if medium security keys generate a valid signature.
41 | """
42 | key = self.ec.generate_key("medium")
43 |
44 | signature = key.signature(self.data)
45 |
46 | self.assertTrue(self.ec.is_valid_signature(key.pub(), self.data, signature))
47 |
48 | def test_high(self) -> None:
49 | """
50 | Check if high security keys generate a valid signature.
51 | """
52 | key = self.ec.generate_key("high")
53 |
54 | signature = key.signature(self.data)
55 |
56 | self.assertTrue(self.ec.is_valid_signature(key.pub(), self.data, signature))
57 |
58 | def test_curve25519(self) -> None:
59 | """
60 | Check if curve25519 keys generate a valid signature.
61 | """
62 | key = self.ec.generate_key("curve25519")
63 |
64 | signature = key.signature(self.data)
65 |
66 | self.assertTrue(self.ec.is_valid_signature(key.pub(), self.data, signature))
67 |
68 | def test_invalid_m2crypto(self) -> None:
69 | """
70 | Check if an M2Crypto key detects an invalid signature.
71 | """
72 | key = self.ec.generate_key("very-low")
73 |
74 | signature = ""
75 |
76 | self.assertFalse(self.ec.is_valid_signature(key.pub(), self.data, signature))
77 |
78 | def test_invalid_nacl(self) -> None:
79 | """
80 | Check if a libnacl key detects an invalid signature.
81 | """
82 | key = self.ec.generate_key("curve25519")
83 |
84 | signature = ""
85 |
86 | self.assertFalse(self.ec.is_valid_signature(key.pub(), self.data, signature))
87 |
--------------------------------------------------------------------------------
/ipv8/test/messaging/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/messaging/anonymization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/anonymization/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/messaging/anonymization/mock.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from collections import defaultdict
4 | from typing import TYPE_CHECKING
5 |
6 | from ....dht.provider import DHTCommunityProvider
7 |
8 | if TYPE_CHECKING:
9 | from ....messaging.anonymization.tunnel import IntroductionPoint
10 | from ....types import Peer
11 |
12 | # Map of info_hash -> peer list
13 | global_dht_services = defaultdict(list)
14 |
15 |
16 | class MockDHTProvider(DHTCommunityProvider):
17 | """
18 | A mocked provider for DHT info.
19 | """
20 |
21 | def __init__(self, peer: Peer) -> None:
22 | """
23 | Create a provider that registers our peer in the mocked DHT.
24 | """
25 | super().__init__(None, 0)
26 | self.peer = peer
27 | # DHTDiscoveryCommunity functionality
28 | global_dht_services[peer.mid].append(peer)
29 |
30 | async def peer_lookup(self, mid: bytes, peer: Peer | None = None) -> tuple[bytes, list[IntroductionPoint]] | None:
31 | """
32 | Look for peers with the corresponding mid.
33 | """
34 | return await self.lookup(mid)
35 |
36 | async def lookup(self, info_hash: bytes) -> tuple[bytes, list[IntroductionPoint]] | None:
37 | """
38 | Look for peers providing generic SHA-1 resources.
39 | """
40 | return info_hash, global_dht_services.get(info_hash, [])
41 |
42 | async def announce(self, info_hash: bytes, intro_point: IntroductionPoint) -> None:
43 | """
44 | Announce that a certain peer is serving a given SHA-1 resource.
45 | """
46 | global_dht_services[info_hash].append(intro_point)
47 |
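# A minimal usage sketch (not part of the original module), assuming ad-hoc
# `some_peer` and `some_intro_point` objects, of how the mock round-trips an
# announce followed by a lookup:
#
#     provider = MockDHTProvider(some_peer)
#     await provider.announce(b"\x00" * 20, some_intro_point)
#     _, intro_points = await provider.lookup(b"\x00" * 20)
#     assert some_intro_point in intro_points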
--------------------------------------------------------------------------------
/ipv8/test/messaging/anonymization/test_datachecker.py:
--------------------------------------------------------------------------------
1 | from binascii import unhexlify
2 |
3 | from ....messaging.anonymization.exit_socket import DataChecker
4 | from ...base import TestBase
5 |
6 | tracker_pkt = unhexlify('00000417271019800000000012345678')  # UDP tracker connect request (BEP 15)
7 | dht_pkt = b'd1:ad2:id20:abcdefghij01234567899:info_hash20:mnopqrstuvwxyz123456e1:q9:get_peers1:t2:aa1:y1:qe'  # bencoded get_peers query (BEP 5)
8 | utp_pkt = unhexlify('210086446ed69ec1ddbd9e6000100000f32e86be')  # uTP packet (BEP 29)
9 | utp_ext3_pkt = unhexlify('110309d69087c1e7b69c0980001000009868b984000400000008')  # uTP packet with non-BEP29 extension 3
10 | ipv8_pkt = unhexlify('0002123456789abcdef123456789abcdef123456789a00000001')  # IPv8-formatted packet
11 | tunnel_pkt = unhexlify('000281ded07332bdc775aa5a46f96de9f8f390bbc9f300000001')  # IPv8 packet with the tunnel community prefix
12 |
13 |
14 | class TestDataChecker(TestBase):
15 | """
16 | Tests related to the DataChecker.
17 | """
18 |
19 | def test_could_be_dht(self) -> None:
20 | """
21 | Check if a DHT packet is correctly identified.
22 | """
23 | self.assertFalse(DataChecker.could_be_dht(tracker_pkt))
24 | self.assertTrue(DataChecker.could_be_dht(dht_pkt))
25 | self.assertFalse(DataChecker.could_be_dht(utp_pkt))
26 | self.assertFalse(DataChecker.could_be_dht(ipv8_pkt))
27 | self.assertFalse(DataChecker.could_be_dht(tunnel_pkt))
28 |
29 | def test_could_be_udp_tracker(self) -> None:
30 | """
31 | Check if a UDP tracker packet is correctly identified.
32 | """
33 | self.assertTrue(DataChecker.could_be_udp_tracker(tracker_pkt))
34 | self.assertFalse(DataChecker.could_be_udp_tracker(dht_pkt))
35 | self.assertFalse(DataChecker.could_be_udp_tracker(utp_pkt))
36 | self.assertFalse(DataChecker.could_be_udp_tracker(ipv8_pkt))
37 | self.assertFalse(DataChecker.could_be_udp_tracker(tunnel_pkt))
38 |
39 | def test_could_be_utp(self) -> None:
40 | """
41 | Check if a UTP packet is correctly identified.
42 | """
43 | self.assertFalse(DataChecker.could_be_utp(tracker_pkt))
44 | self.assertFalse(DataChecker.could_be_utp(dht_pkt))
45 | self.assertTrue(DataChecker.could_be_utp(utp_pkt))
46 | self.assertTrue(DataChecker.could_be_utp(utp_ext3_pkt)) # non-BEP29 extension 3 (close reason)
47 | self.assertFalse(DataChecker.could_be_utp(ipv8_pkt))
48 | self.assertFalse(DataChecker.could_be_utp(tunnel_pkt))
49 |
50 | def test_could_be_ipv8(self) -> None:
51 | """
52 | Check if an IPv8 packet is correctly identified.
53 | """
54 | self.assertFalse(DataChecker.could_be_ipv8(tracker_pkt))
55 | self.assertFalse(DataChecker.could_be_ipv8(dht_pkt))
56 | self.assertFalse(DataChecker.could_be_ipv8(utp_pkt))
57 | self.assertTrue(DataChecker.could_be_ipv8(ipv8_pkt))
58 | self.assertTrue(DataChecker.could_be_ipv8(tunnel_pkt))
59 |
60 | def test_could_be_bt(self) -> None:
61 | """
62 | Check if a BitTorrent packet is correctly identified.
63 | """
64 | self.assertTrue(DataChecker.could_be_bt(tracker_pkt))
65 | self.assertTrue(DataChecker.could_be_bt(dht_pkt))
66 | self.assertTrue(DataChecker.could_be_bt(utp_pkt))
67 | self.assertFalse(DataChecker.could_be_bt(ipv8_pkt))
68 | self.assertFalse(DataChecker.could_be_bt(tunnel_pkt))
69 |
--------------------------------------------------------------------------------
/ipv8/test/messaging/anonymization/test_exit_socket.py:
--------------------------------------------------------------------------------
1 | from binascii import unhexlify
2 | from unittest.mock import Mock
3 |
4 | from ....keyvault.private.libnaclkey import LibNaCLSK
5 | from ....messaging.anonymization.exit_socket import TunnelExitSocket
6 | from ....messaging.anonymization.tunnel import PEER_FLAG_EXIT_BT, PEER_FLAG_EXIT_IPV8
7 | from ....peer import Peer
8 | from ...base import TestBase
9 | from .test_datachecker import dht_pkt, ipv8_pkt, tracker_pkt, tunnel_pkt, utp_pkt
10 |
11 |
12 | class TestExitSocket(TestBase):
13 | """
14 | Tests related to the exit socket.
15 | """
16 |
17 | async def test_is_allowed(self) -> None:
18 | """
19 | Check if the ExitSocket correctly detects forbidden packets.
20 | """
21 | get_prefix = Mock(return_value=unhexlify('000281ded07332bdc775aa5a46f96de9f8f390bbc9f3'))
22 | overlay = Mock(get_prefix=get_prefix)
23 | exit_socket = TunnelExitSocket(0, Mock(peer=Peer(LibNaCLSK(b"\x00" * 64))), overlay)
24 |
25 | overlay.settings.peer_flags = {}
26 | self.assertFalse(exit_socket.is_allowed(tracker_pkt))
27 | self.assertFalse(exit_socket.is_allowed(dht_pkt))
28 | self.assertFalse(exit_socket.is_allowed(utp_pkt))
29 | self.assertFalse(exit_socket.is_allowed(ipv8_pkt))
30 | self.assertTrue(exit_socket.is_allowed(tunnel_pkt))
31 |
32 | overlay.settings.peer_flags = {PEER_FLAG_EXIT_BT}
33 | self.assertTrue(exit_socket.is_allowed(tracker_pkt))
34 | self.assertTrue(exit_socket.is_allowed(dht_pkt))
35 | self.assertTrue(exit_socket.is_allowed(utp_pkt))
36 | self.assertFalse(exit_socket.is_allowed(ipv8_pkt))
37 | self.assertTrue(exit_socket.is_allowed(tunnel_pkt))
38 |
39 | overlay.settings.peer_flags = {PEER_FLAG_EXIT_IPV8}
40 | self.assertFalse(exit_socket.is_allowed(tracker_pkt))
41 | self.assertFalse(exit_socket.is_allowed(dht_pkt))
42 | self.assertFalse(exit_socket.is_allowed(utp_pkt))
43 | self.assertTrue(exit_socket.is_allowed(ipv8_pkt))
44 | self.assertTrue(exit_socket.is_allowed(tunnel_pkt))
45 |
46 | overlay.settings.peer_flags = {PEER_FLAG_EXIT_BT, PEER_FLAG_EXIT_IPV8}
47 | self.assertTrue(exit_socket.is_allowed(tracker_pkt))
48 | self.assertTrue(exit_socket.is_allowed(dht_pkt))
49 | self.assertTrue(exit_socket.is_allowed(utp_pkt))
50 | self.assertTrue(exit_socket.is_allowed(ipv8_pkt))
51 | self.assertTrue(exit_socket.is_allowed(tunnel_pkt))
52 |
53 | await exit_socket.close()
54 |
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/interfaces/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/dispatcher/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/interfaces/dispatcher/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/lan_addresses/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/interfaces/lan_addresses/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/lan_addresses/test_importshield.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import contextlib
4 | import io
5 | import sys
6 | from importlib.abc import MetaPathFinder
7 |
8 | from .....messaging.interfaces.lan_addresses.importshield import Platform, conditional_import_shield
9 | from ....base import TestBase
10 |
11 |
12 | class SegfaultingImporter(MetaPathFinder):
13 | """
14 | Importer that causes segfaults, scary!
15 | """
16 |
17 | def find_module(self, fullname: str, path: str) -> SegfaultingImporter | None:
18 | """
19 | Only serve the import named "killer_import" from our own package, as a safety feature.
20 | """
21 | if fullname == f"{self.__module__[:self.__module__.rindex('.')]}.killer_import":
22 | return self
23 | return None
24 |
25 | def load_module(self, _: str) -> None:
26 | """
27 | Cause a segfault when the module is actually loaded.
28 |
29 | We cannot simply raise an ``AssertionError`` here, as the import protection SHOULD also serve as a general
30 | ``try: ... except Exception: ...`` handler.
31 | """
32 | import ctypes
33 | return ctypes.cast(id(0), ctypes.POINTER(ctypes.c_char_p)).contents.value
34 |
35 |
36 | class TestImportShield(TestBase):
37 | """
38 | Tests relating to the import shield.
39 | """
40 |
41 | def test_stop_import(self) -> None:
42 | """
43 | Check that segfaulting imports are properly ignored when the platform does not match.
44 | """
45 | # Create an importable module ".killer_import" that segfaults the Python interpreter when imported.
46 | sys.meta_path.append(SegfaultingImporter())
47 |
48 | # The result value should remain unaltered if the import was properly ignored
49 | result = 42.0
50 |
51 | with conditional_import_shield(Platform.NONE, True):
52 | from .killer_import import ctypes
53 | result = ctypes.__version__ # We should've already segfaulted here, just in case: also change the result
54 |
55 | self.assertEqual(42.0, result)
56 |
57 | def test_allow_import(self) -> None:
58 | """
59 | Check that allowed imports are actually imported.
60 | """
61 | result = 0.0
62 |
63 | with conditional_import_shield(Platform.ANY, False):
64 | import math
65 | result = sum(math.frexp(80) * 8) - 19.0 # Does not work without the ``math`` import.
66 |
67 | self.assertEqual(42.0, result)
68 |
69 | def test_allow_import_log_exception(self) -> None:
70 | """
71 | Check that exceptions raised after an allowed import are caught and logged.
72 | """
73 | log = io.StringIO()
74 | with contextlib.redirect_stderr(log), conditional_import_shield(Platform.ANY, True):
75 | import math
76 | print(math.factorial(-1)) # This leads to an error that we should print. # noqa: T201
77 |
78 | self.assertNotEqual("", log.getvalue())
79 |
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/lan_addresses/test_interfaces.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import TYPE_CHECKING
4 |
5 | from .....messaging.interfaces.lan_addresses.addressprovider import AddressProvider
6 | from .....messaging.interfaces.lan_addresses.interfaces import get_lan_addresses
7 | from ....base import TestBase
8 |
9 | if TYPE_CHECKING:
10 | from collections.abc import Collection
11 |
12 |
13 | class PresetProvider(AddressProvider):
14 | """
15 | Provider that returns a specific value.
16 | """
17 |
18 | def __init__(self, return_value: set[str]) -> None:
19 | """
20 | Create a new provider with a preset return value.
21 | """
22 | super().__init__()
23 | self.return_value = return_value
24 |
25 | def get_addresses(self) -> set[str]:
26 | """
27 | Return our preset return value.
28 | """
29 | return self.return_value
30 |
31 |
32 | class MockProviders:
33 | """
34 | Manage a list of providers with preset return values.
35 | """
36 |
37 | def __init__(self) -> None:
38 | """
39 | Initialize an empty list of providers.
40 | """
41 | super().__init__()
42 | self.return_values = []
43 |
44 | def set_return_values(self, return_values: Collection[set[str]]) -> None:
45 | """
46 | Initialize providers for the given return values.
47 | """
48 | self.return_values = [PresetProvider(return_value) for return_value in return_values]
49 |
50 | def get_providers(self, _: bool = False) -> list[PresetProvider]:
51 | """
52 | Get our providers.
53 | """
54 | return self.return_values
55 |
56 |
57 | class TestInterfaces(TestBase):
58 | """
59 | Tests related to the interface api.
60 | """
61 |
62 | def setUp(self) -> None:
63 | """
64 | Create mocked providers to test with.
65 | """
66 | super().setUp()
67 |
68 | self.mock_providers = MockProviders()
69 | get_lan_addresses.__globals__["get_providers"] = self.mock_providers.get_providers
70 |
71 | def test_aggregate_votes_none(self) -> None:
72 | """
73 | Check that aggregating no results with no results leads to no results.
74 | """
75 | self.mock_providers.set_return_values([set(), set()])
76 | self.assertListEqual([], get_lan_addresses())
77 |
78 | def test_aggregate_votes_one(self) -> None:
79 | """
80 | Check that aggregating one result with no results leads to one result.
81 | """
82 | self.mock_providers.set_return_values([{"1.2.3.4"}, set()])
83 | self.assertListEqual(["1.2.3.4"], get_lan_addresses())
84 |
85 | def test_aggregate_votes_many(self) -> None:
86 | """
87 | Check that aggregating two results with one result leads to two results, sorted by frequency.
88 | """
89 | self.mock_providers.set_return_values([{"5.6.7.8.9", "1.2.3.4"}, {"1.2.3.4"}])
90 | self.assertListEqual(["1.2.3.4", "5.6.7.8.9"], get_lan_addresses())
91 |
92 | def test_aggregate_votes_blacklisted(self) -> None:
93 | """
94 | Check that results do not include blacklisted IPs.
95 | """
96 | self.mock_providers.set_return_values([{"5.6.7.8.9", "127.0.0.1"},
97 | {"127.0.0.1", "127.0.1.1", "0.0.0.0", "255.255.255.255", "::1"}])
98 | self.assertListEqual(["5.6.7.8.9"], get_lan_addresses())
99 |
--------------------------------------------------------------------------------
/ipv8/test/messaging/interfaces/udp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/messaging/interfaces/udp/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/mocking/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/mocking/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/mocking/community.py:
--------------------------------------------------------------------------------
1 | from ...community import CommunitySettings
2 | from ...keyvault.crypto import default_eccrypto
3 | from ...peer import Peer
4 | from ...peerdiscovery.community import DiscoveryCommunity
5 | from ...peerdiscovery.network import Network
6 | from .endpoint import AutoMockEndpoint
7 |
8 | DEFAULT_COMMUNITY_SETTINGS = CommunitySettings()
9 |
10 |
11 | class MockCommunity(DiscoveryCommunity):
12 | """
13 | Semi-inert version of the DiscoveryCommunity for testing.
14 | """
15 |
16 | def __init__(self, settings: CommunitySettings = DEFAULT_COMMUNITY_SETTINGS) -> None:
17 | """
18 | Create a new MockCommunity.
19 | """
20 | endpoint = AutoMockEndpoint()
21 | endpoint.open()
22 |
23 | settings = CommunitySettings(
24 | endpoint=endpoint,
25 | network=Network(),
26 | my_peer=Peer(default_eccrypto.generate_key("very-low"), endpoint.wan_address)
27 | )
28 | super().__init__(settings)
29 | # workaround for race conditions in deliver_messages
30 | self._use_main_thread = False
31 | self.my_estimated_lan = endpoint.lan_address
32 | self.my_estimated_wan = endpoint.wan_address
33 |
--------------------------------------------------------------------------------
/ipv8/test/mocking/discovery.py:
--------------------------------------------------------------------------------
1 | from ...peerdiscovery.discovery import DiscoveryStrategy
2 |
3 |
4 | class MockWalk(DiscoveryStrategy):
5 | """
6 | Walker that connects to a random pre-known peer every step.
7 | """
8 |
9 | def take_step(self) -> None:
10 | """
11 | Walk to a random verified peer.
12 | """
13 | for peer in self.overlay.network.verified_peers:
14 | self.overlay.walk_to(peer.address)
15 |
--------------------------------------------------------------------------------
/ipv8/test/mocking/exit_socket.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import TYPE_CHECKING
4 |
5 | from ...messaging.anonymization.exit_socket import DataChecker, TunnelExitSocket
6 | from ...messaging.interfaces.endpoint import EndpointListener
7 | from ..mocking.endpoint import AutoMockEndpoint
8 |
9 | if TYPE_CHECKING:
10 | from ...types import Address
11 |
12 |
13 | class MockTunnelExitSocket(TunnelExitSocket, EndpointListener):
14 | """
15 | Mocked TunnelExitSocket that uses a mock endpoint.
16 | """
17 |
18 | def __init__(self, parent: TunnelExitSocket) -> None:
19 | """
20 | Wrap a tunnel exit socket to route it through the fake IPv8-only internet.
21 | """
22 | self.endpoint = AutoMockEndpoint()
23 | self.endpoint.open()
24 | self.parent = parent
25 |
26 | TunnelExitSocket.__init__(self, parent.circuit_id, parent.hop, parent.overlay)
27 | EndpointListener.__init__(self, self.endpoint, main_thread=False)
28 |
29 | self.endpoint.add_listener(self)
30 |
31 | def enable(self) -> None:
32 | """
33 | Set this exit node to enabled.
34 | """
35 | self.enabled = True
36 |
37 | def sendto(self, data: bytes, destination: Address) -> None:
38 | """
39 | Send data through to another mock endpoint's address.
40 | """
41 | if DataChecker.could_be_bt(data) or DataChecker.could_be_ipv8(data):
42 | self.endpoint.send(destination, data)
43 | else:
44 | msg = f"Attempted to exit data which is not allowed: {data!r}"
45 | raise AssertionError(msg)
46 |
47 | def on_packet(self, packet: tuple[Address, bytes]) -> None:
48 | """
49 | Callback for when data is received.
50 | """
51 | source_address, data = packet
52 | self.datagram_received(data, source_address)
53 |
54 | async def close(self) -> None:
55 | """
56 | Close our fake exit socket.
57 | """
58 | await self.shutdown_task_manager()
59 | await self.parent.close()
60 |
--------------------------------------------------------------------------------
/ipv8/test/peerdiscovery/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/ipv8/test/peerdiscovery/__init__.py
--------------------------------------------------------------------------------
/ipv8/test/test_database.py:
--------------------------------------------------------------------------------
1 | from ..database import Database
2 | from .base import TestBase
3 |
4 |
5 | class MockDatabase(Database):
6 | """
7 | Database that only creates the bare minimum versioning.
8 | """
9 |
10 | def check_database(self, database_version: bytes) -> int:
11 | """
12 | Inject a database version of 0 and succeed.
13 | """
14 | self.execute("CREATE TABLE option(key TEXT PRIMARY KEY, value BLOB)")
15 | self.execute("INSERT INTO option(key, value) VALUES('database_version', '0')")
16 | self.commit()
17 | return 0
18 |
19 |
20 | class TestDatabase(TestBase):
21 | """
22 | Tests related to the database class.
23 | """
24 |
25 | def setUp(self) -> None:
26 | """
27 | Create a memory-based database.
28 | """
29 | super().setUp()
30 | self.database = MockDatabase(":memory:")
31 |
32 | def test_unloaded(self) -> None:
33 | """
34 | Check if an unloaded database returns None for queries.
35 | """
36 | self.assertIsNone(self.database.execute("SELECT * FROM option"))
37 |
38 | def test_closed(self) -> None:
39 | """
40 | Check if a closed database returns None for queries.
41 | """
42 | self.database.open()
43 | self.assertListEqual([(b'database_version', b'0')], list(self.database.execute("SELECT * FROM option")))
44 | self.database.close(True)
45 | self.assertIsNone(self.database.execute("SELECT * FROM option"))
46 |
--------------------------------------------------------------------------------
/ipv8/util.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | import operator
4 | import signal
5 | import struct
6 | from asyncio import Event, Future, iscoroutine
7 | from typing import TYPE_CHECKING, Any, Callable, TypeVar
8 |
9 | if TYPE_CHECKING:
10 | from collections.abc import Awaitable, Coroutine
11 |
12 | maximum_integer = 2147483647
13 |
14 | int2byte = struct.Struct(">B").pack
15 | byte2int = operator.itemgetter(0)
16 |
17 | T = TypeVar("T")
18 |
19 |
20 | def succeed(result: T) -> Future[T]:
21 | """
22 | Convert a value to a future with the value set as the result.
23 | """
24 | future: Future[T] = Future()
25 | future.set_result(result)
26 | return future
27 |
28 |
29 | def fail(exception: type | BaseException) -> Future:
30 | """
31 | Return a future with the given exception set as its exception.
32 | """
33 | future: Future = Future()
34 | future.set_exception(exception)
35 | return future
36 |
37 |
38 | def maybe_coroutine(func: Callable, *args: Any, **kwargs) -> Awaitable: # noqa: ANN401
39 | """
40 | Ensure the return value of a callable is awaitable.
41 | """
42 | value = func(*args, **kwargs)
43 | if iscoroutine(value) or isinstance(value, Future):
44 | return value
45 |
46 | async def coro(): # noqa: ANN202
47 | return value
48 | return coro()
49 |
50 |
51 | def coroutine(func: Callable) -> Callable[[tuple[Any, ...], dict[str, Any]], Coroutine[Any, Any, Awaitable]]:
52 | """
53 | Ensure that the given callable is awaitable.
54 | """
55 | async def call_async(*args: Any, **kwargs) -> Awaitable: # noqa: ANN401
56 | return func(*args, **kwargs)
57 | return call_async
58 |
59 |
60 | def strip_sha1_padding(s: bytes) -> bytes:
61 | """
62 | Strip the artificial SHA-1 prefix to make it the same byte space as SHA3-256.
63 | """
64 | return s[12:] if s.startswith(b'SHA-1\x00\x00\x00\x00\x00\x00\x00') else s
65 |
66 |
67 | def create_event_with_signals(*args: int) -> Event:
68 | """
69 | Creates an event that gets set when certain signals are received. If signals are omitted,
70 | the signals SIGINT and SIGTERM will be used. If you don't need access to the event itself,
71 | the use of run_forever is preferred.
72 |
73 | :param args: signals after which the event should be set
74 | :type args: [int]
75 | :rtype: asyncio.Event
76 | """
77 | event = Event()
78 |
79 | for sig in (args or (signal.SIGINT, signal.SIGTERM)):
80 | signal.signal(sig, lambda _, __: event.set())
81 |
82 | return event
83 |
84 |
85 | def run_forever() -> Coroutine:
86 | """
87 | Helper function for waiting until the user presses Ctrl+C. Commonly used
88 | for keeping an application alive until shutdown.
89 |
90 | :rtype: coroutine
91 | """
92 | return create_event_with_signals().wait()
93 |
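# A minimal usage sketch (not from the repository) of the helpers above,
# assuming the ad-hoc callables defined inside it:
#
#     from asyncio import run
#
#     def plain(value):
#         return value * 2
#
#     async def demo():
#         # Plain callables and Futures can be awaited uniformly.
#         assert await maybe_coroutine(plain, 21) == 42
#         assert await succeed("ready") == "ready"
#
#     run(demo())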
--------------------------------------------------------------------------------
/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | ignore_missing_imports = True
3 |
4 | [mypy-ipv8_service]
5 | follow_imports = skip
6 |
7 | [mypy-ipv8.attestation.wallet.irmaexact.enroll_script]
8 | ignore_errors = True
9 |
10 | [mypy-ipv8.test.*]
11 | ignore_errors = True
12 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cryptography
2 | libnacl
3 | aiohttp
4 | aiohttp_apispec>=3.0.0b1
5 | pyOpenSSL
6 | pyasn1
7 | marshmallow
8 | typing-extensions
9 | packaging
10 | apispec>=6.0.0
11 |
--------------------------------------------------------------------------------
/scripts/__scriptpath__.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | import sys
3 |
4 | sys.path.insert(1, str(pathlib.Path(__file__, "..", "..").resolve()))
5 |
--------------------------------------------------------------------------------
/scripts/ipv8_plugin.py:
--------------------------------------------------------------------------------
1 | """
2 | This script starts IPv8 headless.
3 | """
4 | from __future__ import annotations
5 |
6 | import argparse
7 | import ssl
8 | import sys
9 | from asyncio import run
10 |
11 | # Check if we are running from the root directory
12 | # If not, modify our path so that we can import IPv8
13 | try:
14 | import ipv8
15 | del ipv8
16 | except ImportError:
17 | import __scriptpath__ # noqa: F401
18 |
19 |
20 | from ipv8.configuration import get_default_configuration
21 | from ipv8.REST.rest_manager import RESTManager
22 | from ipv8.util import run_forever
23 | from ipv8_service import IPv8
24 |
25 |
26 | class IPV8Service:
27 | """
28 | Service to orchestrate an IPv8 instance and a REST API.
29 | """
30 |
31 | def __init__(self) -> None:
32 | """
33 | Initialize the variables of the IPV8Service.
34 | """
35 | self.ipv8 = None
36 | self.restapi = None
37 |
38 | async def start_ipv8(self, statistics: bool, no_rest_api: bool, api_key: str | None, cert_file: str) -> None:
39 | """
40 | Main method to startup IPv8.
41 | """
42 | print("Starting IPv8") # noqa: T201
43 |
44 | self.ipv8 = IPv8(get_default_configuration(), enable_statistics=statistics)
45 | await self.ipv8.start()
46 |
47 | if not no_rest_api:
48 | # Load the certificate/key file. A new one can be generated as follows:
49 | # openssl req \
50 | # -newkey rsa:2048 -nodes -keyout private.key \
51 | # -x509 -days 365 -out certfile.pem
52 | # cat private.key >> certfile.pem
53 | # rm private.key
54 | ssl_context = None
55 | if cert_file:
56 | ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
57 | ssl_context.load_cert_chain(cert_file)
58 |
59 | self.restapi = RESTManager(self.ipv8)
60 | await self.restapi.start(api_key=api_key, ssl_context=ssl_context)
61 |
62 | async def stop_ipv8(self) -> None:
63 | """
64 | Stop the service.
65 | """
66 | print("Stopping IPv8") # noqa: T201
67 |
68 | if self.restapi:
69 | await self.restapi.stop()
70 | if self.ipv8:
71 | await self.ipv8.stop()
72 |
73 |
74 | async def main(argv: list[str]) -> None:
75 | """
76 | Create a service to run IPv8 and a REST API from some given commandline arguments.
77 | """
78 | parser = argparse.ArgumentParser(description='Starts IPv8 as a service')
79 | parser.add_argument('--statistics', '-s', action='store_true', help='Enable IPv8 overlay statistics')
80 | parser.add_argument('--no-rest-api', '-a', action='store_true', help='Autonomous: disable the REST api')
81 | parser.add_argument('--api-key', '-k', help='API key to use. If not given API key protection is disabled.')
82 | parser.add_argument('--cert-file', '-c', help='Path to combined certificate/key file. If not given HTTP is used.')
83 |
84 |     args = parser.parse_args(argv)
85 | service = IPV8Service()
86 |
87 | await service.start_ipv8(args.statistics, args.no_rest_api, args.api_key, args.cert_file)
88 | await run_forever()
89 | await service.stop_ipv8()
90 |
91 |
92 | if __name__ == "__main__":
93 | run(main(sys.argv[1:]))
94 |
--------------------------------------------------------------------------------
/scripts/tracker_plugin.py:
--------------------------------------------------------------------------------
1 | """
2 | This script starts the tracker.
3 |
4 | Select the port you want to use by setting the `listen_port` command line argument.
5 | """
6 | import argparse
7 | import sys
8 | from asyncio import run
9 |
10 | # Check if we are running from the root directory
11 | # If not, modify our path so that we can import IPv8
12 | try:
13 | import ipv8
14 | del ipv8
15 | except ImportError:
16 | import __scriptpath__ # noqa: F401
17 |
18 | from tracker_service import TrackerService
19 |
20 | from ipv8.util import run_forever
21 |
22 |
23 | async def main() -> None:
24 | """
25 | Start a tracker service with some given commandline arguments.
26 | """
27 | parser = argparse.ArgumentParser(add_help=False,
28 | description='IPv8 tracker plugin')
29 | parser.add_argument('--help', '-h', action='help',
30 | default=argparse.SUPPRESS,
31 | help='Show this help message and exit')
32 | parser.add_argument('--listen_port', '-p', default=8090, type=int,
33 | help='Use an alternative IPv8 port')
34 | parser.add_argument('--listen_port_api', '-a', default=-1, type=int,
35 | help='Use an alternative API port')
36 | parser.add_argument('--api_key', '-k',
37 | help='API key to use. If not given API key protection is disabled.')
38 | parser.add_argument('--cert_file', '-c',
39 | help='Path to combined certificate/key file. If not given HTTP is used.')
40 |
41 | args = parser.parse_args(sys.argv[1:])
42 |
43 | service = TrackerService()
44 | await service.start_tracker(args.listen_port)
45 | if args.listen_port_api >= 0:
46 | await service.start_api(args.listen_port_api, args.api_key, args.cert_file)
47 | await run_forever()
48 | await service.shutdown()
49 |
50 |
51 | if __name__ == "__main__":
52 | run(main())
53 |
--------------------------------------------------------------------------------
/scripts/tracker_reporter_plugin.py:
--------------------------------------------------------------------------------
1 | """
2 | This script starts the tracker that reports anonymized statistics.
3 |
4 | Select the port you want to use by setting the `listen_port` command line argument.
5 | """
6 | import argparse
7 | import sys
8 | from asyncio import run
9 |
10 | from trackermetricsreporter import MetricsReporter
11 |
12 | # Check if we are running from the root directory
13 | # If not, modify our path so that we can import IPv8
14 | try:
15 | import ipv8
16 | del ipv8
17 | except ImportError:
18 | import __scriptpath__ # noqa: F401
19 |
20 | from tracker_service import EndpointServer, TrackerService
21 |
22 | from ipv8.types import Address, Endpoint, Peer
23 | from ipv8.util import run_forever
24 |
25 |
26 | class ReportingEndpointServer(EndpointServer):
27 | """
28 | Extend the tracker community by adding a reporter that listens in on all incoming introduction requests.
29 | """
30 |
31 | def __init__(self, endpoint: Endpoint, reporter: MetricsReporter) -> None:
32 | """
33 | Create a new server that notifies the given reporter.
34 | """
35 | super().__init__(endpoint)
36 | self.reporter = reporter
37 |
38 | def on_peer_introduction_request(self, peer: Peer, source_address: Address, service_id: bytes) -> None:
39 | """
40 | Callback for when a peer has sent an introduction request.
41 | """
42 | self.reporter.count_peer(peer.mid, source_address, service_id)
43 |
44 |
45 | class ReportingTrackerService(TrackerService):
46 | """
47 | Extend the tracker service by adding a reporter that listens in on all incoming introduction requests.
48 | """
49 |
50 | def __init__(self, reporter: MetricsReporter) -> None:
51 | """
52 | Create a new service that notifies the given reporter.
53 | """
54 | super().__init__()
55 | self.reporter = reporter
56 |
57 | def create_endpoint_server(self) -> EndpointServer:
58 | """
59 | Instantiate our reporting Community.
60 | """
61 | return ReportingEndpointServer(self.endpoint, self.reporter)
62 |
63 |
64 | async def main() -> None:
65 | """
66 | Start a reporting tracker service with some given commandline arguments.
67 | """
68 | parser = argparse.ArgumentParser(
69 | add_help=False,
70 | description='IPv8 tracker plugin which reports anonymized stats')
71 | parser.add_argument('--help', '-h', action='help',
72 | default=argparse.SUPPRESS,
73 | help='Show this help message and exit')
74 | parser.add_argument('--listen_port', '-p', default=8090, type=int,
75 | help='Use an alternative port')
76 |
77 | args = parser.parse_args(sys.argv[1:])
78 | listen_port = args.listen_port
79 |
80 | reporter = MetricsReporter(listen_port)
81 | service = ReportingTrackerService(reporter)
82 |
83 | await service.start_tracker(listen_port)
84 | reporter.start()
85 |
86 | await run_forever()
87 |
88 | await service.shutdown()
89 | reporter.shutdown()
90 |
91 |
92 | if __name__ == "__main__":
93 | run(main())
94 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 |
3 | with open("README.md") as fh:
4 | long_description = fh.read()
5 |
6 | setup(
7 | name='pyipv8',
8 | author='Tribler',
9 | description='The Python implementation of the IPV8 library',
10 | long_description=long_description,
11 | long_description_content_type='text/markdown',
12 | version='3.0.0', # Do not change manually! Handled by github_increment_version.py
13 | url='https://github.com/Tribler/py-ipv8',
14 | package_data={'': ['*.*']},
15 | packages=find_packages(),
16 | py_modules=['ipv8_service'],
17 | install_requires=[
18 | "cryptography",
19 | "libnacl",
20 | "aiohttp",
21 | "aiohttp_apispec",
22 | "pyOpenSSL",
23 | "pyasn1",
24 | "marshmallow",
25 | "typing-extensions",
26 | "packaging"
27 | ],
28 | extras_require={
29 | "all": ["coverage"],
30 | "tests": ["coverage"]
31 | },
32 | classifiers=[
33 | "Development Status :: 5 - Production/Stable",
34 | "Intended Audience :: Developers",
35 | "License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)",
36 | "Natural Language :: English",
37 | "Operating System :: OS Independent",
38 | "Programming Language :: Python :: 3.9",
39 | "Programming Language :: Python :: 3.10",
40 | "Programming Language :: Python :: 3.11",
41 | "Programming Language :: Python :: 3.12",
42 | "Programming Language :: Python :: 3.13",
43 | "Topic :: Scientific/Engineering",
44 | "Topic :: Software Development :: Libraries :: Python Modules",
45 | "Topic :: System :: Distributed Computing",
46 | "Topic :: System :: Networking"
47 | ]
48 | )
49 |
--------------------------------------------------------------------------------
/stresstest/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Tribler/py-ipv8/689cdecbeb63eef4f681987eb8efd3ba0e88f71f/stresstest/__init__.py
--------------------------------------------------------------------------------
/stresstest/__scriptpath__.py:
--------------------------------------------------------------------------------
1 | import pathlib
2 | import sys
3 |
4 | sys.path.insert(1, str(pathlib.Path(__file__, "..", "..").resolve()))
5 |
--------------------------------------------------------------------------------
/stresstest/bootstrap_introductions.r:
--------------------------------------------------------------------------------
1 | library(ggplot2)
2 |
3 | bootstrap_introductions <- read.table("bootstrap_introductions.txt", header=T, quote="\"")
4 | p <- ggplot(data=bootstrap_introductions, aes(x=Address, y=Peers, fill=factor(Type))) +
5 | geom_bar(position=position_dodge2(reverse=TRUE, width=0.8), width=0.7, stat="identity") +
6 | coord_flip() +
7 | ggtitle("Number of addresses discovered while bootstrapping") +
8 | scale_x_discrete(limits=rev(sort(unique(bootstrap_introductions$Address)))) +
9 | scale_fill_discrete(labels=c("Total WAN addresses", "Unique WAN addresses", "Reachable WAN addresses", "Total LAN addresses")) +
10 | theme(legend.title=element_blank())
11 | p
12 | ggsave("bootstrap_introductions.png", width=10, height=6, dpi=100)
13 | q(save="no")
14 |
--------------------------------------------------------------------------------
/systemd/ipv8-exit-node@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=IPv8 exit node listening at port %i
3 |
4 | [Service]
5 | ProtectSystem=yes
6 |
7 | Environment="PYTHONPATH=/opt/ipv8"
8 |
9 | WorkingDirectory=/opt/ipv8
10 |
11 | ExecStartPre=/bin/mkdir -p ${HOME}/%i
12 | ExecStart=/usr/bin/python3 scripts/exitnode_ipv8_only_plugin.py --listen_port=%i --statedir=${HOME}/%i
13 |
14 | User=ipv8_exitnode
15 | Group=ipv8_exitnode
16 |
17 | Restart=always
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
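# Example (hypothetical host setup): with this template unit installed under
# /etc/systemd/system/, an exit node bound to port 8090 can be started with:
#   systemctl enable --now ipv8-exit-node@8090.service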
--------------------------------------------------------------------------------
/systemd/ipv8-tracker@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=IPv8 tracker listening at port %i
3 |
4 | [Service]
5 | ProtectSystem=yes
6 |
7 | Environment="PYTHONPATH=/opt/ipv8"
8 |
9 | WorkingDirectory=/opt/ipv8
10 |
11 | ExecStartPre=/bin/mkdir -p ${HOME}/%i
12 | ExecStart=/usr/bin/python3 scripts/tracker_plugin.py --listen_port=%i
13 |
14 | User=ipv8_tracker
15 | Group=ipv8_tracker
16 |
17 | Restart=always
18 |
19 | [Install]
20 | WantedBy=multi-user.target
21 |
--------------------------------------------------------------------------------