├── wgkex
├── __init__.py
├── broker
│ ├── __init__.py
│ ├── templates
│ │ └── index.html
│ ├── BUILD
│ ├── metrics.py
│ ├── metrics_test.py
│ └── app.py
├── common
│ ├── __init__.py
│ ├── mqtt.py
│ ├── BUILD
│ ├── utils_test.py
│ ├── logger.py
│ └── utils.py
├── worker
│ ├── __init__.py
│ ├── msg_queue.py
│ ├── BUILD
│ ├── app_test.py
│ ├── app.py
│ ├── mqtt_test.py
│ ├── mqtt.py
│ ├── netlink.py
│ └── netlink_test.py
└── config
│ ├── __init__.py
│ ├── BUILD
│ ├── config_test.py
│ └── config.py
├── CHANGELOG.md
├── CODEOWNERS
├── .github
├── buildkitd.toml
├── workflows
│ ├── black.yml
│ ├── pylint.yml
│ ├── bazelversion.yml
│ ├── bazel.yml
│ ├── build.yml
│ └── publish.yml
└── dependabot.yml
├── .bazelrc
├── Docs
├── architecture.png
├── architecture-dark.png
└── architecture.dot
├── .bazelversion
├── .coveragerc
├── requirements.txt
├── BUILD
├── docker-compose.override.yaml.example
├── env.example
├── MODULE.bazel
├── Dockerfile
├── entrypoint
├── wgkex.yaml.example
├── docker-compose.yml
├── .gitignore
├── README.md
├── MODULE.bazel.lock
├── requirements_lock.txt
└── LICENSE
/wgkex/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ### latest
2 |
--------------------------------------------------------------------------------
/wgkex/broker/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/wgkex/common/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/wgkex/worker/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @freifunkMUC/salt-stack
2 |
--------------------------------------------------------------------------------
/.github/buildkitd.toml:
--------------------------------------------------------------------------------
1 | [worker.oci]
2 | max-parallelism = 4
3 |
--------------------------------------------------------------------------------
/.bazelrc:
--------------------------------------------------------------------------------
1 | # Enable Bzlmod for every Bazel command
2 | common --enable_bzlmod
3 |
--------------------------------------------------------------------------------
/Docs/architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/freifunkMUC/wgkex/HEAD/Docs/architecture.png
--------------------------------------------------------------------------------
/.bazelversion:
--------------------------------------------------------------------------------
1 | 8.4.2
2 | # make sure that the image in the Dockerfile and this version are identical
3 |
--------------------------------------------------------------------------------
/Docs/architecture-dark.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/freifunkMUC/wgkex/HEAD/Docs/architecture-dark.png
--------------------------------------------------------------------------------
/wgkex/config/__init__.py:
--------------------------------------------------------------------------------
1 | from wgkex.config.config import get_config
2 |
3 | __all__ = ["get_config"]
4 |
--------------------------------------------------------------------------------
/wgkex/broker/templates/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head><title>wgkex</title></head>
4 | <body>
5 |
6 | <h1>WGKEX</h1>
7 |
8 | </body>
9 | </html>
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [paths]
2 | source = wgkex
3 |
4 | [run]
5 | branch = True
6 | source = wgkex
7 | parallel = true
8 |
9 | [report]
10 | show_missing = true
11 | precision = 2
12 | omit = *migrations*
13 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | NetLink~=0.1
2 | flask-mqtt
3 | pyroute2~=0.9.5
4 | PyYAML~=6.0.3
5 | Flask~=3.1.1
6 | waitress~=3.0.0
7 |
8 | # Common
9 | ipaddress~=1.0.23
10 | mock~=5.2.0
11 | coverage
12 | paho-mqtt~=2.1.0
13 |
--------------------------------------------------------------------------------
/BUILD:
--------------------------------------------------------------------------------
1 | load("@rules_python//python:pip.bzl", "compile_pip_requirements")
2 |
3 | compile_pip_requirements(
4 | name = "requirements",
5 | src = "//:requirements.txt",
6 | requirements_txt = "requirements_lock.txt",
7 | visibility = ["//visibility:public"],
8 | )
9 |
--------------------------------------------------------------------------------
/wgkex/common/mqtt.py:
--------------------------------------------------------------------------------
1 | """Common MQTT constants like topic string templates"""
2 |
3 | TOPIC_WORKER_WG_DATA = "wireguard-worker/{worker}/{domain}/data"
4 | TOPIC_WORKER_STATUS = "wireguard-worker/{worker}/status"
5 | CONNECTED_PEERS_METRIC = "connected_peers"
6 | TOPIC_CONNECTED_PEERS = "wireguard-metrics/{domain}/{worker}/" + CONNECTED_PEERS_METRIC
7 |
--------------------------------------------------------------------------------
/docker-compose.override.yaml.example:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | mqtt:
5 | #profiles: ["do-not-start"]
6 | #volumes:
7 | # - ./config/mosquitto:/mosquitto/config
8 | broker:
9 | #profiles: ["do-not-start"]
10 | #volumes:
11 | # - ./config/broker/wgkex.yaml:/etc/wgkex.yaml
12 | worker:
13 | profiles: ["do-not-start"]
14 | #volumes:
15 | # - ./config/broker/wgkex.yaml:/etc/wgkex.yaml
16 |
--------------------------------------------------------------------------------
/env.example:
--------------------------------------------------------------------------------
1 | # Copy or rename this file to .env and modify it for your needs
2 |
3 | #WGKEX_DOMAINS="ffmuc_muc_cty, ffmuc_muc_nord, ffmuc_muc_ost, ffmuc_muc_sued, ffmuc_muc_west, ffmuc_welt, ffwert_city"
4 | #WGKEX_DOMAIN_PREFIXES="ffmuc_, ffdon_, ffwert_"
5 | #WGKEX_DEBUG="DEBUG"
6 |
7 | #MQTT_BROKER_URL="mqtt"
8 | #MQTT_BROKER_PORT="1883"
9 | #MQTT_USERNAME=""
10 | #MQTT_PASSWORD=""
11 | #MQTT_KEEPALIVE="5"
12 | #MQTT_TLS="False"
13 |
--------------------------------------------------------------------------------
/.github/workflows/black.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on: [push, pull_request]
4 |
5 | permissions:
6 | contents: read
7 |
8 | jobs:
9 | black:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
13 | - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
14 | - uses: psf/black@af0ba72a73598c76189d6dd1b21d8532255d5942 # v25.9.0
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # Docs: https://docs.github.com/en/code-security/dependabot/working-with-dependabot/dependabot-options-reference
2 |
3 | version: 2
4 |
5 | updates:
6 | - package-ecosystem: pip
7 | directory: /
8 | schedule: {interval: monthly}
9 |
10 | - package-ecosystem: github-actions
11 | directory: /
12 | schedule: {interval: monthly}
13 |
14 | - package-ecosystem: docker
15 | directory: /
16 | schedule: {interval: monthly}
17 |
--------------------------------------------------------------------------------
/.github/workflows/pylint.yml:
--------------------------------------------------------------------------------
1 | on: [push, pull_request]
2 | name: pylint
3 |
4 | permissions:
5 | contents: read
6 | pull-requests: read
7 |
8 | jobs:
9 | gitHubActionForPylint:
10 | name: GitHub Action for pylint
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
14 | - uses: astral-sh/ruff-action@57714a7c8a2e59f32539362ba31877a1957dded1 # v3.5.1
15 | with:
16 | src: "./wgkex"
17 |
--------------------------------------------------------------------------------
/MODULE.bazel:
--------------------------------------------------------------------------------
1 | bazel_dep(name = "rules_python", version = "1.0.0-rc2")
2 |
3 | python = use_extension("@rules_python//python/extensions:python.bzl", "python")
4 | python.toolchain(
5 | configure_coverage_tool = True,
6 | python_version = "3.13",
7 | )
8 |
9 | pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip")
10 | pip.parse(
11 | hub_name = "pip",
12 | python_version = "3.13",
13 | requirements_lock = "//:requirements_lock.txt",
14 | )
15 | use_repo(pip, "pip")
16 |
--------------------------------------------------------------------------------
/wgkex/config/BUILD:
--------------------------------------------------------------------------------
1 | load("@rules_python//python:defs.bzl", "py_binary", "py_test")
2 | load("@pip//:requirements.bzl", "requirement")
3 |
4 |
5 | py_library(
6 | name="config",
7 | srcs=["config.py"],
8 | visibility=["//visibility:public"],
9 | deps=[requirement("PyYAML"),
10 | "//wgkex/common:utils",
11 | "//wgkex/common:logger",
12 | ],
13 | )
14 |
15 | py_test(
16 | name="config_test",
17 | srcs=["config_test.py"],
18 | deps=[
19 | "//wgkex/config:config",
20 | requirement("mock"),
21 | ],
22 | size="small",
23 | )
24 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM gcr.io/bazel-public/bazel:8.4.2 AS builder
2 | # Make sure .bazelversion and the version of image are identical
3 |
4 | WORKDIR /wgkex
5 |
6 | COPY .bazelrc BUILD MODULE.bazel MODULE.bazel.lock requirements_lock.txt ./
7 | COPY wgkex ./wgkex
8 |
9 | RUN ["bazel", "build", "//wgkex/broker:app"]
10 | RUN ["bazel", "build", "//wgkex/worker:app"]
11 | RUN ["cp", "-rL", "bazel-bin", "bazel"]
12 |
13 |
14 | FROM python:3.14.0-slim-bookworm
15 | WORKDIR /wgkex
16 |
17 | COPY --from=builder /wgkex/bazel /wgkex/
18 |
19 | COPY entrypoint /entrypoint
20 |
21 | EXPOSE 5000
22 |
23 | ENTRYPOINT ["/entrypoint"]
24 | CMD ["broker"]
25 |
--------------------------------------------------------------------------------
/wgkex/common/BUILD:
--------------------------------------------------------------------------------
1 | load("@rules_python//python:defs.bzl", "py_binary", "py_test")
2 | load("@pip//:requirements.bzl", "requirement")
3 |
4 |
5 | py_library(
6 | name = "utils",
7 | srcs = ["utils.py"],
8 | visibility = ["//visibility:public"],
9 | deps = [
10 | requirement("ipaddress"),
11 | ],
12 | )
13 |
14 | py_test(
15 | name = "utils_test",
16 | srcs = ["utils_test.py"],
17 | deps = [
18 | "//wgkex/common:utils",
19 | "//wgkex/config:config",
20 | requirement("mock"),
21 | ],
22 | size="small",
23 | )
24 |
25 | py_library(
26 | name = "logger",
27 | srcs = ["logger.py"],
28 | visibility = ["//visibility:public"]
29 | )
30 |
31 | py_library(
32 | name = "mqtt",
33 | srcs = ["mqtt.py"],
34 | visibility = ["//visibility:public"]
35 | )
36 |
--------------------------------------------------------------------------------
/wgkex/broker/BUILD:
--------------------------------------------------------------------------------
1 | load("@rules_python//python:defs.bzl", "py_binary", "py_test")
2 | load("@pip//:requirements.bzl", "requirement")
3 |
4 | py_library(
5 | name = "metrics",
6 | srcs = ["metrics.py"],
7 | visibility = ["//visibility:public"],
8 | deps = [
9 | "//wgkex/common:mqtt",
10 | "//wgkex/common:logger",
11 | "//wgkex/config:config",
12 | ],
13 | )
14 |
15 | py_test(
16 | name="metrics_test",
17 | srcs=["metrics_test.py"],
18 | deps = [
19 | "//wgkex/broker:metrics",
20 | requirement("mock"),
21 | ],
22 | size="small",
23 | )
24 |
25 | py_binary(
26 | name="app",
27 | srcs=["app.py"],
28 | data=["templates/index.html"],
29 | visibility=["//visibility:public"],
30 | deps=[
31 | requirement("flask"),
32 | requirement("flask-mqtt"),
33 | requirement("waitress"),
34 | "//wgkex/config:config",
35 | "//wgkex/common:mqtt",
36 | ":metrics"
37 | ],
38 | )
39 |
--------------------------------------------------------------------------------
/wgkex/common/utils_test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from wgkex.common import utils
4 |
5 |
class UtilsTest(unittest.TestCase):
    def test_mac2eui64_success(self):
        """mac2eui64 converts a well-formed MAC to an EUI64 identifier."""
        self.assertEqual(
            "c691:0cff:feb2:c5a0", utils.mac2eui64("c4:91:0c:b2:c5:a0")
        )

    def test_mac2eui64_fails_bad_mac(self):
        """mac2eui64 raises ValueError for a malformed MAC address."""
        self.assertRaises(ValueError, utils.mac2eui64, "not_a_mac_address")

    def test_mac2eui64_success_with_prefix(self):
        """mac2eui64 builds a full IPv6 address when a prefix is supplied."""
        self.assertEqual(
            "fe80::c691:cff:feb2:c5a0/10",
            utils.mac2eui64("c4:91:0c:b2:c5:a0", "FE80::/10"),
        )

    def test_mac2eui64_fails_bad_prefix(self):
        """mac2eui64 raises ValueError for an invalid IPv6 prefix."""
        self.assertRaises(
            ValueError, utils.mac2eui64, "c4:91:0c:b2:c5:a0", "not_ipv6_addr"
        )
26 |
27 |
28 | if __name__ == "__main__":
29 | unittest.main()
30 |
--------------------------------------------------------------------------------
/entrypoint:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | : "${WGKEX_DOMAINS:=ffmuc_muc_cty, ffmuc_muc_nord, ffmuc_muc_ost, ffmuc_muc_sued, ffmuc_muc_west, ffmuc_welt, ffwert_city}"
5 | : "${WGKEX_DOMAIN_PREFIXES:=ffmuc_, ffdon_, ffwert_}"
6 | : "${WGKEX_DEBUG:=DEBUG}"
7 | : "${MQTT_BROKER_URL:=mqtt}"
8 | : "${MQTT_BROKER_PORT:=1883}"
9 | : "${MQTT_USERNAME:=}"
10 | : "${MQTT_PASSWORD:=}"
11 | : "${MQTT_KEEPALIVE:=5}"
12 | : "${MQTT_TLS:=False}"
13 |
14 | mk_config() {
15 | if [ ! -e /etc/wgkex.yaml ] ; then
16 | (
17 | echo "domains:"
18 | IFS=", "
19 | for i in $WGKEX_DOMAINS; do
20 | echo " - $i"
21 | done
22 | echo "domain_prefixes:"
23 | for i in $WGKEX_DOMAIN_PREFIXES; do
24 | echo " - $i"
25 | done
26 | cat <<EOF
27 | mqtt:
28 |   broker_url: $MQTT_BROKER_URL
29 |   broker_port: $MQTT_BROKER_PORT
30 |   username: "$MQTT_USERNAME"
31 |   password: "$MQTT_PASSWORD"
32 |   keepalive: $MQTT_KEEPALIVE
33 |   tls: $MQTT_TLS
34 | EOF
35 |     ) > /etc/wgkex.yaml
38 | fi
39 | }
40 |
41 | mk_config
42 |
43 | case "$1" in
44 | broker)
45 | exec ./wgkex/broker/app
46 | ;;
47 | worker)
48 | exec ./wgkex/worker/app
49 | ;;
50 | esac
51 |
52 | exec "$@"
53 |
54 |
--------------------------------------------------------------------------------
/Docs/architecture.dot:
--------------------------------------------------------------------------------
1 | # http://www.graphviz.org/content/cluster
2 |
3 | digraph G {
4 | graph [fontname = "Handlee"];
5 | node [fontname = "Handlee"];
6 | edge [fontname = "Handlee"];
7 |
8 | bgcolor=transparent;
9 |
10 | subgraph wgkex_broker {
11 | style=filled;
12 | color=lightgrey;
13 | style="dotted"
14 | node [style=filled,color="#d8b365"];
15 | wgkex_broker [label="*WGKex Broker*"];
16 | fontsize = 20;
17 | }
18 |
19 | subgraph wgkex_mqtt {
20 | node [style=filled,color="#f5f5f5"];
21 | style="dotted"
22 | mqtt [label="*Mosquitto*"];
23 | fontsize = 20;
24 | color=blue
25 | }
26 |
27 | subgraph wgkex_worker {
28 | node [style=filled,color="#5ab4ac"];
29 | style="dotted"
30 | color=blue;
31 | label = "*WGKEX Worker*";
32 | fontsize = 20;
33 | color=blue
34 | mqtt -> wgkex_worker -> netlink -> wireguard -> vxlan [style=invis];
35 | wgkex_worker [label="*WGKex Worker*"];
36 | netlink [label="netlink\n(pyroute2)"];
37 | wireguard [label="wireguard\n(pyroute2)"];
38 | vxlan [label="vxlan FDB\n(pyroute2)"];
39 | }
40 | client -> wgkex_broker [label="RESTFul API"];
41 | wgkex_broker -> mqtt [label="publish"];
42 | wgkex_worker -> mqtt [label="Subscribe"];
43 | wgkex_worker -> netlink [label="Route\nInjection"];
44 | wgkex_worker -> wireguard [label="Peer\nCreation"];
45 | wgkex_worker -> vxlan [label="VxLAN FDB\nEntry"];
46 |
47 | client [shape=Mdiamond];
48 | {rank=same wgkex_broker mqtt}
49 | {rank=same netlink wireguard vxlan}
50 | }
51 |
--------------------------------------------------------------------------------
/wgkex.yaml.example:
--------------------------------------------------------------------------------
1 | # [broker, worker] The domains that should be accepted by clients and for which matching WireGuard interfaces exist
2 | domains:
3 | - ffmuc_muc_cty
4 | - ffmuc_muc_nord
5 | - ffmuc_muc_ost
6 | - ffmuc_muc_sued
7 | - ffmuc_muc_west
8 | - ffmuc_welt
9 | - ffwert_city
10 | # [broker, worker] The prefix is trimmed from the domain name and replaced with 'wg-' and 'vx-'
11 | # to calculate the WireGuard and VXLAN interface names
12 | domain_prefixes:
13 | - ffmuc_
14 | - ffdon_
15 | - ffwert_
16 | # [broker] The dict of workers mapping their hostname to their respective weight for weighted peer distribution
17 | workers:
18 | gw04.in.ffmuc.net:
19 | weight: 30
20 | gw05.in.ffmuc.net:
21 | weight: 30
22 | gw06.in.ffmuc.net:
23 | weight: 20
24 | gw07.in.ffmuc.net:
25 | weight: 20
26 | # [worker] The external hostname of this worker
27 | externalName: gw04.ext.ffmuc.net
28 | # [broker, worker] MQTT connection information
29 | mqtt:
30 | broker_url: broker.hivemq.com
31 | broker_port: 1883
32 | username: user
33 | password: SECRET
34 | keepalive: 5
35 | tls: False
36 | # [broker]
37 | broker_listen:
38 | host: 0.0.0.0
39 | port: 5000
40 | # [broker, worker]
41 | logging_config:
42 | formatters:
43 | standard:
44 | format: '%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s'
45 | handlers:
46 | console:
47 | class: logging.StreamHandler
48 | formatter: standard
49 | root:
50 | handlers:
51 | - console
52 | level: DEBUG
53 | version: 1
54 |
--------------------------------------------------------------------------------
/.github/workflows/bazelversion.yml:
--------------------------------------------------------------------------------
1 | name: Sync .bazelversion with Dockerfile
2 |
3 | on:
4 | pull_request:
5 |
6 | jobs:
7 | update-bazelversion:
8 | if: github.actor == 'dependabot[bot]'
9 | permissions:
10 | contents: write
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - name: Checkout PR
15 | uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
16 | with:
17 | ref: ${{ github.head_ref }}
18 |
19 | - name: Extract Bazel version from Dockerfile
20 | id: extract_version
21 | run: |
22 | version=$(grep -oP '(?<=gcr.io/bazel-public/bazel:)[0-9]+\.[0-9]+\.[0-9]+' Dockerfile)
23 | echo "bazel_version=$version" >> $GITHUB_OUTPUT
24 |
25 | - name: Update .bazelversion if different
26 | run: |
27 | current_version=$(head -n1 .bazelversion | tr -d '[:space:]')
28 | if [ "$current_version" != "${{ steps.extract_version.outputs.bazel_version }}" ]; then
29 | echo "${{ steps.extract_version.outputs.bazel_version }}" > .bazelversion
30 | echo "# make sure that the image in the Dockerfile and this version are identical" >> .bazelversion
31 | git config user.name "github-actions[bot]"
32 | git config user.email "github-actions[bot]@users.noreply.github.com"
33 | git add .bazelversion
34 | git commit -m "chore: sync .bazelversion with Dockerfile"
35 | git push origin ${{ github.head_ref }}
36 | else
37 | echo ".bazelversion is already up to date."
38 | fi
39 |
--------------------------------------------------------------------------------
/.github/workflows/bazel.yml:
--------------------------------------------------------------------------------
1 | name: Bazel tests
2 |
3 | on: [push, pull_request]
4 | permissions:
5 | contents: read
6 | actions: read
7 | checks: write
8 |
9 | jobs:
10 | bazel-run:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Setup cache
14 | uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 # v4.3.0
15 | with:
16 | path: "/home/runner/.cache/bazel"
17 | key: bazel
18 | - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
19 | - name: Run Bazel tests
20 | run: bazel test ...:all --test_output=all --action_env=WGKEX_CONFIG_FILE=`pwd`/wgkex.yaml.example
21 | - name: Python coverage
22 | run: |
23 | sudo apt-get install -y lcov
24 | mkdir "${GITHUB_WORKSPACE}/src"
25 | cd "${GITHUB_WORKSPACE}/src"
26 | curl -L https://files.pythonhosted.org/packages/89/26/4a96807b193b011588099c3b5c89fbb05294e5b90e71018e065465f34eb6/coverage-7.12.0.tar.gz | tar xvz
27 | cd "${GITHUB_WORKSPACE}"
28 | bazel coverage --combined_report=lcov --java_runtime_version=remotejdk_11 -t- --instrument_test_targets \
29 | --test_output=errors --linkopt=--coverage --linkopt=-lc \
30 | --test_env=PYTHON_COVERAGE=${GITHUB_WORKSPACE}/src/coverage-7.12.0/__main__.py \
31 | --test_verbose_timeout_warnings --define=config_file=test ...:all
32 | - name: Coveralls
33 | uses: coverallsapp/github-action@5cbfd81b66ca5d10c19b062c04de0199c215fb6e # v2.3.7
34 | with:
35 | github-token: ${{ secrets.GITHUB_TOKEN }}
36 | path-to-lcov: bazel-out/_coverage/_coverage_report.dat
37 |
--------------------------------------------------------------------------------
/wgkex/common/logger.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from logging import config
3 | from logging import critical as critical
4 | from logging import debug as debug
5 | from logging import error as error
6 | from logging import info as info
7 | from logging import warning as warning
8 |
9 | import yaml
10 |
11 | from wgkex.config.config import WG_CONFIG_DEFAULT_LOCATION
12 |
13 | _LOGGING_DEFAULT_CONFIG = {
14 | "version": 1,
15 | "handlers": {
16 | "console": {
17 | "class": "logging.StreamHandler",
18 | "formatter": "standard",
19 | }
20 | },
21 | "formatters": {
22 | "standard": {
23 | "format": "%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"
24 | },
25 | },
26 | "root": {"level": "DEBUG", "handlers": ["console"]},
27 | }
28 |
29 |
def fetch_logging_configuration():
    """Fetches logging configuration from disk, if it exists.

    If the config file exists and contains a 'logging_config' key, that
    configuration is returned. Otherwise the built-in default configuration
    (_LOGGING_DEFAULT_CONFIG) is returned.

    Returns:
        A logging configuration dict suitable for logging.config.dictConfig.
    """
    logging_cfg = dict()
    if os.path.isfile(WG_CONFIG_DEFAULT_LOCATION):
        with open(WG_CONFIG_DEFAULT_LOCATION) as cfg_file:
            # yaml.load returns None for an empty file; fall back to {} so the
            # .get() below cannot raise AttributeError.
            logging_cfg = yaml.load(cfg_file, Loader=yaml.FullLoader) or {}
    if logging_cfg.get("logging_config"):
        return logging_cfg.get("logging_config")
    return _LOGGING_DEFAULT_CONFIG
46 |
47 |
48 | cfg = fetch_logging_configuration()
49 | config.dictConfig(cfg)
50 | info("Initialised logger, using configuration: %s", cfg)
51 |
--------------------------------------------------------------------------------
/wgkex/worker/msg_queue.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import threading
3 | from queue import Queue
4 | from time import sleep
5 |
6 | from wgkex.common import logger
7 | from wgkex.worker.netlink import WireGuardClient, link_handler
8 |
9 |
class UniqueQueue(Queue):
    """A Queue subclass that silently drops items already waiting in it.

    The underlying storage is a set rather than a deque, so retrieval order
    is arbitrary, not FIFO.
    """

    def put(self, item, block=True, timeout=None):
        # Drop duplicates: only enqueue items not already pending.
        if item in self.queue:
            return
        Queue.put(self, item, block, timeout)

    def _init(self, maxsize):
        # Replace the default deque with a set for O(1) membership checks.
        self.queue = set()

    def _put(self, item):
        self.queue.add(item)

    def _get(self):
        return self.queue.pop()
23 |
24 |
25 | q = UniqueQueue()
26 |
27 |
def watch_queue() -> None:
    """Watches the queue for new messages.

    Spawns pick_from_queue in a daemon thread and returns immediately; the
    daemon flag means the watcher dies with the main process.
    """
    logger.debug("Starting queue watcher")
    threading.Thread(target=pick_from_queue, daemon=True).start()
32 |
33 |
def pick_from_queue() -> None:
    """Picks a message from the queue and processes it.

    Runs forever: drains (domain, public_key) tuples from the module-level
    queue `q` and hands each to link_handler as a WireGuardClient add
    request. Polls with a 1-second sleep while the queue is empty.
    """
    logger.debug("Starting queue processor")
    while True:
        if not q.empty():
            logger.debug("Queue is not empty current size is %i", q.qsize())
            # Each queue entry is a (domain, public_key) tuple.
            domain, message = q.get()
            logger.debug("Processing queue item %s for domain %s", message, domain)
            # remove=False: queued messages only ever add peers, never delete.
            client = WireGuardClient(
                public_key=message,
                domain=domain,
                remove=False,
            )
            logger.info(
                f"Processing queue for key {client.public_key} on domain {domain} with lladdr {client.lladdr}"
            )
            logger.debug(link_handler(client))
            q.task_done()
        else:
            logger.debug("Queue is empty")
            sleep(1)
55 |
--------------------------------------------------------------------------------
/wgkex/common/utils.py:
--------------------------------------------------------------------------------
1 | """A collection of general utilities."""
2 |
3 | import ipaddress
4 | import re
5 |
6 | from wgkex.config import config
7 |
8 |
def mac2eui64(mac: str, prefix=None) -> str:
    """Converts a MAC address to an EUI64 identifier.

    If prefix is supplied, further convert the EUI64 address to an IPv6 address.
    eg:
        c4:91:0c:b2:c5:a0 -> c691:0cff:feb2:c5a0
        c4:91:0c:b2:c5:a0, FE80::/10 -> fe80::c691:cff:feb2:c5a0/10

    Arguments:
        mac: The mac address to convert.
        prefix: Prefix to use to create IPv6 address.

    Raises:
        ValueError: If mac or prefix is not correct format.

    Returns:
        An EUI64 address, or IPv6 Prefix.
    """
    if mac.count(":") != 5:
        raise ValueError(
            f"{mac} does not appear to be a correctly formatted mac address"
        )
    # http://tools.ietf.org/html/rfc4291#section-2.5.1
    eui64 = re.sub(r"[.:-]", "", mac).lower()
    eui64 = eui64[0:6] + "fffe" + eui64[6:]
    # Flip the universal/local bit (bit 1 of the first octet) per RFC 4291.
    eui64 = hex(int(eui64[0:2], 16) | 2)[2:].zfill(2) + eui64[2:]

    if not prefix:
        return ":".join(re.findall(r".{4}", eui64))
    net = ipaddress.ip_network(prefix, strict=False)
    # Was int(f"0x{eui64:16}", 16): the width-16 format spec was a no-op for a
    # 16-char string and would space-pad (and break int()) for anything else.
    euil = int(eui64, 16)
    return f"{net[euil]}/{net.prefixlen}"
41 |
42 |
def is_valid_domain(domain: str) -> bool:
    """Verifies if the domain is configured.

    A domain is valid when it is listed in the configured domains AND starts
    with one of the configured domain prefixes.

    Arguments:
        domain: The domain to verify.

    Returns:
        True if the domain is valid, False otherwise.
    """
    # Fetch the configuration once instead of once per check.
    cfg = config.get_config()
    if domain not in cfg.domains:
        return False
    return any(domain.startswith(prefix) for prefix in cfg.domain_prefixes)
58 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 |
3 | services:
4 | mqtt:
5 | image: eclipse-mosquitto:latest
6 | restart: unless-stopped
7 | volumes:
8 | #- ./config/mosquitto:/mosquitto/config
9 | - ./volumes/mosquitto/data:/mosquitto/data
10 | - ./volumes/mosquitto/log:/mosquitto/log
11 | ports:
12 | - "9001:9001"
13 |
14 | broker:
15 | image: ghcr.io/freifunkmuc/wgkex:latest
16 | command: broker
17 | restart: unless-stopped
18 | ports:
19 | - "5000:5000"
20 | #volumes:
21 | #- ./config/broker/wgkex.yaml:/etc/wgkex.yaml
22 | environment:
23 | WGKEX_DOMAINS: ${WGKEX_DOMAINS-ffmuc_muc_cty, ffmuc_muc_nord, ffmuc_muc_ost, ffmuc_muc_sued, ffmuc_muc_west, ffmuc_welt, ffwert_city}
24 | WGKEX_DOMAIN_PREFIXES: ${WGKEX_DOMAIN_PREFIXES-ffmuc_, ffdon_, ffwert_}
25 | WGKEX_DEBUG: ${WGKEX_DEBUG-DEBUG}
26 | MQTT_BROKER_URL: ${MQTT_BROKER_URL-mqtt}
27 | MQTT_BROKER_PORT: ${MQTT_BROKER_PORT-1883}
28 | MQTT_USERNAME: ${MQTT_USERNAME-}
29 | MQTT_PASSWORD: ${MQTT_PASSWORD-}
30 | MQTT_KEEPALIVE: ${MQTT_KEEPALIVE-5}
31 | MQTT_TLS: ${MQTT_TLS-False}
32 |
33 | worker:
34 | image: ghcr.io/freifunkmuc/wgkex:latest
35 | command: worker
36 | restart: unless-stopped
37 | #volumes:
38 | #- ./config/worker/wgkex.yaml:/etc/wgkex.yaml
39 | environment:
40 | WGKEX_DOMAINS: ${WGKEX_DOMAINS-ffmuc_muc_cty, ffmuc_muc_nord, ffmuc_muc_ost, ffmuc_muc_sued, ffmuc_muc_west, ffmuc_welt, ffwert_city}
41 | WGKEX_DOMAIN_PREFIXES: ${WGKEX_DOMAIN_PREFIXES-ffmuc_, ffdon_, ffwert_}
42 | WGKEX_DEBUG: ${WGKEX_DEBUG-DEBUG}
43 | MQTT_BROKER_URL: ${MQTT_BROKER_URL-mqtt}
44 | MQTT_BROKER_PORT: ${MQTT_BROKER_PORT-1883}
45 | MQTT_USERNAME: ${MQTT_USERNAME-}
46 | MQTT_PASSWORD: ${MQTT_PASSWORD-}
47 | MQTT_KEEPALIVE: ${MQTT_KEEPALIVE-5}
48 | MQTT_TLS: ${MQTT_TLS-False}
49 |
--------------------------------------------------------------------------------
/wgkex/worker/BUILD:
--------------------------------------------------------------------------------
1 | load("@rules_python//python:defs.bzl", "py_binary", "py_test")
2 | load("@pip//:requirements.bzl", "requirement")
3 |
4 |
5 | py_library(
6 | name = "netlink",
7 | srcs = ["netlink.py"],
8 | visibility = ["//visibility:public"],
9 | deps = [
10 | requirement("NetLink"),
11 | requirement("paho-mqtt"),
12 | requirement("pyroute2"),
13 | "//wgkex/common:utils",
14 | "//wgkex/common:logger",
15 | "//wgkex/config:config",
16 | ],
17 | )
18 |
19 |
20 | py_test(
21 | name = "netlink_test",
22 | srcs = ["netlink_test.py"],
23 | deps = [
24 | "//wgkex/worker:netlink",
25 | requirement("mock"),
26 | requirement("pyroute2"),
27 | ],
28 | size="small",
29 | )
30 |
31 | py_library(
32 | name = "mqtt",
33 | srcs = ["mqtt.py"],
34 | visibility = ["//visibility:public"],
35 | deps = [
36 | requirement("NetLink"),
37 | requirement("paho-mqtt"),
38 | requirement("pyroute2"),
39 | "//wgkex/common:logger",
40 | "//wgkex/common:mqtt",
41 | "//wgkex/common:utils",
42 | "//wgkex/config:config",
43 | ":msg_queue",
44 | ":netlink",
45 | ],
46 | )
47 |
48 | py_test(
49 | name = "mqtt_test",
50 | srcs = ["mqtt_test.py"],
51 | deps = [
52 | "//wgkex/worker:mqtt",
53 | "//wgkex/worker:msg_queue",
54 | requirement("mock"),
55 | ],
56 | size="small",
57 | )
58 |
59 | py_binary(
60 | name = "app",
61 | srcs = ["app.py"],
62 | deps = [
63 | ":mqtt",
64 | ":msg_queue",
65 | "//wgkex/config:config",
66 | "//wgkex/common:logger",
67 | ],
68 | )
69 |
70 | py_test(
71 | name = "app_test",
72 | srcs = ["app_test.py"],
73 | deps = [
74 | "//wgkex/worker:app",
75 | "//wgkex/worker:msg_queue",
76 | requirement("mock"),
77 | ],
78 | size="small",
79 | )
80 |
81 | py_library(
82 | name = "msg_queue",
83 | srcs = ["msg_queue.py"],
84 | visibility = ["//visibility:public"],
85 | deps = [
86 | "//wgkex/common:logger",
87 | ],
88 | )
89 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build Docker image
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | pull_request:
8 |
9 | env:
10 | REGISTRY: ghcr.io
11 | IMAGE_NAME: ${{ github.repository }}
12 |
13 |
14 | permissions:
15 | contents: read
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - name: Checkout
21 | uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
22 |
23 | - name: Set up QEMU
24 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
25 |
26 | - name: Set up Docker Buildx
27 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
28 | with:
29 | buildkitd-config: .github/buildkitd.toml
30 |
31 | - name: Retrieve author data
32 | id: author
33 | run: |
34 | AUTHOR=$(curl -sSL ${{ github.event.repository.owner.url }} | jq -r '.name')
35 | echo "AUTHOR=$AUTHOR" >> $GITHUB_ENV
36 |
37 | - name: Extract metadata (tags, labels) for Docker
38 | id: meta
39 | uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5
40 | with:
41 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # NOTE(review): `steps.meta.outputs.created` is evaluated while the
          # `meta` step itself is running, so it is always empty here (and
          # metadata-action documents no `created` output). metadata-action
          # already sets org.opencontainers.image.created by default — confirm
          # and drop this line.
          labels: |
            org.opencontainers.image.authors=${{ env.AUTHOR }}
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.created=${{ steps.meta.outputs.created }}
46 |
47 | - name: Build Docker image
48 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
49 | with:
50 | context: .
51 | platforms: linux/amd64
52 | push: false
53 | load: true
54 | cache-from: type=gha
55 | cache-to: type=gha,mode=max
56 | tags: ${{ steps.meta.outputs.tags }}
57 | labels: ${{ steps.meta.outputs.labels }}
58 | build-args: |
59 | VERSION=${{ github.head_ref || github.ref_name }}
60 | COMMIT=${{ github.sha }}
61 |
62 | - name: Inspect Docker image
63 | run: docker image inspect ${{ steps.meta.outputs.tags }}
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Docker image
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | tags:
8 | - 'v*.*.*'
9 |
10 | env:
11 | REGISTRY: ghcr.io
12 | IMAGE_NAME: ${{ github.repository }}
13 |
14 | jobs:
15 | push_to_registry:
16 | name: Push Docker image to GitHub Packages
17 | runs-on: ubuntu-latest
18 | permissions:
19 | packages: write
20 | contents: read
21 | steps:
22 | - name: Checkout
23 | uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
24 |
25 | - name: Set up QEMU
26 | uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
27 | with:
28 | platforms: all
29 |
30 | - name: Set up Docker Buildx
31 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
32 | with:
33 | buildkitd-config: .github/buildkitd.toml
34 |
35 | - name: Login to GitHub Container Registry
36 | uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
37 | with:
38 | registry: ${{ env.REGISTRY }}
39 | username: ${{ github.actor }}
40 | password: ${{ secrets.GITHUB_TOKEN }}
41 |
42 | - name: Retrieve author data
43 | id: author
44 | run: |
45 | AUTHOR=$(curl -sSL ${{ github.event.repository.owner.url }} | jq -r '.name')
46 | echo "AUTHOR=$AUTHOR" >> $GITHUB_ENV
47 |
48 | - name: Extract metadata (tags, labels) for Docker
49 | id: meta
50 | uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5
51 | with:
52 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
53 | tags: |
54 | type=ref,event=branch
55 | type=ref,event=tag
56 | type=sha,prefix=sha-
          # NOTE(review): `steps.meta.outputs.created` references this step's
          # own outputs before they exist (and metadata-action documents no
          # `created` output), so this label resolves empty; metadata-action
          # already sets org.opencontainers.image.created by default — confirm
          # and drop this line.
          labels: |
            org.opencontainers.image.authors=${{ env.AUTHOR }}
            org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}
            org.opencontainers.image.created=${{ steps.meta.outputs.created }}
61 |
62 | - name: Build and push container image
63 | uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
64 | with:
65 | context: .
66 | platforms: linux/amd64,linux/arm64/v8,linux/arm/v7,linux/ppc64le,linux/s390x
67 | push: true
68 | cache-from: type=gha
69 | cache-to: type=gha,mode=max
70 | tags: ${{ steps.meta.outputs.tags }}
71 | labels: ${{ steps.meta.outputs.labels }}
72 | build-args: |
73 | VERSION=${{ github.head_ref || github.ref_name }}
74 | COMMIT=${{ github.sha }}
--------------------------------------------------------------------------------
/wgkex/config/config_test.py:
--------------------------------------------------------------------------------
1 | """Tests for configuration handling class."""
2 |
3 | import unittest
4 |
5 | import mock
6 | import yaml
7 |
8 | from wgkex.config import config
9 |
# A structurally valid configuration containing every key that
# config.Config.from_dict requires.
_VALID_CFG = (
    "domain_prefixes:\n- ffmuc_\n- ffdon_\n- ffwert_\nlog_level: DEBUG\ndomains:\n- a\n- b\nmqtt:\n broker_port: 1883"
    "\n broker_url: mqtt://broker\n keepalive: 5\n password: pass\n tls: true\n username: user\n"
)
# Valid YAML, but the required "domains" key is replaced by an unknown one,
# so Config.from_dict should reject it (exit code 2).
_INVALID_LINT = (
    "domain_prefixes: ffmuc_\nBAD_KEY_FOR_DOMAIN:\n- a\n- b\nmqtt:\n broker_port: 1883\n broker_url: "
    "mqtt://broker\n keepalive: 5\n password: pass\n tls: true\n username: user\n"
)
# Parses as a bare YAML scalar (not a mapping), so linting also rejects it.
_INVALID_CFG = "asdasdasdasd"
19 |
20 |
class TestConfig(unittest.TestCase):
    """Tests for config loading, linting and disk access."""

    def tearDown(self) -> None:
        # Reset the module-level cache so each test re-parses its own config.
        config._parsed_config = None
        return super().tearDown()

    def test_load_config_success(self):
        """Test loads and lint config successfully."""
        mock_open = mock.mock_open(read_data=_VALID_CFG)
        with mock.patch("builtins.open", mock_open):
            self.assertDictEqual(yaml.safe_load(_VALID_CFG), config.get_config().raw)

    @mock.patch.object(config.sys, "exit", autospec=True)
    def test_load_config_fails_good_yaml_bad_format(self, exit_mock):
        """Test loads yaml successfully and fails lint."""
        mock_open = mock.mock_open(read_data=_INVALID_LINT)
        with mock.patch("builtins.open", mock_open):
            config.get_config()
            # Exit code 2 signals "parsed, but failed linting".
            exit_mock.assert_called_with(2)

    @mock.patch.object(config.sys, "exit", autospec=True)
    def test_load_config_fails_bad_yaml(self, exit_mock):
        """Test loads bad YAML."""
        mock_open = mock.mock_open(read_data=_INVALID_CFG)
        with mock.patch("builtins.open", mock_open):
            config.get_config()
            # A bare scalar is valid YAML but not a mapping, so it is the
            # linting step (exit 2), not YAML parsing (exit 1), that rejects it.
            exit_mock.assert_called_with(2)

    def test_fetch_config_from_disk_success(self):
        """Test fetch file from disk."""
        mock_open = mock.mock_open(read_data=_VALID_CFG)
        with mock.patch("builtins.open", mock_open):
            self.assertEqual(config.fetch_config_from_disk(), _VALID_CFG)

    def test_fetch_config_from_disk_fails_file_not_found(self):
        """Test fails on file not found on disk."""
        mock_open = mock.mock_open()
        mock_open.side_effect = FileNotFoundError
        with mock.patch("builtins.open", mock_open):
            with self.assertRaises(config.ConfigFileNotFoundError):
                config.fetch_config_from_disk()

    def test_raw_get_success(self):
        """Test fetch key from configuration."""
        mock_open = mock.mock_open(read_data=_VALID_CFG)
        with mock.patch("builtins.open", mock_open):
            self.assertListEqual(["a", "b"], config.get_config().raw.get("domains"))

    def test_raw_get_no_key_in_config(self):
        """Test fetch non-existent key from configuration."""
        mock_open = mock.mock_open(read_data=_VALID_CFG)
        with mock.patch("builtins.open", mock_open):
            self.assertIsNone(config.get_config().raw.get("key_does_not_exist"))


if __name__ == "__main__":
    unittest.main()
77 |
--------------------------------------------------------------------------------
/wgkex/worker/app_test.py:
--------------------------------------------------------------------------------
1 | """Unit tests for app.py"""
2 |
3 | import threading
4 | import unittest
5 | from time import sleep
6 |
7 | import mock
8 |
9 | from wgkex.worker import app
10 |
11 |
12 | def _get_config_mock(domains=None):
13 | test_prefixes = ["_TEST_PREFIX_", "_TEST_PREFIX2_"]
14 | config_mock = mock.MagicMock()
15 | config_mock.domains = (
16 | domains if domains is not None else [f"{test_prefixes[1]}domain.one"]
17 | )
18 | config_mock.domain_prefixes = test_prefixes
19 | return config_mock
20 |
21 |
class AppTest(unittest.TestCase):
    """unittest.TestCase class"""

    def setUp(self) -> None:
        """set up unittests"""
        # Zero the cleanup interval so flush threads run without waiting.
        app._CLEANUP_TIME = 0

    def test_unique_domains_success(self):
        """Ensure domain suffixes are unique."""
        test_prefixes = ["TEST_PREFIX_", "TEST_PREFIX2_"]
        test_domains = [
            "TEST_PREFIX_DOMAINSUFFIX1",
            "TEST_PREFIX_DOMAINSUFFIX2",
            "TEST_PREFIX2_DOMAINSUFFIX3",
        ]
        self.assertTrue(
            app.check_all_domains_unique(test_domains, test_prefixes),
            "unique domains are not detected unique",
        )

    def test_unique_domains_fail(self):
        """Ensure domain suffixes are not unique."""
        test_prefixes = ["TEST_PREFIX_", "TEST_PREFIX2_"]
        # DOMAINSUFFIX1 appears under both prefixes, so stripping collides.
        test_domains = [
            "TEST_PREFIX_DOMAINSUFFIX1",
            "TEST_PREFIX_DOMAINSUFFIX2",
            "TEST_PREFIX2_DOMAINSUFFIX1",
        ]
        self.assertFalse(
            app.check_all_domains_unique(test_domains, test_prefixes),
            "non-unique domains are detected as unique",
        )

    def test_unique_domains_not_list(self):
        """Ensure domain prefixes are a list."""
        # Deliberately a string, not a list of prefixes.
        test_prefixes = "TEST_PREFIX_, TEST_PREFIX2_"
        test_domains = [
            "TEST_PREFIX_DOMAINSUFFIX1",
            "TEST_PREFIX_DOMAINSUFFIX2",
            "TEST_PREFIX2_DOMAINSUFFIX1",
        ]
        with self.assertRaises(TypeError):
            app.check_all_domains_unique(test_domains, test_prefixes)

    @mock.patch.object(app.config, "get_config")
    @mock.patch.object(app.mqtt, "connect", autospec=True)
    def test_main_success(self, connect_mock, config_mock):
        """Ensure we can execute main."""
        connect_mock.return_value = None
        config_mock.return_value = _get_config_mock()
        with mock.patch.object(app, "flush_workers", return_value=None):
            app.main()
        connect_mock.assert_called()

    @mock.patch.object(app.config, "get_config")
    @mock.patch.object(app.mqtt, "connect", autospec=True)
    def test_main_fails_no_domain(self, connect_mock, config_mock):
        """Ensure we fail when domains are not configured."""
        config_mock.return_value = _get_config_mock(domains=[])
        connect_mock.return_value = None
        with self.assertRaises(app.DomainsNotInConfig):
            app.main()

    @mock.patch.object(app.config, "get_config")
    @mock.patch.object(app.mqtt, "connect", autospec=True)
    def test_main_fails_bad_domain(self, connect_mock, config_mock):
        """Ensure we fail when domains are badly formatted."""
        config_mock.return_value = _get_config_mock(domains=["cant_split_domain"])
        connect_mock.return_value = None
        with self.assertRaises(app.InvalidDomain):
            app.main()
        connect_mock.assert_not_called()

    @mock.patch.object(app, "_CLEANUP_TIME", 1)
    @mock.patch.object(app, "wg_flush_stale_peers")
    def test_flush_workers_doesnt_throw(self, wg_flush_mock):
        """Ensure the flush_workers thread doesn't throw and exit if it encounters an exception."""
        wg_flush_mock.side_effect = AttributeError(
            "'NoneType' object has no attribute 'get'"
        )

        thread = threading.Thread(
            target=app.flush_workers, args=("dummy_domain",), daemon=True
        )
        thread.start()

        # Poll for up to ~2 seconds until the mocked flush has been invoked.
        i = 0
        while i < 20 and not wg_flush_mock.called:
            i += 1
            sleep(0.1)

        wg_flush_mock.assert_called()
        # Assert that the thread hasn't crashed and is still running
        self.assertTrue(thread.is_alive())
        # If Python would allow it without writing custom signalling, this would be the place to stop the thread again


if __name__ == "__main__":
    unittest.main()
121 |
--------------------------------------------------------------------------------
/wgkex/worker/app.py:
--------------------------------------------------------------------------------
1 | """Initialises the MQTT worker."""
2 |
3 | import signal
4 | import sys
5 | import threading
6 | import time
7 | from typing import Text
8 |
9 | from wgkex.common import logger
10 | from wgkex.common.utils import is_valid_domain
11 | from wgkex.config import config
12 | from wgkex.worker import mqtt
13 | from wgkex.worker.msg_queue import watch_queue
14 | from wgkex.worker.netlink import wg_flush_stale_peers
15 |
16 | _CLEANUP_TIME = 3600
17 |
18 |
class Error(Exception):
    """Base Exception handling class."""


class DomainsNotInConfig(Error):
    """If no domains exist in configuration file."""


class PrefixesNotInConfig(Error):
    """If no prefixes exist in configuration file."""


class DomainsAreNotUnique(Error):
    """If non-unique domains exist in configuration file."""


class InvalidDomain(Error):
    """If the domain is invalid and is not listed in the configuration file."""
37 |
38 |
def flush_workers(domain: Text) -> None:
    """Calls peer flush every _CLEANUP_TIME interval.

    Runs forever; intended to be executed in a per-domain daemon thread.

    Arguments:
        domain: The (prefix-stripped) domain whose stale peers are flushed.
    """
    while True:
        try:
            time.sleep(_CLEANUP_TIME)
            logger.info(f"Running cleanup task for {domain}")
            logger.info("Cleaned up domains: %s", wg_flush_stale_peers(domain))
        except Exception as e:
            # Don't crash the thread when an exception is encountered
            logger.error(f"Exception during cleanup task for {domain}:")
            logger.error(e)
50 |
51 |
def clean_up_worker() -> None:
    """Starts one flush_workers cleanup thread per configured domain.

    Reads domains and domain_prefixes from configuration, strips the matching
    prefix from each domain and spawns a daemon thread running flush_workers
    for the stripped name.
    """
    domains = config.get_config().domains
    prefixes = config.get_config().domain_prefixes
    logger.debug("Cleaning up the following domains: %s", domains)
    cleanup_counter = 0
    # ToDo: do we need a check if every domain got cleaned?
    for prefix in prefixes:
        for domain in domains:
            if prefix in domain:
                logger.info("Scheduling cleanup task for %s, ", domain)
                try:
                    cleaned_domain = domain.split(prefix)[1]
                    cleanup_counter += 1
                except IndexError:
                    logger.error(
                        "Cannot strip domain with prefix %s from passed value %s. Skipping cleanup operation",
                        prefix,
                        domain,
                    )
                    continue
                thread = threading.Thread(
                    target=flush_workers, args=(cleaned_domain,), daemon=True
                )
                thread.start()
    if cleanup_counter < len(domains):
        # BUG FIX: the original format string had no %s placeholders for the
        # two arguments, so the domain/prefix lists were never included in the
        # message and the logging call could not format its arguments.
        logger.error(
            "Not every domain got cleaned. Check domains %s for missing prefixes %s",
            repr(domains),
            repr(prefixes),
        )
87 |
88 |
def check_all_domains_unique(domains, prefixes):
    """Checks that domains remain unique once their prefixes are stripped.

    Args:
        domains: List of configured domain names.
        prefixes: List of prefixes to strip from the domains.
    Returns:
        True if every stripped domain is unique, False otherwise.
    Raises:
        PrefixesNotInConfig: If prefixes is empty or missing.
        TypeError: If prefixes is not a list.
    """
    if not prefixes:
        raise PrefixesNotInConfig("Could not locate prefixes in configuration.")
    if not isinstance(prefixes, list):
        raise TypeError("prefixes is not a list")
    seen = []
    for domain in domains:
        for prefix in prefixes:
            if prefix not in domain:
                continue
            stripped = domain.split(prefix)[1]
            if stripped in seen:
                logger.error(
                    f"Domain {domain} is not unique after stripping the prefix"
                )
                return False
            seen.append(stripped)
    return True
113 |
114 |
def main():
    """Starts MQTT listener.

    Raises:
        DomainsNotInConfig: If no domains were found in configuration file.
        DomainsAreNotUnique: If there were non-unique domains after stripping prefix
        InvalidDomain: If a configured domain carries no valid prefix.
    """
    exit_event = threading.Event()

    def on_exit(sig_number, stack_frame) -> None:
        # Signal the MQTT loop to stop, give it a moment, then exit.
        logger.info("Shutting down...")
        exit_event.set()
        time.sleep(2)
        sys.exit()

    signal.signal(signal.SIGINT, on_exit)

    domains = config.get_config().domains
    prefixes = config.get_config().domain_prefixes
    if not domains:
        raise DomainsNotInConfig("Could not locate domains in configuration.")
    if not check_all_domains_unique(domains, prefixes):
        raise DomainsAreNotUnique("There are non-unique domains! Check config.")
    for domain in domains:
        if not is_valid_domain(domain):
            raise InvalidDomain(f"Domain {domain} has invalid prefix.")
    # Start per-domain cleanup threads and the queue watcher, then block on
    # the MQTT connection until exit_event is set.
    clean_up_worker()
    watch_queue()
    mqtt.connect(exit_event)


if __name__ == "__main__":
    main()
148 |
--------------------------------------------------------------------------------
/wgkex/broker/metrics.py:
--------------------------------------------------------------------------------
1 | import dataclasses
2 | from operator import itemgetter
3 | from typing import Any, Dict, Optional, Tuple
4 |
5 | from wgkex.common import logger
6 | from wgkex.common.mqtt import CONNECTED_PEERS_METRIC
7 | from wgkex.config import config
8 |
9 |
@dataclasses.dataclass
class WorkerMetrics:
    """Metrics of a single worker"""

    worker: str
    # domain -> [metric name -> metric data]
    domain_data: Dict[str, Dict[str, Any]] = dataclasses.field(default_factory=dict)
    online: bool = False

    def is_online(self, domain: str = "") -> bool:
        """Whether the worker is online; with a domain given it must also
        report a non-negative connected-peers metric for that domain."""
        if not domain:
            return self.online
        peers = self.get_domain_metrics(domain).get(CONNECTED_PEERS_METRIC, -1)
        return self.online and peers >= 0

    def get_domain_metrics(self, domain: str) -> Dict[str, Any]:
        """Returns the metrics recorded for a domain (empty dict if unknown)."""
        return self.domain_data.get(domain, {})

    def set_metric(self, domain: str, metric: str, value: Any) -> None:
        """Records a single metric value for a domain."""
        self.domain_data.setdefault(domain, {})[metric] = value

    def get_peer_count(self) -> int:
        """Returns the sum of connected peers on this worker over all domains"""
        return sum(
            max(metrics.get(CONNECTED_PEERS_METRIC, 0), 0)
            for metrics in self.domain_data.values()
        )
47 |
48 |
49 | @dataclasses.dataclass
50 | class WorkerMetricsCollection:
51 | """A container for all worker metrics
52 | # TODO make threadsafe / fix data races
53 | """
54 |
55 | # worker -> WorkerMetrics
56 | data: Dict[str, WorkerMetrics] = dataclasses.field(default_factory=dict)
57 |
58 | def get(self, worker: str) -> WorkerMetrics:
59 | return self.data.get(worker, WorkerMetrics(worker=worker))
60 |
61 | def set(self, worker: str, metrics: WorkerMetrics) -> None:
62 | self.data[worker] = metrics
63 |
64 | def update(self, worker: str, domain: str, metric: str, value: Any) -> None:
65 | if worker in self.data:
66 | self.data[worker].set_metric(domain, metric, value)
67 | else:
68 | metrics = WorkerMetrics(worker)
69 | metrics.set_metric(domain, metric, value)
70 | self.data[worker] = metrics
71 |
72 | def set_online(self, worker: str) -> None:
73 | if worker in self.data:
74 | self.data[worker].online = True
75 | else:
76 | metrics = WorkerMetrics(worker)
77 | metrics.online = True
78 | self.data[worker] = metrics
79 |
80 | def set_offline(self, worker: str) -> None:
81 | if worker in self.data:
82 | self.data[worker].online = False
83 |
84 | def get_total_peer_count(self) -> int:
85 | """Returns the sum of connected peers over all workers and domains"""
86 | total = 0
87 | for worker in self.data:
88 | worker_data = self.data.get(worker)
89 | if not worker_data:
90 | continue
91 | for domain in worker_data.domain_data:
92 | total += max(
93 | worker_data.get_domain_metrics(domain).get(
94 | CONNECTED_PEERS_METRIC, 0
95 | ),
96 | 0,
97 | )
98 |
99 | return total
100 |
101 | def get_best_worker(self, domain: str) -> Tuple[Optional[str], int, int]:
102 | """Analyzes the metrics and determines the best worker that a new client should connect to.
103 | The best worker is defined as the one with the most number of clients missing
104 | to its should-be target value according to its weight.
105 |
106 | Returns:
107 | A 3-tuple containing the worker name, difference to target peers, number of connected peers.
108 | The worker name can be None if none is online.
109 | """
110 | # Map metrics to a list of (target diff, peer count, worker) tuples for online workers
111 |
112 | peers_worker_tuples = []
113 | total_peers = self.get_total_peer_count()
114 | worker_cfg = config.get_config().workers
115 |
116 | for wm in self.data.values():
117 | if not wm.is_online(domain):
118 | continue
119 |
120 | peers = wm.get_peer_count()
121 | rel_weight = worker_cfg.relative_worker_weight(wm.worker)
122 | target = rel_weight * total_peers
123 | diff = peers - target
124 | logger.debug(
125 | f"Worker candidate {wm.worker}: current {peers}, target {target} (total {total_peers}, rel weight {rel_weight}), diff {diff}"
126 | )
127 | peers_worker_tuples.append((diff, peers, wm.worker))
128 |
129 | # Sort by diff (ascending), workers with most peers missing to target are sorted first
130 | peers_worker_tuples = sorted(peers_worker_tuples, key=itemgetter(0))
131 |
132 | if len(peers_worker_tuples) > 0:
133 | best = peers_worker_tuples[0]
134 | return best[2], best[0], best[1]
135 | return None, 0, 0
136 |
--------------------------------------------------------------------------------
/wgkex/broker/metrics_test.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import mock
4 |
5 | from wgkex.broker.metrics import WorkerMetricsCollection
6 | from wgkex.config import config
7 |
8 |
class TestMetrics(unittest.TestCase):
    """Tests for WorkerMetrics / WorkerMetricsCollection."""

    @classmethod
    def setUpClass(cls) -> None:
        # Give each test a placeholder config
        test_config = config.Config.from_dict(
            {
                "domains": [],
                "domain_prefixes": "",
                "workers": {},
                "mqtt": {"broker_url": "", "username": "", "password": ""},
            }
        )
        mocked_config = mock.create_autospec(spec=test_config, spec_set=True)
        config._parsed_config = mocked_config

    @classmethod
    def tearDownClass(cls) -> None:
        # Drop the placeholder so later test modules re-parse for real.
        config._parsed_config = None

    def test_set_online_matches_is_online(self):
        """Verify set_online sets worker online and matches result of is_online."""
        worker_metrics = WorkerMetricsCollection()
        worker_metrics.set_online("worker1")

        ret = worker_metrics.get("worker1").is_online()
        self.assertTrue(ret)

    def test_set_offline_matches_is_online(self):
        """Verify set_offline sets worker offline and matches negated result of is_online."""
        worker_metrics = WorkerMetricsCollection()
        worker_metrics.set_offline("worker1")

        ret = worker_metrics.get("worker1").is_online()
        self.assertFalse(ret)

    def test_unkown_is_offline(self):
        """Verify an unknown worker is considered offline."""
        worker_metrics = WorkerMetricsCollection()

        ret = worker_metrics.get("worker1").is_online()
        self.assertFalse(ret)

    def test_set_online_matches_is_online_domain(self):
        """Verify set_online sets worker online and matches result of is_online with domain."""
        worker_metrics = WorkerMetricsCollection()
        worker_metrics.set_online("worker1")
        worker_metrics.update("worker1", "d", "connected_peers", 5)

        ret = worker_metrics.get("worker1").is_online("d")
        self.assertTrue(ret)

    def test_set_online_matches_is_online_offline_domain(self):
        """Verify worker is considered offline if connected_peers for domain is <0."""
        worker_metrics = WorkerMetricsCollection()
        worker_metrics.set_online("worker1")
        worker_metrics.update("worker1", "d", "connected_peers", -1)

        ret = worker_metrics.get("worker1").is_online("d")
        self.assertFalse(ret)

    @mock.patch("wgkex.broker.metrics.config.get_config", autospec=True)
    def test_get_best_worker_returns_best(self, config_mock):
        """Verify get_best_worker returns the worker with least connected clients for equally weighted workers."""
        test_config = mock.MagicMock(spec=config.Config)
        test_config.workers = config.Workers.from_dict({})
        config_mock.return_value = test_config

        worker_metrics = WorkerMetricsCollection()
        worker_metrics.update("1", "d", "connected_peers", 20)
        worker_metrics.update("2", "d", "connected_peers", 19)
        worker_metrics.set_online("1")
        worker_metrics.set_online("2")

        (worker, diff, connected) = worker_metrics.get_best_worker("d")
        self.assertEqual(worker, "2")
        self.assertEqual(diff, -20)  # 19-(1*(20+19))
        self.assertEqual(connected, 19)

    @mock.patch("wgkex.broker.metrics.config.get_config", autospec=True)
    def test_get_best_worker_returns_best_imbalanced_domains(self, config_mock):
        """Verify get_best_worker returns the worker with overall least connected clients even if it has more clients on this domain."""
        test_config = mock.MagicMock(spec=config.Config)
        test_config.workers = config.Workers.from_dict({})
        config_mock.return_value = test_config

        worker_metrics = WorkerMetricsCollection()
        worker_metrics.update("1", "domain1", "connected_peers", 25)
        worker_metrics.update("1", "domain2", "connected_peers", 5)
        worker_metrics.update("2", "domain1", "connected_peers", 20)
        worker_metrics.update("2", "domain2", "connected_peers", 20)
        worker_metrics.set_online("1")
        worker_metrics.set_online("2")

        (worker, diff, connected) = worker_metrics.get_best_worker("domain1")
        self.assertEqual(worker, "1")
        self.assertEqual(diff, -40)  # 30-(1*(25+5+20+20))
        self.assertEqual(connected, 30)

    @mock.patch("wgkex.broker.metrics.config.get_config", autospec=True)
    def test_get_best_worker_weighted_returns_best(self, config_mock):
        """Verify get_best_worker returns the worker with least client differential for weighted workers."""
        test_config = mock.MagicMock(spec=config.Config)
        # Worker 1 carries twice the weight of worker 2.
        test_config.workers = config.Workers.from_dict(
            {"1": {"weight": 84}, "2": {"weight": 42}}
        )
        config_mock.return_value = test_config

        worker_metrics = WorkerMetricsCollection()
        worker_metrics.update("1", "d", "connected_peers", 21)
        worker_metrics.update("2", "d", "connected_peers", 19)
        worker_metrics.set_online("1")
        worker_metrics.set_online("2")

        (worker, _, _) = worker_metrics.get_best_worker("d")
        config_mock.assert_called()
        self.assertEqual(worker, "1")

    def test_get_best_worker_no_worker_online_returns_none(self):
        """Verify get_best_worker returns None if there is no online worker."""
        worker_metrics = WorkerMetricsCollection()
        worker_metrics.update("1", "d", "connected_peers", 20)
        worker_metrics.update("2", "d", "connected_peers", 19)
        worker_metrics.set_offline("1")
        worker_metrics.set_offline("2")

        (worker, _, _) = worker_metrics.get_best_worker("d")
        self.assertIsNone(worker)

    def test_get_best_worker_no_worker_registered_returns_none(self):
        """Verify get_best_worker returns None if there is no online worker."""
        worker_metrics = WorkerMetricsCollection()

        (worker, _, _) = worker_metrics.get_best_worker("d")
        self.assertIsNone(worker)


if __name__ == "__main__":
    unittest.main()
147 |
--------------------------------------------------------------------------------
/wgkex/config/config.py:
--------------------------------------------------------------------------------
1 | """Configuration handling class."""
2 |
3 | import dataclasses
4 | import logging
5 | import os
6 | import sys
7 | from typing import Any, Dict, List, Optional
8 |
9 | import yaml
10 |
11 |
class Error(Exception):
    """Base Exception handling class."""


class ConfigFileNotFoundError(Error):
    """File could not be found on disk."""


# Environment variable that overrides the config file location.
WG_CONFIG_OS_ENV = "WGKEX_CONFIG_FILE"
# Fallback path used when the environment variable is not set.
WG_CONFIG_DEFAULT_LOCATION = "/etc/wgkex.yaml"
22 |
23 |
@dataclasses.dataclass
class Worker:
    """A representation of the values of the 'workers' dict in the configuration file.

    Attributes:
        weight: The relative weight of a worker, defaults to 1.
    """

    weight: int

    @classmethod
    def from_dict(cls, worker_cfg: Dict[str, Any]) -> "Worker":
        """Builds a Worker from its configuration dict.

        Uses .get() so that a missing 'weight' key falls back to the
        documented default of 1 instead of raising KeyError; falsy values
        (None, 0, "") also default to 1.
        """
        weight = worker_cfg.get("weight")
        return cls(weight=int(weight) if weight else 1)
39 |
40 |
41 | @dataclasses.dataclass
42 | class Workers:
43 | """A representation of the 'workers' key in the configuration file.
44 |
45 | Attributes:
46 | total_weight: Calculated on init, the total weight of all configured workers.
47 | """
48 |
49 | total_weight: int
50 | _workers: Dict[str, Worker]
51 |
52 | @classmethod
53 | def from_dict(cls, workers_cfg: Dict[str, Dict[str, Any]]) -> "Workers":
54 | d = {key: Worker.from_dict(value) for (key, value) in workers_cfg.items()}
55 |
56 | total = 0
57 | for worker in d.values():
58 | total += worker.weight
59 | total = max(total, 1)
60 |
61 | return cls(total_weight=total, _workers=d)
62 |
63 | def get(self, worker: str) -> Optional[Worker]:
64 | return self._workers.get(worker)
65 |
66 | def relative_worker_weight(self, worker_name: str) -> float:
67 | worker = self.get(worker_name)
68 | if worker is None:
69 | return 1 / self.total_weight
70 | return worker.weight / self.total_weight
71 |
72 |
@dataclasses.dataclass
class BrokerListen:
    """A representation of the 'broker_listen' key in Configuration file.

    Attributes:
        host: The listen address the broker should listen to for the HTTP API.
        port: The port the broker should listen to for the HTTP API.
    """

    host: Optional[str]
    port: Optional[int]

    @classmethod
    def from_dict(cls, broker_listen: Dict[str, Any]) -> "BrokerListen":
        """Builds a BrokerListen from config; absent keys become None."""
        host = broker_listen.get("host")
        port = broker_listen.get("port")
        return cls(host=host, port=port)
91 |
92 |
@dataclasses.dataclass
class MQTT:
    """A representation of the 'mqtt' key in Configuration file.

    Attributes:
        broker_url: The broker URL for MQTT to connect to.
        username: The username to use for MQTT.
        password: The password to use for MQTT.
        tls: If TLS is used or not.
        broker_port: The port for MQTT to connect on.
        keepalive: The keepalive in seconds to use.
    """

    broker_url: str
    username: str
    password: str
    tls: bool = False
    broker_port: int = 1883
    keepalive: int = 5

    @classmethod
    def from_dict(cls, mqtt_cfg: Dict[str, str]) -> "MQTT":
        """Builds an MQTT configuration object from its config dict.

        Args:
            mqtt_cfg: The 'mqtt' section of the configuration file; the
                broker_url, username and password keys are required.

        Returns:
            An MQTT configuration object; optional keys fall back to the
            dataclass defaults.
        """
        tls = bool(mqtt_cfg.get("tls", cls.tls))
        broker_port = int(mqtt_cfg.get("broker_port", cls.broker_port))
        keepalive = int(mqtt_cfg.get("keepalive", cls.keepalive))
        return cls(
            broker_url=mqtt_cfg["broker_url"],
            username=mqtt_cfg["username"],
            password=mqtt_cfg["password"],
            tls=tls,
            broker_port=broker_port,
            keepalive=keepalive,
        )
131 |
132 |
133 | @dataclasses.dataclass
134 | class Config:
135 | """A representation of the configuration file.
136 |
137 | Attributes:
138 | domains: The list of domains to listen for.
139 | domain_prefixes: The list of prefixes to pre-pend to a given domain.
140 | mqtt: The MQTT configuration.
141 | workers: The worker weights configuration (broker-only).
142 | externalName: The publicly resolvable domain name or public IP address of this worker (worker-only).
143 | """
144 |
145 | raw: Dict[str, Any]
146 | domains: List[str]
147 | domain_prefixes: List[str]
148 | broker_listen: BrokerListen
149 | mqtt: MQTT
150 | workers: Workers
151 | external_name: Optional[str]
152 |
153 | @classmethod
154 | def from_dict(cls, cfg: Dict[str, Any]) -> "Config":
155 | """Creates a Config object from a configuration file.
156 | Arguments:
157 | cfg: The configuration file as a dict.
158 | Returns:
159 | A Config object.
160 | """
161 | broker_listen = BrokerListen.from_dict(cfg.get("broker_listen", {}))
162 | mqtt_cfg = MQTT.from_dict(cfg["mqtt"])
163 | workers_cfg = Workers.from_dict(cfg.get("workers", {}))
164 | return cls(
165 | raw=cfg,
166 | domains=cfg["domains"],
167 | domain_prefixes=cfg["domain_prefixes"],
168 | broker_listen=broker_listen,
169 | mqtt=mqtt_cfg,
170 | workers=workers_cfg,
171 | external_name=cfg.get("externalName"),
172 | )
173 |
174 | def get(self, key: str) -> Any:
175 | """Get the value of key from the raw dict representation of the config file"""
176 | return self.raw.get(key)
177 |
178 |
179 | _parsed_config: Optional[Config] = None
180 |
181 |
def get_config() -> Config:
    """Returns a parsed Config object.

    The result is cached in the module-level _parsed_config, so the file is
    read and parsed at most once per process.

    Raises:
        ConfigFileNotFoundError: If we could not find the configuration file on disk.
    Returns:
        The Config representation of the config file
    """
    global _parsed_config
    if _parsed_config is None:
        cfg_contents = fetch_config_from_disk()
        try:
            # Any valid YAML document (even a bare scalar) parses successfully
            # here; structural problems surface in the lint step below.
            config = yaml.safe_load(cfg_contents)
        except yaml.YAMLError as e:
            print("Failed to load YAML file: %s" % e)
            sys.exit(1)  # exit code 1: unparseable YAML
        try:
            config = Config.from_dict(config)
        except (KeyError, TypeError, AttributeError) as e:
            print("Failed to lint file: %s" % e)
            sys.exit(2)  # exit code 2: YAML parsed but required keys/shape missing
        _parsed_config = config
    return _parsed_config
205 |
206 |
def fetch_config_from_disk() -> str:
    """Fetches config file from disk and returns as string.

    The path is taken from the WGKEX config environment variable when set,
    otherwise the default location is used.

    Raises:
        ConfigFileNotFoundError: If we could not find the configuration file on disk.
    Returns:
        The file contents as string.
    """
    path = os.environ.get(WG_CONFIG_OS_ENV, WG_CONFIG_DEFAULT_LOCATION)
    logging.debug("getting config_file: %s", repr(path))
    try:
        with open(path, "r") as handle:
            return handle.read()
    except FileNotFoundError as e:
        raise ConfigFileNotFoundError(
            f"Could not locate configuration file in {path}"
        ) from e
224 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.toptal.com/developers/gitignore/api/python,bazel,pycharm
2 | # Edit at https://www.toptal.com/developers/gitignore?templates=python,bazel,pycharm
3 |
4 | ### Bazel ###
5 | # gitignore template for Bazel build system
6 | # website: https://bazel.build/
7 |
8 | # Ignore all bazel-* symlinks. There is no full list since this can change
9 | # based on the name of the directory bazel is cloned into.
10 | /bazel-*
11 |
12 | # Directories for the Bazel IntelliJ plugin containing the generated
# IntelliJ project files and plugin configuration. Separate directories are
14 | # for the IntelliJ, Android Studio and CLion versions of the plugin.
15 | /.ijwb/
16 | /.aswb/
17 | /.clwb/
18 |
19 | ### PyCharm ###
20 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
21 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
22 |
23 | # User-specific stuff
24 | .idea/**/workspace.xml
25 | .idea/**/tasks.xml
26 | .idea/**/usage.statistics.xml
27 | .idea/**/dictionaries
28 | .idea/**/shelf
29 |
30 | # AWS User-specific
31 | .idea/**/aws.xml
32 |
33 | # Generated files
34 | .idea/**/contentModel.xml
35 |
36 | # Sensitive or high-churn files
37 | .idea/**/dataSources/
38 | .idea/**/dataSources.ids
39 | .idea/**/dataSources.local.xml
40 | .idea/**/sqlDataSources.xml
41 | .idea/**/dynamic.xml
42 | .idea/**/uiDesigner.xml
43 | .idea/**/dbnavigator.xml
44 |
45 | # Gradle
46 | .idea/**/gradle.xml
47 | .idea/**/libraries
48 |
49 | # Gradle and Maven with auto-import
50 | # When using Gradle or Maven with auto-import, you should exclude module files,
51 | # since they will be recreated, and may cause churn. Uncomment if using
52 | # auto-import.
53 | # .idea/artifacts
54 | # .idea/compiler.xml
55 | # .idea/jarRepositories.xml
56 | # .idea/modules.xml
57 | # .idea/*.iml
58 | # .idea/modules
59 | # *.iml
60 | # *.ipr
61 |
62 | # CMake
63 | cmake-build-*/
64 |
65 | # Mongo Explorer plugin
66 | .idea/**/mongoSettings.xml
67 |
68 | # File-based project format
69 | *.iws
70 |
71 | # IntelliJ
72 | out/
73 |
74 | # mpeltonen/sbt-idea plugin
75 | .idea_modules/
76 |
77 | # JIRA plugin
78 | atlassian-ide-plugin.xml
79 |
80 | # Cursive Clojure plugin
81 | .idea/replstate.xml
82 |
83 | # SonarLint plugin
84 | .idea/sonarlint/
85 |
86 | # Crashlytics plugin (for Android Studio and IntelliJ)
87 | com_crashlytics_export_strings.xml
88 | crashlytics.properties
89 | crashlytics-build.properties
90 | fabric.properties
91 |
92 | # Editor-based Rest Client
93 | .idea/httpRequests
94 |
95 | # Android studio 3.1+ serialized cache file
96 | .idea/caches/build_file_checksums.ser
97 |
98 | ### PyCharm Patch ###
99 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
100 |
101 | # *.iml
102 | # modules.xml
103 | # .idea/misc.xml
104 | # *.ipr
105 |
106 | # Sonarlint plugin
107 | # https://plugins.jetbrains.com/plugin/7973-sonarlint
108 | .idea/**/sonarlint/
109 |
110 | # SonarQube Plugin
111 | # https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
112 | .idea/**/sonarIssues.xml
113 |
114 | # Markdown Navigator plugin
115 | # https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
116 | .idea/**/markdown-navigator.xml
117 | .idea/**/markdown-navigator-enh.xml
118 | .idea/**/markdown-navigator/
119 |
120 | # Cache file creation bug
121 | # See https://youtrack.jetbrains.com/issue/JBR-2257
122 | .idea/$CACHE_FILE$
123 |
124 | # CodeStream plugin
125 | # https://plugins.jetbrains.com/plugin/12206-codestream
126 | .idea/codestream.xml
127 |
128 | # Azure Toolkit for IntelliJ plugin
129 | # https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
130 | .idea/**/azureSettings.xml
131 |
132 | ### Python ###
133 | # Byte-compiled / optimized / DLL files
134 | __pycache__/
135 | *.py[cod]
136 | *$py.class
137 |
138 | # C extensions
139 | *.so
140 |
141 | # Distribution / packaging
142 | .Python
143 | build/
144 | develop-eggs/
145 | dist/
146 | downloads/
147 | eggs/
148 | .eggs/
149 | lib/
150 | lib64/
151 | parts/
152 | sdist/
153 | var/
154 | wheels/
155 | share/python-wheels/
156 | *.egg-info/
157 | .installed.cfg
158 | *.egg
159 | MANIFEST
160 |
161 | # PyInstaller
162 | # Usually these files are written by a python script from a template
163 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
164 | *.manifest
165 | *.spec
166 |
167 | # Installer logs
168 | pip-log.txt
169 | pip-delete-this-directory.txt
170 |
171 | # Unit test / coverage reports
172 | htmlcov/
173 | .tox/
174 | .nox/
175 | .coverage
176 | .coverage.*
177 | .cache
178 | nosetests.xml
179 | coverage.xml
180 | *.cover
181 | *.py,cover
182 | .hypothesis/
183 | .pytest_cache/
184 | cover/
185 |
186 | # Translations
187 | *.mo
188 | *.pot
189 |
190 | # Django stuff:
191 | *.log
192 | local_settings.py
193 | db.sqlite3
194 | db.sqlite3-journal
195 |
196 | # Flask stuff:
197 | instance/
198 | .webassets-cache
199 |
200 | # Scrapy stuff:
201 | .scrapy
202 |
203 | # Sphinx documentation
204 | docs/_build/
205 |
206 | # PyBuilder
207 | .pybuilder/
208 | target/
209 |
210 | # Jupyter Notebook
211 | .ipynb_checkpoints
212 |
213 | # IPython
214 | profile_default/
215 | ipython_config.py
216 |
217 | # pyenv
218 | # For a library or package, you might want to ignore these files since the code is
219 | # intended to run in multiple environments; otherwise, check them in:
220 | .python-version
221 |
222 | # pipenv
223 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
224 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
225 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
226 | # install all needed dependencies.
227 | #Pipfile.lock
228 |
229 | # poetry
230 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
231 | # This is especially recommended for binary packages to ensure reproducibility, and is more
232 | # commonly ignored for libraries.
233 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
234 | #poetry.lock
235 |
236 | # pdm
237 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
238 | #pdm.lock
239 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
240 | # in version control.
241 | # https://pdm.fming.dev/#use-with-ide
242 | .pdm.toml
243 |
244 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
245 | __pypackages__/
246 |
247 | # Celery stuff
248 | celerybeat-schedule
249 | celerybeat.pid
250 |
251 | # SageMath parsed files
252 | *.sage.py
253 |
254 | # Environments
255 | .env
256 | .venv
257 | env/
258 | venv/
259 | ENV/
260 | env.bak/
261 | venv.bak/
262 |
263 | # Spyder project settings
264 | .spyderproject
265 | .spyproject
266 |
267 | # Rope project settings
268 | .ropeproject
269 |
270 | # mkdocs documentation
271 | /site
272 |
273 | # mypy
274 | .mypy_cache/
275 | .dmypy.json
276 | dmypy.json
277 |
278 | # Pyre type checker
279 | .pyre/
280 |
281 | # pytype static type analyzer
282 | .pytype/
283 |
284 | # Cython debug symbols
285 | cython_debug/
286 |
287 | # PyCharm
288 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
289 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
290 | # and can be added to the global gitignore or merged into this file. For a more nuclear
291 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
292 | .idea/
293 |
294 | ### Python Patch ###
295 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
296 | poetry.toml
297 |
298 | # ruff
299 | .ruff_cache/
300 |
301 | # LSP config files
302 | pyrightconfig.json
303 |
304 | # End of https://www.toptal.com/developers/gitignore/api/python,bazel,pycharm
305 |
306 | # docker-compose
307 | docker-compose.override.yaml
308 | # docker-compose volumes
309 | /volumes
310 | # docker-compose config
311 | /config
312 |
313 | # config file
314 | wgkex.yaml
315 |
--------------------------------------------------------------------------------
/wgkex/worker/mqtt_test.py:
--------------------------------------------------------------------------------
1 | """Unit tests for mqtt.py"""
2 |
3 | import socket
4 | import threading
5 | import unittest
6 | from time import sleep
7 |
8 | import mock
9 | import paho.mqtt.client
10 | import pyroute2.netlink.exceptions
11 |
12 | from wgkex.common.mqtt import TOPIC_CONNECTED_PEERS
13 | from wgkex.worker import mqtt
14 |
15 |
16 | def _get_config_mock(domains=None, mqtt=None):
17 | test_prefixes = ["_ffmuc_", "_TEST_PREFIX2_"]
18 | config_mock = mock.MagicMock()
19 | config_mock.domains = (
20 | domains if domains is not None else [f"{test_prefixes[0]}domain.one"]
21 | )
22 | config_mock.domain_prefixes = test_prefixes
23 | if mqtt:
24 | config_mock.mqtt = mqtt
25 | return config_mock
26 |
27 |
class MQTTTest(unittest.TestCase):
    """Tests for the worker-side MQTT module: connect, callbacks, metrics loop."""

    @mock.patch.object(mqtt.mqtt, "Client")
    @mock.patch.object(mqtt.socket, "gethostname")
    @mock.patch.object(mqtt, "get_config")
    def test_connect_success(self, config_mock, hostname_mock, mqtt_mock):
        """Tests successful connection to MQTT server."""
        hostname_mock.return_value = "hostname"
        config_mqtt_mock = mock.MagicMock()
        config_mqtt_mock.broker_url = "some_url"
        config_mqtt_mock.broker_port = 1833
        config_mqtt_mock.keepalive = False
        config_mock.return_value = _get_config_mock(mqtt=config_mqtt_mock)
        ee = threading.Event()
        mqtt.connect(ee)
        # Signal shutdown so the background threads started by connect() exit.
        ee.set()
        mqtt_mock.assert_has_calls(
            [mock.call().connect("some_url", port=1833, keepalive=False)],
            any_order=True,
        )

    @mock.patch.object(mqtt.mqtt, "Client")
    @mock.patch.object(mqtt, "get_config")
    def test_connect_fails_mqtt_error(self, config_mock, mqtt_mock):
        """Tests failure for connect - ValueError."""
        # The Client constructor itself raises, so connect() must propagate it.
        mqtt_mock.side_effect = ValueError("barf")
        config_mqtt_mock = mock.MagicMock()
        config_mqtt_mock.broker_url = "some_url"
        config_mock.return_value = _get_config_mock(mqtt=config_mqtt_mock)
        with self.assertRaises(ValueError):
            mqtt.connect(threading.Event())

    @mock.patch.object(mqtt.mqtt, "Client")
    @mock.patch.object(mqtt, "get_config")
    @mock.patch.object(mqtt, "get_device_data")
    def test_on_connect_subscribes(
        self, get_device_data_mock, config_mock, mqtt_client_mock
    ):
        """Test that the on_connect callback correctly subscribes to all domains and pushes device data"""
        config_mqtt_mock = mock.MagicMock()
        config_mqtt_mock.broker_url = "some_url"
        config_mqtt_mock.broker_port = 1833
        config_mqtt_mock.keepalive = False
        config = _get_config_mock(mqtt=config_mqtt_mock)
        # No externalName configured -> worker should fall back to hostname.
        config.external_name = None
        config_mock.return_value = config
        get_device_data_mock.return_value = (51820, "456asdf=", "fe80::1")

        hostname = socket.gethostname()

        mqtt.on_connect(mqtt.mqtt.Client(), None, None, 0)

        mqtt_client_mock.assert_has_calls(
            [
                mock.call().subscribe("wireguard/_ffmuc_domain.one/+"),
                mock.call().publish(
                    f"wireguard-worker/{hostname}/_ffmuc_domain.one/data",
                    '{"ExternalAddress": "%s", "Port": 51820, "PublicKey": "456asdf=", "LinkAddress": "fe80::1"}'
                    % hostname,
                    qos=1,
                    retain=True,
                ),
                mock.call().publish(
                    f"wireguard-worker/{hostname}/status", 1, qos=1, retain=True
                ),
            ]
        )

    @mock.patch.object(mqtt, "get_config")
    @mock.patch.object(mqtt, "get_connected_peers_count")
    def test_publish_metrics_loop_success(self, conn_peers_mock, config_mock):
        """Tests that the metrics loop publishes the peer count and stops on exit_event."""
        config_mock.return_value = _get_config_mock()
        conn_peers_mock.return_value = 20
        mqtt_client = mock.MagicMock(spec=paho.mqtt.client.Client)

        ee = threading.Event()
        thread = threading.Thread(
            target=mqtt.publish_metrics_loop,
            args=(ee, mqtt_client, "_ffmuc_domain.one"),
        )
        thread.start()

        # Poll for up to ~2s until the loop has published at least once.
        i = 0
        while i < 20 and not mqtt_client.publish.called:
            i += 1
            sleep(0.1)

        conn_peers_mock.assert_called_with("wg-domain.one")
        mqtt_client.publish.assert_called_with(
            TOPIC_CONNECTED_PEERS.format(
                domain="_ffmuc_domain.one", worker=socket.gethostname()
            ),
            20,
            retain=True,
        )

        ee.set()

        # Wait for the loop thread to notice exit_event and terminate.
        i = 0
        while i < 20 and thread.is_alive():
            i += 1
            sleep(0.1)

        self.assertFalse(thread.is_alive())

    @mock.patch.object(mqtt, "_METRICS_SEND_INTERVAL", 0.02)
    @mock.patch.object(mqtt, "get_config")
    @mock.patch.object(mqtt, "get_connected_peers_count")
    def test_publish_metrics_loop_no_exception(self, conn_peers_mock, config_mock):
        """Tests that an exception doesn't interrupt the loop"""
        config_mock.return_value = _get_config_mock()
        conn_peers_mock.side_effect = Exception("Mocked exception")
        mqtt_client = mock.MagicMock(spec=paho.mqtt.client.Client)

        ee = threading.Event()
        thread = threading.Thread(
            target=mqtt.publish_metrics_loop,
            args=(ee, mqtt_client, "_ffmuc_domain.one"),
        )
        thread.start()

        # Two recorded calls prove the loop survived the first exception.
        i = 0
        while i < 20 and not len(conn_peers_mock.mock_calls) >= 2:
            i += 1
            sleep(0.1)

        self.assertTrue(
            len(conn_peers_mock.mock_calls) >= 2,
            "get_connected_peers_count must be called at least twice",
        )

        mqtt_client.publish.assert_not_called()

        ee.set()

        i = 0
        while i < 20 and thread.is_alive():
            i += 1
            sleep(0.1)

        self.assertFalse(thread.is_alive())

    @mock.patch.object(mqtt, "get_config")
    @mock.patch.object(mqtt, "get_connected_peers_count")
    def test_publish_metrics_NetlinkDumpInterrupted(self, conn_peers_mock, config_mock):
        """Tests that a NetlinkDumpInterrupted error is swallowed without publishing."""
        config_mock.return_value = _get_config_mock()
        conn_peers_mock.side_effect = (
            pyroute2.netlink.exceptions.NetlinkDumpInterrupted()
        )
        mqtt_client = mock.MagicMock(spec=paho.mqtt.client.Client)

        domain = mqtt.get_config().domains[0]
        hostname = socket.gethostname()
        topic = TOPIC_CONNECTED_PEERS.format(domain=domain, worker=hostname)

        # Must not raise NetlinkDumpInterrupted, but handle gracefully by doing nothing
        mqtt.publish_metrics(mqtt_client, topic, domain)

        mqtt_client.publish.assert_not_called()

    @mock.patch.object(mqtt, "get_config")
    def test_on_message_wireguard_success(self, config_mock):
        # Tests on_message for success.
        config_mock.return_value = _get_config_mock()
        mqtt_msg = mock.patch.object(mqtt.mqtt, "MQTTMessage")
        mqtt_msg.topic = "wireguard/_ffmuc_domain1/gateway"
        mqtt_msg.payload = b"PUB_KEY"
        mqtt.on_message_wireguard(None, None, mqtt_msg)
        # The handler strips the domain prefix and queues (domain, key).
        self.assertTrue(mqtt.q.qsize() > 0)
        item = mqtt.q.get_nowait()
        self.assertEqual(item, ("domain1", "PUB_KEY"))
198 |
199 |
200 | """ @mock.patch.object(msg_queue, "link_handler")
201 | @mock.patch.object(mqtt, "get_config")
202 | def test_on_message_wireguard_fails_no_domain(self, config_mock, link_mock):
203 | # Tests on_message for failure to parse domain.
204 | config_mqtt_mock = mock.MagicMock()
205 | config_mqtt_mock.broker_url = "mqtt://broker"
206 | config_mqtt_mock.broker_port = 1883
207 | config_mqtt_mock.keepalive = 5
208 | config_mqtt_mock.password = "pass"
209 | config_mqtt_mock.tls = True
210 | config_mqtt_mock.username = "user"
211 | config_mock.return_value = _get_config_mock(
212 | domains=["a", "b"], mqtt=config_mqtt_mock
213 | )
214 | link_mock.return_value = dict(WireGuard="result")
215 | mqtt_msg = mock.patch.object(mqtt.mqtt, "MQTTMessage")
216 | mqtt_msg.topic = "wireguard/bad_domain_match"
217 | with self.assertRaises(ValueError):
218 | mqtt.on_message_wireguard(None, None, mqtt_msg)
219 | """
220 |
221 |
222 | if __name__ == "__main__":
223 | unittest.main()
224 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # WGKex
2 |
3 | [](https://coveralls.io/github/freifunkMUC/wgkex?branch=main)
4 | [](https://github.com/freifunkMUC/wgkex/actions/workflows/pylint.yml)
5 | [](https://github.com/freifunkMUC/wgkex/actions/workflows/black.yml)
6 | [](https://github.com/freifunkMUC/wgkex/actions/workflows/bazel.yml)
7 |
8 | - [WGKex](#wgkex)
9 | - [Overview](#overview)
10 | - [Frontend broker](#frontend-broker)
11 | - [Backend worker](#backend-worker)
12 | - [Installation](#installation)
13 | - [Configuration](#configuration)
14 | - [Development](#development)
15 | - [Build using Bazel](#build-using-bazel)
16 | - [Updating PIP dependencies for Bazel](#updating-pip-dependencies-for-bazel)
17 | - [Unit tests](#unit-tests)
18 | - [Run Worker \& Broker using Bazel](#run-worker--broker-using-bazel)
19 | - [Run Worker \& Broker using Python](#run-worker--broker-using-python)
20 | - [Client](#client)
21 | - [MQTT topics](#mqtt-topics)
22 | - [Contact](#contact)
23 |
24 | wgkex was started as a WireGuard key exchange tool, but has evolved beyond to the main node management tool of FFMUC.
25 |
26 | ## Overview
27 |
28 | WireGuard Key Exchange is a tool consisting of two parts: a frontend (broker) and a backend (worker). These components
29 | communicate to each other via MQTT - a messaging bus.
30 |
31 | ```mermaid
32 | graph TD;
33 | A{"client"} -->|"RESTful API"| G("WGKex Broker")
34 | G -->|"publish"| B("Mosquitto")
35 | C("WGKex Worker") -->|"Subscribe"| B
36 | C -->|"Route Injection"| D["netlink (pyroute2)"]
37 | C -->|"Peer Creation"| E["wireguard (pyroute2)"]
38 | C -->|"VxLAN FDB Entry"| F["VXLAN FDB (pyroute2)"]
39 | ```
40 |
41 | ### Frontend broker
42 |
43 | The frontend broker is where the client can push (register) its key before connecting. These keys are then pushed into
44 | an MQTT bus for all workers to consume.
45 |
46 | The frontend broker exposes the following API endpoints for use:
47 |
48 | ```
49 | /api/v1/wg/key/exchange
50 | /api/v2/wg/key/exchange
51 | ```
52 |
53 | The listen address and port for the Flask server can be configured in `wgkex.yaml` under the `broker_listen` key:
54 |
55 | ```yaml
56 | broker_listen:
57 | # host defaults to 127.0.0.1 if unspecified
58 | host: 0.0.0.0
59 | # port defaults to 5000 if unspecified
60 | port: 5000
61 | ```
62 |
63 | #### POST /api/v1/wg/key/exchange
64 |
65 | JSON POST'd to this endpoint should be in this format:
66 |
67 | ```json
68 | {
69 | "domain": "CONFIGURED_DOMAIN",
70 | "public_key": "PUBLIC_KEY"
71 | }
72 | ```
73 |
74 | The broker will validate the domain and public key, and if valid, will push the key onto the MQTT bus.
75 |
76 |
77 | #### POST /api/v2/wg/key/exchange
78 |
79 | JSON POST'd to this endpoint should be in this format:
80 |
81 | ```json
82 | {
83 | "domain": "CONFIGURED_DOMAIN",
84 | "public_key": "PUBLIC_KEY"
85 | }
86 | ```
87 |
88 | The broker will validate the domain and public key, and if valid, will push the key onto the MQTT bus.
89 | Additionally it chooses a worker (aka gateway, endpoint) that the client should connect to.
90 | The response is JSON data containing the connection details for the chosen gateway:
91 |
92 | ```json
93 | {
94 | "Endpoint": {
95 | "Address": "GATEWAY_ADDRESS",
96 | "Port": "GATEWAY_WIREGUARD_PORT",
97 | "AllowedIPs": [
98 | "GATEWAY_WIREGUARD_INTERFACE_ADDRESS"
99 | ],
100 | "PublicKey": "GATEWAY_PUBLIC_KEY"
101 | }
102 | }
103 | ```
104 |
105 | ### Backend worker
106 |
107 | The backend (worker) waits for new keys to appear on the MQTT message bus. Once a new key appears, the worker performs
108 | validation task on the key, then injects those keys into a WireGuard instance(While also updating the VxLAN FDB).
109 | It reports metrics like number of connected peers and instance data like local address, WG listening port and
110 | external domain name (configured in config.yml) back to the broker.
111 | Each worker must run on a machine with a unique hostname, as it is used for separation of metrics.
112 |
113 | This tool is intended to facilitate running BATMAN over VXLAN over WireGuard as a means to create encrypted
114 | high-performance mesh links.
115 |
116 | For further information, please see this [presentation on the architecture](https://www.slideshare.net/AnnikaWickert/ffmuc-goes-wild-infrastructure-recap-2020-rc3)
117 |
118 | ## Installation
119 |
120 | - TBA
121 |
122 | ## Configuration
123 |
124 | - Configuration file
125 |
126 | The `wgkex` configuration file defaults to `/etc/wgkex.yaml` ([Sample configuration file](wgkex.yaml.example)), however
127 | can also be overwritten by setting the environment variable `WGKEX_CONFIG_FILE`.
128 |
129 | ## Development
130 |
131 | ### Build using [Bazel](https://bazel.build)
132 |
133 | ```sh
134 | # modify .bazelversion if you want to test another version of Bazel (using Bazelisk)
135 | bazel build //wgkex/broker:app
136 | bazel build //wgkex/worker:app
137 | # artifacts will be at ./bazel-bin/wgkex/{broker,worker}/app respectively
138 | ```
139 |
140 | ### Updating PIP dependencies for Bazel
141 |
142 | This package is using Bazel's `compile_pip_requirements` to get a requirements_lock.txt file.
In order to update the respective dependencies after modifying the requirements.txt, run:
144 |
145 | ```sh
146 | bazel run //:requirements.update
147 | ```
148 |
149 | ### Unit tests
150 |
151 | The test can be run using
152 |
153 | ```sh
154 | bazel test ... --test_output=all
155 | ```
156 |
157 | or
158 |
159 | ```sh
160 | python3 -m unittest discover -p '*_test.py'
161 | ```
162 |
163 | ### Run Worker & Broker using Bazel
164 |
165 | 1. After having built the broker and worker (see above),
166 | set up dummy interfaces for the worker using this script:
167 |
168 | ```sh
169 | interface_linklocal() {
170 | # We generate a predictable v6 address
171 | local macaddr="$(echo $1 | wg pubkey |md5sum|sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/02:\1:\2:\3:\4:\5/')"
172 | local oldIFS="$IFS"; IFS=':'; set -- $macaddr; IFS="$oldIFS"
173 | echo "fe80::$1$2:$3ff:fe$4:$5$6"
174 | }
175 |
176 | sudo ip link add wg-welt type wireguard
177 | wg genkey | sudo wg set wg-welt private-key /dev/stdin
178 | sudo wg set wg-welt listen-port 51820
179 | addr=$(interface_linklocal $(sudo wg show wg-welt private-key))
180 | sudo ip addr add $addr dev wg-welt
181 | sudo ip link add vx-welt type vxlan id 99 dstport 0 local $addr dev wg-welt
182 | sudo ip addr add fe80::1/64 dev vx-welt
183 | sudo ip link set wg-welt up
184 | sudo ip link set vx-welt up
185 | ```
186 |
187 | 2. Generate a development configuration:
188 |
189 | ```sh
190 | sed -E '/(ffmuc_muc|ffwert|ffdon)/d' wgkex.yaml.example > wgkex.yaml
191 | ```
192 |
193 | 3. Run the broker in a terminal:
194 |
195 | ```sh
196 | # defaults to /etc/wgkex.yaml if not set
197 | export WGKEX_CONFIG_FILE=$PWD/wgkex.yaml
198 | ./bazel-bin/wgkex/broker/app
199 | ```
200 |
201 | 4. And run the worker in a second terminal:
202 |
203 | ```sh
204 | export WGKEX_CONFIG_FILE=$PWD/wgkex.yaml
205 | # the worker requires admin permissions to read interfaces
206 | sudo -E ./bazel-bin/wgkex/worker/app
207 | ```
208 |
209 | ### Run Worker & Broker using Python
210 |
Follow the steps above to generate the new config and export the `WGKEX_CONFIG_FILE`,
212 | then start the broker and worker like the following:
213 |
214 | - Broker (Using Flask development server)
215 |
216 | ```sh
217 | FLASK_ENV=development FLASK_DEBUG=1 FLASK_APP=wgkex/broker/app.py python3 -m flask run
218 | ```
219 |
220 | - Worker
221 |
222 | ```sh
223 | python3 -c 'from wgkex.worker.app import main; main()'
224 | ```
225 |
226 | ### Client
227 |
228 | The client can be used via CLI:
229 |
230 | ```sh
231 | wget -q -O- --post-data='{"domain": "ffmuc_welt","public_key": "o52Ge+Rpj4CUSitVag9mS7pSXUesNM0ESnvj/wwehkg="}' 'http://127.0.0.1:5000/api/v2/wg/key/exchange'
232 | ```
233 |
234 | and it should output something similar to:
235 |
236 | ```json
237 | {
238 | "Endpoint": {
239 | "Address": "gw04.ext.ffmuc.net",
240 | "AllowedIPs": [
241 | "fe80::27c:16ff:fec0:6c74"
242 | ],
243 | "Port": "40011",
244 | "PublicKey": "TszFS3oFRdhsJP3K0VOlklGMGYZy+oFCtlaghXJqW2g="
245 | }
246 | }
247 | ```
248 |
249 | Or use Python instead of wget:
250 |
251 | ```python
252 | import requests
253 | key_data = {"domain": "ffmuc_welt","public_key": "o52Ge+Rpj4CUSitVag9mS7pSXUesNM0ESnvj/wwehkg="}
254 | broker_url = "http://127.0.0.1:5000"
push_key = requests.post(f'{broker_url}/api/v2/wg/key/exchange', json=key_data)
256 | print(f'Key push was: {push_key.json().get("Message")}')
257 | ```
258 |
259 | ### MQTT topics
260 |
261 | - Publishing keys broker->worker: `wireguard/{domain}/{worker}`
262 | - Publishing metrics worker->broker: `wireguard-metrics/{domain}/{worker}/connected_peers`
263 | - Publishing worker status: `wireguard-worker/{worker}/status`
264 | - Publishing worker data: `wireguard-worker/{worker}/{domain}/data`
265 |
266 | ## Contact
267 |
268 | [Freifunk Munich Mattermost](https://chat.ffmuc.net)
269 |
--------------------------------------------------------------------------------
/wgkex/worker/mqtt.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Process messages from MQTT."""
3 |
4 | # TODO(ruairi): Deprecate __init__.py from config, as it masks namespace.
5 | import json
6 | import re
7 | import socket
8 | import threading
9 | from typing import Any, Optional
10 |
11 | import paho.mqtt.client as mqtt
12 | import pyroute2.netlink.exceptions
13 |
14 | from wgkex.common import logger
15 | from wgkex.common.mqtt import (
16 | TOPIC_CONNECTED_PEERS,
17 | TOPIC_WORKER_STATUS,
18 | TOPIC_WORKER_WG_DATA,
19 | )
20 | from wgkex.config.config import get_config
21 | from wgkex.worker.msg_queue import q
22 | from wgkex.worker.netlink import (
23 | get_connected_peers_count,
24 | get_device_data,
25 | )
26 |
# Hostname identifies this worker: it is used as the MQTT client id and in topic names.
_HOSTNAME = socket.gethostname()
# Seconds between successive connected-peers metric publications.
_METRICS_SEND_INTERVAL = 60
29 |
30 |
def connect(exit_event: threading.Event) -> None:
    """Connect to MQTT, start background worker threads and run the network loop.

    Blocks in client.loop_forever() until the client disconnects.

    Arguments:
        exit_event: A threading.Event that signals application shutdown.
    """
    base_config = get_config().mqtt
    broker_address = base_config.broker_url
    broker_port = base_config.broker_port
    broker_username = base_config.username
    broker_password = base_config.password
    broker_keepalive = base_config.keepalive
    # paho-mqtt 2.x takes the callback API version as the FIRST positional
    # argument and the client id second. The previous argument order passed the
    # hostname as the API version, which raises ValueError at runtime
    # (hidden in tests because the Client class is mocked there).
    client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION1, _HOSTNAME)
    domains = get_config().domains

    # Register LWT to set worker status down when losing connection
    client.will_set(TOPIC_WORKER_STATUS.format(worker=_HOSTNAME), 0, qos=1, retain=True)

    # Register handlers
    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    client.on_message = on_message
    client.message_callback_add("wireguard/#", on_message_wireguard)
    logger.info("connecting to broker %s", broker_address)

    client.username_pw_set(broker_username, broker_password)
    client.connect(broker_address, port=broker_port, keepalive=broker_keepalive)

    # Start background threads that should not be restarted on reconnect

    # Mark worker as offline on graceful shutdown, after exit_event is set
    def mark_offline_on_exit(exit_event: threading.Event):
        exit_event.wait()
        if client.is_connected():
            logger.info("Marking worker as down")
            client.publish(
                TOPIC_WORKER_STATUS.format(worker=_HOSTNAME), 0, qos=1, retain=True
            )

    mark_offline_on_exit_thread = threading.Thread(
        target=mark_offline_on_exit, args=(exit_event,)
    )
    mark_offline_on_exit_thread.start()

    for domain in domains:
        # Schedule repeated metrics publishing
        metrics_thread = threading.Thread(
            target=publish_metrics_loop, args=(exit_event, client, domain)
        )
        metrics_thread.start()

    client.loop_forever()
83 |
84 |
def on_disconnect(client: mqtt.Client, userdata: Any, rc):
    """Logs MQTT disconnect events.

    This matches the paho callback signature for MQTT v3.1 / v3.1.1:
    disconnect_callback(client, userdata, rc); MQTT v5.0 would additionally
    receive a properties argument.

    Arguments:
        client: the client instance for this callback
        userdata: the private user data as set in Client() or userdata_set()
        rc: the disconnection result. MQTT_ERR_SUCCESS (0) means the callback
            was triggered by an explicit disconnect() call; any other value
            indicates an unexpected disconnection, e.g. a network error.
    """
    logger.debug(f"Disconnected with result code {rc}")
104 |
105 |
106 | # The callback for when the client receives a CONNACK response from the server.
def on_connect(client: mqtt.Client, userdata: Any, flags, rc) -> None:
    """Handles MQTT connect: subscribes to domain topics and publishes worker data.

    Arguments:
        client: the client instance for this callback.
        userdata: the private user data.
        flags: The MQTT flags.
        rc: The MQTT rc.
    """
    logger.debug("Connected with result code " + str(rc))
    # Fetch the config once instead of three separate get_config() calls.
    config = get_config()
    domains = config.domains

    # Fall back to the hostname when no externalName is configured.
    own_external_name = (
        config.external_name if config.external_name is not None else _HOSTNAME
    )

    for domain in domains:
        # Subscribing in on_connect() means that if we lose the connection and
        # reconnect then subscriptions will be renewed.
        topic = f"wireguard/{domain}/+"
        logger.info(f"Subscribing to topic {topic}")
        client.subscribe(topic)

    for domain in domains:
        # Publish worker data (WG pubkeys, ports, local addresses)
        iface = wg_interface_name(domain)
        if iface:
            (port, public_key, link_address) = get_device_data(iface)
            data = {
                "ExternalAddress": own_external_name,
                "Port": port,
                "PublicKey": public_key,
                "LinkAddress": link_address,
            }
            client.publish(
                TOPIC_WORKER_WG_DATA.format(worker=_HOSTNAME, domain=domain),
                json.dumps(data),
                qos=1,
                retain=True,
            )
        else:
            logger.error(
                f"Could not get interface name for domain {domain}. Skipping worker data publication"
            )

    # Mark worker as online
    client.publish(TOPIC_WORKER_STATUS.format(worker=_HOSTNAME), 1, qos=1, retain=True)
156 |
157 |
def on_message(client: mqtt.Client, userdata: Any, message: mqtt.MQTTMessage) -> None:
    """Fallback handler for MQTT messages that do not match any other registered handler.

    Arguments:
        client: the client instance for this callback.
        userdata: the private user data.
        message: The MQTT message.
    """
    # Nothing to do beyond recording that an unexpected topic was seen.
    logger.info("Got unhandled message on %s from MQTT", message.topic)
168 |
169 |
def on_message_wireguard(
    client: mqtt.Client, userdata: Any, message: mqtt.MQTTMessage
) -> None:
    """Processes messages from MQTT and forwards them to netlink.

    Extracts the domain from the topic by matching the configured domain
    prefixes, then enqueues (domain, key) for the netlink worker.

    Arguments:
        client: the client instance for this callback.
        userdata: the private user data.
        message: The MQTT message.

    Raises:
        ValueError: If no configured domain prefix matches the topic.
    """
    # TODO(ruairi): Check bounds and raise exception here.
    logger.debug("Got message on %s from MQTT", message.topic)

    domain_prefixes = get_config().domain_prefixes
    match = None
    for domain_prefix in domain_prefixes:
        # re.escape: prefixes are plain config strings, not regex fragments,
        # so metacharacters in a prefix must be matched literally.
        match = re.search(r".*/" + re.escape(domain_prefix) + r"(\w+)/", message.topic)
        if match:
            break
    if not match:
        raise ValueError(
            f"Could not find a match for {domain_prefixes} on {message.topic}"
        )
    # this will not work, if we have non-unique prefix stripped domains
    domain = match.group(1)
    logger.debug("Found domain %s", domain)
    key = str(message.payload.decode("utf-8"))
    logger.info(
        f"Received create message for key {key} on domain {domain} adding to queue"
    )
    q.put((domain, key))
200 |
201 |
def publish_metrics_loop(
    exit_event: threading.Event, client: mqtt.Client, domain: str
) -> None:
    """Continuously send metrics every METRICS_SEND_INTERVAL seconds for this gateway and the given domain."""
    logger.info("Scheduling metrics task for %s, ", domain)

    topic = TOPIC_CONNECTED_PEERS.format(domain=domain, worker=_HOSTNAME)

    while not exit_event.is_set():
        try:
            publish_metrics(client, topic, domain)
        except Exception as exc:
            # Never let a single failed publish kill the metrics thread.
            logger.error(f"Exception during publish metrics task for {domain}:")
            logger.error(exc)
        finally:
            # Waiting on the event (instead of sleeping) lets shutdown
            # interrupt the pause early; the slight drift over time is fine.
            exit_event.wait(_METRICS_SEND_INTERVAL)

    # Set peers metric to -1 to mark worker as offline
    # Use QoS 1 (at least once) to make sure the broker notices
    client.publish(topic, -1, qos=1, retain=True)
224 |
225 |
def publish_metrics(client: mqtt.Client, topic: str, domain: str) -> None:
    """Publish metrics for this gateway and the given domain.

    The metrics currently only consist of the number of connected peers.

    Arguments:
        client: The MQTT client to publish with.
        topic: The topic to publish the peer count to.
        domain: The domain to collect metrics for.
    """
    logger.debug(f"Publishing metrics for domain {domain}")
    iface = wg_interface_name(domain)
    if not iface:
        logger.error(
            f"Could not get interface name for domain {domain}. Skipping metrics publication"
        )
        return

    try:
        peer_count = get_connected_peers_count(iface)
    except pyroute2.netlink.exceptions.NetlinkDumpInterrupted:
        # Handle gracefully, don't update metrics.
        # Fixed: this message previously lacked the f-prefix and logged the
        # literal text "{domain}".
        logger.info(
            f"Caught NetlinkDumpInterrupted exception while collecting metrics for domain {domain}"
        )
        return

    # Publish metrics, retain it at MQTT broker so restarted wgkex broker has metrics right away
    client.publish(topic, peer_count, retain=True)
250 |
251 |
def wg_interface_name(domain: str) -> Optional[str]:
    """Calculates the WireGuard interface name for a domain"""
    domain_prefixes = get_config().domain_prefixes
    cleaned_domain = None
    for prefix in domain_prefixes:
        # Take everything after the first occurrence of the prefix, if any.
        parts = domain.split(prefix)
        if len(parts) > 1:
            cleaned_domain = parts[1]
            break
    if not cleaned_domain:
        raise ValueError(f"Could not find a match for {domain_prefixes} on {domain}")
    # this will not work, if we have non-unique prefix stripped domains
    return f"wg-{cleaned_domain}"
266 |
--------------------------------------------------------------------------------
/wgkex/worker/netlink.py:
--------------------------------------------------------------------------------
1 | """Functions related to netlink manipulation for Wireguard, IPRoute and FDB on Linux."""
2 |
3 | # See https://docs.pyroute2.org/iproute.html for a documentation of the layout of netlink responses
4 | import hashlib
5 | import re
6 | from dataclasses import dataclass
7 | from datetime import datetime, timedelta
8 | from textwrap import wrap
9 | from typing import Dict, List, Tuple
10 |
11 | import pyroute2
12 | import pyroute2.netlink
13 | import pyroute2.netlink.exceptions
14 |
15 | from wgkex.common import logger
16 | from wgkex.common.utils import mac2eui64
17 |
18 | _PEER_TIMEOUT_HOURS = 3
19 |
20 |
@dataclass
class WireGuardClient:
    """A Class representing a WireGuard client.

    Attributes:
        public_key: The public key to use for this client.
        domain: The domain for this client.
        remove: If this is to be removed or not.
    """

    public_key: str
    domain: str
    remove: bool

    @property
    def lladdr(self) -> str:
        """Compute the IPv6 link-local /128 address derived from the public key.

        Returns:
            IPv6 Link-Local address of the WireGuard peer.
        """
        digest = hashlib.md5(self.public_key.encode("ascii") + b"\n").hexdigest()
        # "02" sets the locally-administered bit; the next five octets come
        # from the first bytes of the key hash.
        mac_addr = ":".join(["02"] + wrap(digest, 2)[:5])
        eui64_addr = mac2eui64(mac=mac_addr, prefix="fe80::/10")
        return re.sub(r"/\d+$", "/128", eui64_addr)

    @property
    def vx_interface(self) -> str:
        """Returns the name of the VxLAN interface associated with this lladdr."""
        return f"vx-{self.domain}"

    @property
    def wg_interface(self) -> str:
        """Returns the WireGuard peer interface."""
        return f"wg-{self.domain}"
61 |
62 |
def wg_flush_stale_peers(domain: str) -> List[Dict]:
    """Removes stale peers.

    Arguments:
        domain: The domain to detect peers on.

    Returns:
        The outcome dicts from link_handler() for each removed peer.
    """
    logger.info("Searching for stale clients for %s", domain)
    # list(...) instead of a copying identity comprehension (idiom/perf).
    stale_clients = list(find_stale_wireguard_clients("wg-" + domain))
    logger.debug("Found %s stale clients: %s", len(stale_clients), stale_clients)
    stale_wireguard_clients = [
        WireGuardClient(public_key=stale_client, domain=domain, remove=True)
        for stale_client in stale_clients
    ]
    logger.debug("Found stale WireGuard clients: %s", stale_wireguard_clients)
    logger.info("Processing clients.")
    link_handled = [
        link_handler(stale_client) for stale_client in stale_wireguard_clients
    ]
    logger.debug("Handled the following clients: %s", link_handled)
    return link_handled
88 |
89 |
# pyroute2 stuff
def link_handler(client: WireGuardClient) -> Dict:
    """Updates fdb, route and WireGuard peers tables for a given WireGuard peer.

    Arguments:
        client: A WireGuard peer to manipulate.
    Returns:
        The outcome of each operation.
    """
    # Updates WireGuard peers first, then routes, then the bridge FDB.
    results = {"Wireguard": update_wireguard_peer(client)}
    logger.debug("Handling links for %s", client)
    try:
        results["Route"] = route_handler(client)
        logger.info("Updated route for %s", client)
    except Exception as e:
        # TODO(ruairi): re-raise exception here.
        logger.error("Failed to update route for %s (%s)", client, e)
        results["Route"] = e
    results["Bridge FDB"] = bridge_fdb_handler(client)
    logger.debug("Updated Bridge FDB for %s", client)
    return results
115 |
116 |
def bridge_fdb_handler(client: WireGuardClient) -> Dict:
    """Handles updates of FDB info towards WireGuard peers.

    Note that set will remove an FDB entry if remove is set to True.

    Arguments:
        client: The WireGuard peer to update.

    Returns:
        A dict.
    """
    # TODO(ruairi): Splice this into an add_ and remove_ function.
    operation = "del" if client.remove else "append"
    with pyroute2.IPRoute() as ip:
        vx_ifindex = ip.link_lookup(ifname=client.vx_interface)[0]
        wg_ifindex = ip.link_lookup(ifname=client.wg_interface)[0]
        # fdb wants a bare address: strip the /128 prefix length.
        destination = re.sub(r"/\d+$", "", client.lladdr)
        return ip.fdb(
            operation,
            ifindex=vx_ifindex,
            lladdr="00:00:00:00:00:00",
            dst=destination,
            NDA_IFINDEX=wg_ifindex,
        )
137 |
138 |
def update_wireguard_peer(client: WireGuardClient) -> Dict:
    """Handles updates of WireGuard peers to netlink.

    Note that set will remove a peer if remove is set to True.

    Arguments:
        client: The WireGuard peer to update.

    Returns:
        A dict.
    """
    # TODO(ruairi): Splice this into an add_ and remove_ function.
    peer_spec = dict(
        public_key=client.public_key,
        allowed_ips=[client.lladdr],
        remove=client.remove,
    )
    with pyroute2.WireGuard() as wg:
        return wg.set(client.wg_interface, peer=peer_spec)
158 |
159 |
def route_handler(client: WireGuardClient) -> Dict:
    """Handles updates of routes towards WireGuard peers.

    Note that set will remove a route if remove is set to True.

    Arguments:
        client: The WireGuard peer to update.

    Returns:
        A dict.
    """
    # TODO(ruairi): Determine what Exceptions are raised by ip.route
    # TODO(ruairi): Splice this into an add_ and remove_ function.
    action = "del" if client.remove else "replace"
    with pyroute2.IPRoute() as ip:
        interface_index = ip.link_lookup(ifname=client.wg_interface)[0]
        return ip.route(action, dst=client.lladdr, oif=interface_index)
179 |
180 |
def find_stale_wireguard_clients(wg_interface: str) -> List:
    """Fetches and returns a list of peers which have not had recent handshakes.

    Arguments:
        wg_interface: The WireGuard interface to query.

    Returns:
        Public keys of peers whose last handshake is older than
        _PEER_TIMEOUT_HOURS.
    """
    cutoff_epoch = int(
        (datetime.now() - timedelta(hours=_PEER_TIMEOUT_HOURS)).timestamp()
    )
    logger.info(
        "Starting search for stale wireguard peers for interface %s.", wg_interface
    )
    with pyroute2.WireGuard() as wg:
        msgs = wg.info(wg_interface)
        logger.debug("Got infos for stale peers: %s.", msgs)
        all_peers = []
        for msg in msgs:
            peers = msg.get_attr("WGDEVICE_A_PEERS")
            logger.debug("Got clients: %s.", peers)
            if peers:
                all_peers.extend(peers)
        stale_keys = []
        for peer in all_peers:
            handshake = peer.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME")
            if handshake is not None and handshake.get("tv_sec", int()) < cutoff_epoch:
                stale_keys.append(peer.get_attr("WGPEER_A_PUBLIC_KEY").decode("utf-8"))
        return stale_keys
212 |
213 |
def get_connected_peers_count(wg_interface: str) -> int:
    """Fetches and returns the number of connected peers, i.e. which had recent handshakes.

    Arguments:
        wg_interface: The WireGuard interface to query.

    Returns:
        The number of peers which have recently seen a handshake.

    Raises:
        NetlinkDumpInterrupted if the interface data has changed while it was being returned by netlink
    """
    cutoff_epoch = int((datetime.now() - timedelta(minutes=3)).timestamp())
    logger.info("Counting connected wireguard peers for interface %s.", wg_interface)
    with pyroute2.WireGuard() as wg:
        try:
            msgs = wg.info(wg_interface)
        except pyroute2.netlink.exceptions.NetlinkDumpInterrupted:
            # Normal behaviour, data has changed while it was being returned by netlink.
            # Retry once, don't catch the exception this time, and let the caller handle it.
            # See https://github.com/svinota/pyroute2/issues/874
            msgs = wg.info(wg_interface)

        logger.debug("Got infos for connected peers: %s.", msgs)
        count = 0
        for msg in msgs:
            peers = msg.get_attr("WGDEVICE_A_PEERS")
            logger.debug("Got clients: %s.", peers)
            for peer in peers or []:
                handshake = peer.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME")
                if handshake is None:
                    continue
                if handshake.get("tv_sec", int()) > cutoff_epoch:
                    count += 1

        return count
252 |
253 |
def get_device_data(wg_interface: str) -> Tuple[int, str, str]:
    """Returns the listening port, public key and local IP address.

    Arguments:
        wg_interface: The WireGuard interface to query.

    Returns:
        The listening port, public key, and local IP address of the WireGuard interface.
    """
    logger.info("Reading data from interface %s.", wg_interface)
    with pyroute2.WireGuard() as wg, pyroute2.IPRoute() as ipr:
        msgs = wg.info(wg_interface)
        logger.debug("Got infos for interface data: %s.", msgs)
        if len(msgs) > 1:
            logger.warning(
                "Got multiple messages from netlink, expected one. Using only first one."
            )
        info: pyroute2.netlink.nla = msgs[0]

        port = int(info.get_attr("WGDEVICE_A_LISTEN_PORT"))
        public_key = info.get_attr("WGDEVICE_A_PUBLIC_KEY").decode("ascii")

        # Get link address using IPRoute
        ipr_link = ipr.link_lookup(ifname=wg_interface)[0]
        msgs = ipr.get_addr(index=ipr_link)
        link_address = msgs[0].get_attr("IFA_ADDRESS")

        # Fixed: the closing quote after the link address was missing.
        logger.debug(
            "Interface data: port '%s', public key '%s', link address '%s'",
            port,
            public_key,
            link_address,
        )

        return (port, public_key, link_address)
289 |
--------------------------------------------------------------------------------
/wgkex/broker/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """wgkex broker"""
3 |
4 | import dataclasses
5 | import json
6 | import re
7 | from typing import Any, Dict, Tuple
8 |
9 | import paho.mqtt.client as mqtt_client
10 | from flask import Flask, Response, render_template, request
11 | from flask.app import Flask as Flask_app
12 | from flask_mqtt import Mqtt
13 | from waitress import serve
14 |
15 | from wgkex.broker.metrics import WorkerMetricsCollection
16 | from wgkex.common import logger
17 | from wgkex.common.mqtt import (
18 | CONNECTED_PEERS_METRIC,
19 | TOPIC_WORKER_STATUS,
20 | TOPIC_WORKER_WG_DATA,
21 | )
22 | from wgkex.common.utils import is_valid_domain
23 | from wgkex.config import config
24 |
25 | WG_PUBKEY_PATTERN = re.compile(r"^[A-Za-z0-9+/]{42}[AEIMQUYcgkosw480]=$")
26 |
27 |
@dataclasses.dataclass
class KeyExchange:
    """A key exchange message.

    Attributes:
        public_key: The public key for this exchange.
        domain: The domain for this exchange.
    """

    public_key: str
    domain: str

    @classmethod
    def from_dict(cls, msg: dict) -> "KeyExchange":
        """Creates a new KeyExchange message from dict.

        Arguments:
            msg: The message to convert.
        Returns:
            A KeyExchange object.
        """
        # Validate the key first, then the domain, as before.
        checked_key = is_valid_wg_pubkey(msg.get("public_key"))
        requested_domain = str(msg.get("domain"))
        if not is_valid_domain(requested_domain):
            raise ValueError(f"Domain {requested_domain} not in configured domains.")
        return cls(public_key=checked_key, domain=requested_domain)
54 |
55 |
def _fetch_app_config() -> Flask_app:
    """Creates the Flask app from configuration.

    Returns:
        A created Flask app.
    """
    app = Flask(__name__)
    mqtt_cfg = config.get_config().mqtt
    # Map the wgkex MQTT settings onto the Flask-MQTT config keys.
    app.config.update(
        MQTT_BROKER_URL=mqtt_cfg.broker_url,
        MQTT_BROKER_PORT=mqtt_cfg.broker_port,
        MQTT_USERNAME=mqtt_cfg.username,
        MQTT_PASSWORD=mqtt_cfg.password,
        MQTT_KEEPALIVE=mqtt_cfg.keepalive,
        MQTT_TLS_ENABLED=mqtt_cfg.tls,
    )
    return app
71 |
72 |
# Flask app configured from the wgkex config file; Mqtt attaches the broker connection.
app = _fetch_app_config()
mqtt = Mqtt(app)
# Per-worker metrics (e.g. connected peer counts) collected from MQTT updates.
worker_metrics = WorkerMetricsCollection()
# Latest WireGuard endpoint data keyed by (worker, domain).
worker_data: Dict[Tuple[str, str], Dict] = {}
77 |
78 |
@app.route("/", methods=["GET"])
def index() -> str:
    """Returns main page"""
    page = render_template("index.html")
    return page
83 |
84 |
@app.route("/api/v1/wg/key/exchange", methods=["POST"])
def wg_api_v1_key_exchange() -> Tuple[Response | Dict, int]:
    """Retrieves a new key and validates.
    Returns:
        Status message.
    """
    try:
        exchange = KeyExchange.from_dict(request.get_json(force=True))
    except Exception as ex:
        logger.error(
            "Exception occurred in /api/v1/wg/key/exchange: %s", ex, exc_info=True
        )
        error_body = {
            "error": {
                "message": "An internal error has occurred. Please try again later."
            }
        }
        return error_body, 400

    # in case we want to decide here later we want to publish it only to dedicated gateways
    gateway = "all"
    logger.info(
        f"wg_api_v1_key_exchange: Domain: {exchange.domain}, Key:{exchange.public_key}"
    )

    mqtt.publish(f"wireguard/{exchange.domain}/{gateway}", exchange.public_key)
    return {"Message": "OK"}, 200
111 |
112 |
@app.route("/api/v2/wg/key/exchange", methods=["POST"])
def wg_api_v2_key_exchange() -> Tuple[Response | Dict, int]:
    """Retrieves a new key, validates it and responds with a worker/gateway the client should connect to.

    Returns:
        Status message, Endpoint with address/domain, port pubic key and link address.
    """
    try:
        exchange = KeyExchange.from_dict(request.get_json(force=True))
    except Exception as ex:
        logger.error(
            "Exception occurred in /api/v2/wg/key/exchange: %s", ex, exc_info=True
        )
        return {
            "error": {
                "message": "An internal error has occurred. Please try again later."
            }
        }, 400

    key = exchange.public_key
    domain = exchange.domain
    # in case we want to decide here later we want to publish it only to dedicated gateways
    gateway = "all"
    logger.info(f"wg_api_v2_key_exchange: Domain: {domain}, Key:{key}")

    mqtt.publish(f"wireguard/{domain}/{gateway}", key)

    # Pick the worker with the fewest connected peers for this domain.
    best_worker, diff, current_peers = worker_metrics.get_best_worker(domain)
    if best_worker is None:
        logger.warning(f"No worker online for domain {domain}")
        return {
            "error": {
                "message": "no gateway online for this domain, please check the domain value and try again later"
            }
        }, 400

    # Update number of peers locally to interpolate data between MQTT updates from the worker
    # TODO fix data race
    domain_metrics = worker_metrics.get(best_worker).get_domain_metrics(domain)
    current_peers_domain = domain_metrics.get(CONNECTED_PEERS_METRIC, 0)
    worker_metrics.update(
        best_worker, domain, CONNECTED_PEERS_METRIC, current_peers_domain + 1
    )
    logger.debug(
        f"Chose worker {best_worker} with {current_peers} connected clients ({diff})"
    )

    w_data = worker_data.get((best_worker, domain), None)
    if w_data is None:
        logger.error(f"Couldn't get worker endpoint data for {best_worker}/{domain}")
        return {"error": {"message": "could not get gateway data"}}, 500

    endpoint = {
        "Address": w_data.get("ExternalAddress"),
        "Port": str(w_data.get("Port")),
        "AllowedIPs": [w_data.get("LinkAddress")],
        "PublicKey": w_data.get("PublicKey"),
    }

    return {"Endpoint": endpoint}, 200
176 |
177 |
@mqtt.on_connect()
def handle_mqtt_connect(
    client: mqtt_client.Client, userdata: bytes, flags: Any, rc: Any
) -> None:
    """Prints status of connect message."""
    # TODO(ruairi): Clarify current usage of this function.
    broker_url = app.config["MQTT_BROKER_URL"]
    broker_port = app.config["MQTT_BROKER_PORT"]
    logger.debug("MQTT connected to {}:{}".format(broker_url, broker_port))
    # Re-subscribe on every (re)connect so subscriptions survive reconnects.
    mqtt.subscribe("wireguard-metrics/#")
    mqtt.subscribe(TOPIC_WORKER_STATUS.format(worker="+"))
    mqtt.subscribe(TOPIC_WORKER_WG_DATA.format(worker="+", domain="+"))
192 |
193 |
@mqtt.on_topic("wireguard-metrics/#")
def handle_mqtt_message_metrics(
    client: mqtt_client.Client, userdata: bytes, message: mqtt_client.MQTTMessage
) -> None:
    """Processes published metrics from workers.

    Expects topics of the form wireguard-metrics/<domain>/<worker>/<metric>
    with an integer payload. Malformed messages are logged and dropped
    instead of raising inside the MQTT callback.
    """
    logger.debug(
        f"MQTT message received on {message.topic}: {message.payload.decode()}"
    )
    parts = message.topic.split("/", 3)
    if len(parts) != 4:
        # Previously a short topic raised ValueError on unpacking.
        logger.error(f"Ignored MQTT metrics message with invalid topic {message.topic}")
        return
    _, domain, worker, metric = parts
    if not is_valid_domain(domain):
        logger.error(f"Domain {domain} not in configured domains")
        return

    if not worker or not metric:
        logger.error("Ignored MQTT message with empty worker or metrics label")
        return

    try:
        data = int(message.payload)
    except ValueError:
        # Previously a non-integer payload raised inside the callback.
        logger.error(
            f"Ignored MQTT metrics message with non-integer payload on {message.topic}"
        )
        return

    logger.info(f"Update worker metrics: {metric} on {worker}/{domain} = {data}")
    worker_metrics.update(worker, domain, metric, data)
215 |
216 |
@mqtt.on_topic(TOPIC_WORKER_STATUS.format(worker="+"))
def handle_mqtt_message_status(
    client: mqtt_client.Client, userdata: bytes, message: mqtt_client.MQTTMessage
) -> None:
    """Processes status messages from workers."""
    _, worker, _ = message.topic.split("/", 2)

    status = int(message.payload)
    currently_online = worker_metrics.get(worker).is_online()
    # Only log and flip state on actual transitions.
    if status < 1 and currently_online:
        logger.warning(f"Marking worker as offline: {worker}")
        worker_metrics.set_offline(worker)
    elif status >= 1 and not currently_online:
        logger.warning(f"Marking worker as online: {worker}")
        worker_metrics.set_online(worker)
231 |
232 |
@mqtt.on_topic(TOPIC_WORKER_WG_DATA.format(worker="+", domain="+"))
def handle_mqtt_message_data(
    client: mqtt_client.Client, userdata: bytes, message: mqtt_client.MQTTMessage
) -> None:
    """Processes data messages from workers.

    Stores them in a local dict"""
    _, worker, domain, _ = message.topic.split("/", 3)
    if not is_valid_domain(domain):
        logger.error(f"Domain {domain} not in configured domains.")
        return

    try:
        data = json.loads(message.payload)
    except json.JSONDecodeError as ex:
        # A malformed (e.g. retained) payload must not crash the callback.
        logger.error("Invalid JSON in worker data for %s/%s: %s", worker, domain, ex)
        return
    if not isinstance(data, dict):
        logger.error("Invalid worker data received for %s/%s: %s", worker, domain, data)
        return

    logger.info("Worker data received for %s/%s: %s", worker, domain, data)
    worker_data[(worker, domain)] = data
252 |
253 |
@mqtt.on_message()
def handle_mqtt_message(
    client: mqtt_client.Client, userdata: bytes, message: mqtt_client.MQTTMessage
) -> None:
    """Prints message contents."""
    payload_text = message.payload.decode()
    logger.debug(f"MQTT message received on {message.topic}: {payload_text}")
262 |
263 |
def is_valid_wg_pubkey(pubkey: str) -> str:
    """Verifies if key is a valid WireGuard public key or not.

    Arguments:
        pubkey: The key to verify.

    Raises:
        ValueError: If the Wireguard Key is invalid.

    Returns:
        The public key.
    """
    # TODO(ruairi): Refactor to return bool.
    # A Match object is always truthy, so this is equivalent to "is None".
    if not WG_PUBKEY_PATTERN.match(pubkey):
        raise ValueError(f"Not a valid Wireguard public key: {pubkey}.")
    return pubkey
280 |
281 |
def join_host_port(host: str, port: str) -> str:
    """Concatenate a port string with a host string using a colon.
    The host may be either a hostname, IPv4 or IPv6 address.
    An IPv6 address as host will be automatically encapsulated in square brackets.

    Returns:
        The joined host:port string
    """
    if ":" in host:
        # IPv6 literals contain colons, so bracket them to disambiguate.
        return f"[{host}]:{port}"
    return f"{host}:{port}"
293 |
294 |
if __name__ == "__main__":
    # Bind address/port come from the optional broker_listen config section;
    # waitress falls back to its defaults when either is None.
    listen_config = config.get_config().broker_listen
    if listen_config is not None:
        listen_host, listen_port = listen_config.host, listen_config.port
    else:
        listen_host = listen_port = None

    serve(app, host=listen_host, port=listen_port)
305 |
--------------------------------------------------------------------------------
/wgkex/worker/netlink_test.py:
--------------------------------------------------------------------------------
1 | """Unit tests for netlink.py"""
2 |
3 | # pyroute2 decides imports based on platform. WireGuard is specific to Linux only. Mock pyroute2.WireGuard so that
4 | # any testing platform can execute tests.
5 | import sys
6 | import unittest
7 | from datetime import datetime, timedelta
8 |
9 | import mock
10 | import pyroute2.netlink.exceptions as pyroute2_netlink_exceptions
11 |
12 | pyroute2_module_mock = mock.MagicMock()
13 | pyroute2_module_mock.netlink.exceptions = pyroute2_netlink_exceptions
14 | sys.modules["pyroute2"] = pyroute2_module_mock
15 | sys.modules["pyroute2.netlink"] = mock.MagicMock()
16 | from pyroute2 import IPRoute, WireGuard # noqa: E402
17 |
18 | from wgkex.worker import netlink # noqa: E402
19 |
# Canned WireGuard clients reused across the tests: one addition, one removal.
_WG_CLIENT_ADD = netlink.WireGuardClient(
    public_key="public_key", domain="add", remove=False
)
_WG_CLIENT_DEL = netlink.WireGuardClient(
    public_key="public_key", domain="del", remove=True
)
26 |
27 |
28 | def _get_peer_mock(public_key, last_handshake_time):
29 | def peer_get_attr(attr: str):
30 | if attr == "WGPEER_A_LAST_HANDSHAKE_TIME":
31 | return {"tv_sec": last_handshake_time}
32 | if attr == "WGPEER_A_PUBLIC_KEY":
33 | return public_key.encode()
34 |
35 | peer_mock = mock.Mock()
36 | peer_mock.get_attr.side_effect = peer_get_attr
37 | return peer_mock
38 |
39 |
def _get_wg_mock(public_key, last_handshake_time):
    """Wires the WireGuard context-manager mock to report a single peer."""
    peer_mock = _get_peer_mock(public_key, last_handshake_time)

    def msg_get_attr(attr: str):
        if attr == "WGDEVICE_A_PEERS":
            return [peer_mock]

    msg_mock = mock.Mock()
    msg_mock.get_attr.side_effect = msg_get_attr
    # The code under test uses "with pyroute2.WireGuard() as wg", so configure
    # the object returned by __enter__.
    wg_info_mock = WireGuard().__enter__.return_value
    wg_info_mock.set.return_value = {"WireGuard": "set"}
    wg_info_mock.info.return_value = [msg_mock]
    return wg_info_mock
54 |
55 |
56 | class NetlinkTest(unittest.TestCase):
    def setUp(self) -> None:
        """Creates a shared IPRoute context-manager mock for route/fdb assertions."""
        iproute_instance = IPRoute()
        # netlink.py uses "with pyroute2.IPRoute() as ip", so the object of
        # interest is what __enter__ returns.
        self.route_info_mock = iproute_instance.__enter__.return_value
        # self.addCleanup(mock.patch.stopall)
61 |
62 | def test_find_stale_wireguard_clients_success_with_non_stale_peer(self):
63 | """Tests find_stale_wireguard_clients no operation on non-stale peers."""
64 | _wg_info_mock = _get_wg_mock(
65 | "WGPEER_A_PUBLIC_KEY",
66 | int((datetime.now() - timedelta(seconds=3)).timestamp()),
67 | )
68 | self.assertListEqual([], netlink.find_stale_wireguard_clients("some_interface"))
69 |
70 | def test_find_stale_wireguard_clients_success_stale_peer(self):
71 | """Tests find_stale_wireguard_clients removal of stale peer"""
72 | _wg_info_mock = _get_wg_mock(
73 | "WGPEER_A_PUBLIC_KEY_STALE",
74 | int((datetime.now() - timedelta(hours=5)).timestamp()),
75 | )
76 | self.assertListEqual(
77 | ["WGPEER_A_PUBLIC_KEY_STALE"],
78 | netlink.find_stale_wireguard_clients("some_interface"),
79 | )
80 |
81 | def test_route_handler_add_success(self):
82 | """Test route_handler for normal add operation."""
83 | self.route_info_mock.route.return_value = {"key": "value"}
84 | self.assertDictEqual({"key": "value"}, netlink.route_handler(_WG_CLIENT_ADD))
85 | self.route_info_mock.route.assert_called_with(
86 | "replace", dst="fe80::282:6eff:fe9d:ecd3/128", oif=mock.ANY
87 | )
88 |
89 | def test_route_handler_remove_success(self):
90 | """Test route_handler for normal del operation."""
91 | self.route_info_mock.route.return_value = {"key": "value"}
92 | self.assertDictEqual({"key": "value"}, netlink.route_handler(_WG_CLIENT_DEL))
93 | self.route_info_mock.route.assert_called_with(
94 | "del", dst="fe80::282:6eff:fe9d:ecd3/128", oif=mock.ANY
95 | )
96 |
    def test_update_wireguard_peer_success(self):
        """Test update_wireguard_peer for normal operation."""
        # Handshake age is irrelevant for set(); any recent value works.
        wg_info_mock = _get_wg_mock(
            "WGPEER_A_PUBLIC_KEY",
            int((datetime.now() - timedelta(seconds=3)).timestamp()),
        )
        self.assertDictEqual(
            {"WireGuard": "set"}, netlink.update_wireguard_peer(_WG_CLIENT_ADD)
        )
        # The peer spec must carry the derived lladdr as the sole allowed IP.
        wg_info_mock.set.assert_called_with(
            "wg-add",
            peer={
                "public_key": "public_key",
                "allowed_ips": ["fe80::282:6eff:fe9d:ecd3/128"],
                "remove": False,
            },
        )
114 |
    def test_bridge_fdb_handler_append_success(self):
        """Test bridge_fdb_handler for normal append operation."""
        self.route_info_mock.fdb.return_value = {"key": "value"}
        self.assertEqual({"key": "value"}, netlink.bridge_fdb_handler(_WG_CLIENT_ADD))
        # dst must be the lladdr without its /128 prefix length.
        self.route_info_mock.fdb.assert_called_with(
            "append",
            lladdr="00:00:00:00:00:00",
            dst="fe80::282:6eff:fe9d:ecd3",
            ifindex=mock.ANY,
            NDA_IFINDEX=mock.ANY,
        )
126 |
    def test_bridge_fdb_handler_del_success(self):
        """Test bridge_fdb_handler for normal del operation."""
        self.route_info_mock.fdb.return_value = {"key": "value"}
        self.assertEqual({"key": "value"}, netlink.bridge_fdb_handler(_WG_CLIENT_DEL))
        # remove=True must translate into the "del" fdb operation.
        self.route_info_mock.fdb.assert_called_with(
            "del",
            ifindex=mock.ANY,
            NDA_IFINDEX=mock.ANY,
            lladdr="00:00:00:00:00:00",
            dst="fe80::282:6eff:fe9d:ecd3",
        )
138 |
    def test_link_handler_addition_success(self):
        """Test link_handler for normal operation."""
        # link_handler aggregates the results of all three sub-operations.
        expected = {
            "Wireguard": {"WireGuard": "set"},
            "Route": {"IPRoute": "route"},
            "Bridge FDB": {"IPRoute": "fdb"},
        }
        wg_info_mock = _get_wg_mock(
            "WGPEER_A_PUBLIC_KEY",
            int((datetime.now() - timedelta(seconds=3)).timestamp()),
        )
        wg_info_mock.set.return_value = {"WireGuard": "set"}
        self.route_info_mock.fdb.return_value = {"IPRoute": "fdb"}
        self.route_info_mock.route.return_value = {"IPRoute": "route"}
        self.assertEqual(expected, netlink.link_handler(_WG_CLIENT_ADD))
        # All three netlink calls must be made for an addition.
        self.route_info_mock.fdb.assert_called_with(
            "append",
            ifindex=mock.ANY,
            NDA_IFINDEX=mock.ANY,
            lladdr="00:00:00:00:00:00",
            dst="fe80::282:6eff:fe9d:ecd3",
        )
        self.route_info_mock.route.assert_called_with(
            "replace", dst="fe80::282:6eff:fe9d:ecd3/128", oif=mock.ANY
        )
        wg_info_mock.set.assert_called_with(
            "wg-add",
            peer={
                "public_key": "public_key",
                "allowed_ips": ["fe80::282:6eff:fe9d:ecd3/128"],
                "remove": False,
            },
        )
172 |
    def test_wg_flush_stale_peers_not_stale_success(self):
        """Tests processing of non-stale WireGuard Peer."""
        # A 3-second-old handshake is well inside the staleness window,
        # so no clients should be flushed.
        _wg_info_mock = _get_wg_mock(
            "WGPEER_A_PUBLIC_KEY",
            int((datetime.now() - timedelta(seconds=3)).timestamp()),
        )
        self.route_info_mock.fdb.return_value = {"IPRoute": "fdb"}
        self.route_info_mock.route.return_value = {"IPRoute": "route"}
        self.assertListEqual([], netlink.wg_flush_stale_peers("domain"))
        # TODO(ruairi): Understand why pyroute.WireGuard.set
        # wg_info_mock.set.assert_not_called()
184 |
    def test_wg_flush_stale_peers_stale_success(self):
        """Tests processing of stale WireGuard Peer."""
        # One stale peer (5h old, beyond the 3h timeout) should be flushed,
        # producing one aggregated link_handler result.
        expected = [
            {
                "Wireguard": {"WireGuard": "set"},
                "Route": {"IPRoute": "route"},
                "Bridge FDB": {"IPRoute": "fdb"},
            }
        ]
        self.route_info_mock.fdb.return_value = {"IPRoute": "fdb"}
        self.route_info_mock.route.return_value = {"IPRoute": "route"}
        wg_info_mock = _get_wg_mock(
            "WGPEER_A_PUBLIC_KEY_STALE",
            int((datetime.now() - timedelta(hours=5)).timestamp()),
        )
        wg_info_mock.set.return_value = {"WireGuard": "set"}
        self.assertListEqual(expected, netlink.wg_flush_stale_peers("domain"))
        # Removal must delete the route for the stale key's derived lladdr.
        self.route_info_mock.route.assert_called_with(
            "del", dst="fe80::281:16ff:fe49:395e/128", oif=mock.ANY
        )
205 |
206 | def test_get_connected_peers_count_success(self):
207 | """Tests getting the correct number of connected peers for an interface."""
208 | peers = []
209 | for i in range(10):
210 | peer_mock = _get_peer_mock(
211 | "TEST_KEY",
212 | int((datetime.now() - timedelta(minutes=i, seconds=5)).timestamp()),
213 | )
214 | peers.append(peer_mock)
215 |
216 | def msg_get_attr(attr: str):
217 | if attr == "WGDEVICE_A_PEERS":
218 | return peers
219 |
220 | msg_mock = mock.Mock()
221 | msg_mock.get_attr.side_effect = msg_get_attr
222 |
223 | wg_instance = WireGuard()
224 | wg_info_mock = wg_instance.__enter__.return_value
225 | wg_info_mock.info.return_value = [msg_mock]
226 |
227 | ret = netlink.get_connected_peers_count("wg-welt")
228 | self.assertEqual(ret, 3)
229 |
230 | @mock.patch("pyroute2.WireGuard")
231 | def test_get_connected_peers_count_NetlinkDumpInterrupted(self, pyroute2_wg_mock):
232 | """Tests getting the correct number of connected peers for an interface."""
233 |
234 | nl_wg_mock_ctx = mock.MagicMock()
235 | wg_info_mock = mock.MagicMock(
236 | side_effect=(pyroute2_netlink_exceptions.NetlinkDumpInterrupted),
237 | )
238 | nl_wg_mock_ctx.info = wg_info_mock
239 |
240 | nl_wg_mock_inst = pyroute2_wg_mock.return_value
241 | nl_wg_mock_inst.__enter__ = mock.MagicMock(return_value=nl_wg_mock_ctx)
242 |
243 | self.assertRaises(
244 | pyroute2_netlink_exceptions.NetlinkDumpInterrupted,
245 | netlink.get_connected_peers_count,
246 | "wg-welt",
247 | )
248 | self.assertTrue(len(wg_info_mock.mock_calls) == 2)
249 |
250 | def test_get_device_data_success(self):
251 | def msg_get_attr(attr: str):
252 | if attr == "WGDEVICE_A_LISTEN_PORT":
253 | return 51820
254 | if attr == "WGDEVICE_A_PUBLIC_KEY":
255 | return "TEST_PUBLIC_KEY".encode("ascii")
256 |
257 | msg_mock = mock.Mock()
258 | msg_mock.get_attr.side_effect = msg_get_attr
259 |
260 | wg_instance = WireGuard()
261 | wg_info_mock = wg_instance.__enter__.return_value
262 | wg_info_mock.info.return_value = [msg_mock]
263 |
264 | ret = netlink.get_device_data("wg-welt")
265 | self.assertTupleEqual(ret, (51820, "TEST_PUBLIC_KEY", mock.ANY))
266 |
267 |
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
270 |
--------------------------------------------------------------------------------
/MODULE.bazel.lock:
--------------------------------------------------------------------------------
1 | {
2 | "lockFileVersion": 18,
3 | "registryFileHashes": {
4 | "https://bcr.bazel.build/bazel_registry.json": "8a28e4aff06ee60aed2a8c281907fb8bcbf3b753c91fb5a5c57da3215d5b3497",
5 | "https://bcr.bazel.build/modules/abseil-cpp/20210324.2/MODULE.bazel": "7cd0312e064fde87c8d1cd79ba06c876bd23630c83466e9500321be55c96ace2",
6 | "https://bcr.bazel.build/modules/abseil-cpp/20211102.0/MODULE.bazel": "70390338f7a5106231d20620712f7cccb659cd0e9d073d1991c038eb9fc57589",
7 | "https://bcr.bazel.build/modules/abseil-cpp/20230125.1/MODULE.bazel": "89047429cb0207707b2dface14ba7f8df85273d484c2572755be4bab7ce9c3a0",
8 | "https://bcr.bazel.build/modules/abseil-cpp/20230802.0.bcr.1/MODULE.bazel": "1c8cec495288dccd14fdae6e3f95f772c1c91857047a098fad772034264cc8cb",
9 | "https://bcr.bazel.build/modules/abseil-cpp/20230802.0/MODULE.bazel": "d253ae36a8bd9ee3c5955384096ccb6baf16a1b1e93e858370da0a3b94f77c16",
10 | "https://bcr.bazel.build/modules/abseil-cpp/20230802.1/MODULE.bazel": "fa92e2eb41a04df73cdabeec37107316f7e5272650f81d6cc096418fe647b915",
11 | "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/MODULE.bazel": "37bcdb4440fbb61df6a1c296ae01b327f19e9bb521f9b8e26ec854b6f97309ed",
12 | "https://bcr.bazel.build/modules/abseil-cpp/20240116.1/source.json": "9be551b8d4e3ef76875c0d744b5d6a504a27e3ae67bc6b28f46415fd2d2957da",
13 | "https://bcr.bazel.build/modules/bazel_features/1.1.1/MODULE.bazel": "27b8c79ef57efe08efccbd9dd6ef70d61b4798320b8d3c134fd571f78963dbcd",
14 | "https://bcr.bazel.build/modules/bazel_features/1.11.0/MODULE.bazel": "f9382337dd5a474c3b7d334c2f83e50b6eaedc284253334cf823044a26de03e8",
15 | "https://bcr.bazel.build/modules/bazel_features/1.15.0/MODULE.bazel": "d38ff6e517149dc509406aca0db3ad1efdd890a85e049585b7234d04238e2a4d",
16 | "https://bcr.bazel.build/modules/bazel_features/1.17.0/MODULE.bazel": "039de32d21b816b47bd42c778e0454217e9c9caac4a3cf8e15c7231ee3ddee4d",
17 | "https://bcr.bazel.build/modules/bazel_features/1.18.0/MODULE.bazel": "1be0ae2557ab3a72a57aeb31b29be347bcdc5d2b1eb1e70f39e3851a7e97041a",
18 | "https://bcr.bazel.build/modules/bazel_features/1.19.0/MODULE.bazel": "59adcdf28230d220f0067b1f435b8537dd033bfff8db21335ef9217919c7fb58",
19 | "https://bcr.bazel.build/modules/bazel_features/1.21.0/MODULE.bazel": "675642261665d8eea09989aa3b8afb5c37627f1be178382c320d1b46afba5e3b",
20 | "https://bcr.bazel.build/modules/bazel_features/1.21.0/source.json": "3e8379efaaef53ce35b7b8ba419df829315a880cb0a030e5bb45c96d6d5ecb5f",
21 | "https://bcr.bazel.build/modules/bazel_features/1.4.1/MODULE.bazel": "e45b6bb2350aff3e442ae1111c555e27eac1d915e77775f6fdc4b351b758b5d7",
22 | "https://bcr.bazel.build/modules/bazel_features/1.9.1/MODULE.bazel": "8f679097876a9b609ad1f60249c49d68bfab783dd9be012faf9d82547b14815a",
23 | "https://bcr.bazel.build/modules/bazel_skylib/1.0.3/MODULE.bazel": "bcb0fd896384802d1ad283b4e4eb4d718eebd8cb820b0a2c3a347fb971afd9d8",
24 | "https://bcr.bazel.build/modules/bazel_skylib/1.1.1/MODULE.bazel": "1add3e7d93ff2e6998f9e118022c84d163917d912f5afafb3058e3d2f1545b5e",
25 | "https://bcr.bazel.build/modules/bazel_skylib/1.2.0/MODULE.bazel": "44fe84260e454ed94ad326352a698422dbe372b21a1ac9f3eab76eb531223686",
26 | "https://bcr.bazel.build/modules/bazel_skylib/1.2.1/MODULE.bazel": "f35baf9da0efe45fa3da1696ae906eea3d615ad41e2e3def4aeb4e8bc0ef9a7a",
27 | "https://bcr.bazel.build/modules/bazel_skylib/1.3.0/MODULE.bazel": "20228b92868bf5cfc41bda7afc8a8ba2a543201851de39d990ec957b513579c5",
28 | "https://bcr.bazel.build/modules/bazel_skylib/1.4.1/MODULE.bazel": "a0dcb779424be33100dcae821e9e27e4f2901d9dfd5333efe5ac6a8d7ab75e1d",
29 | "https://bcr.bazel.build/modules/bazel_skylib/1.4.2/MODULE.bazel": "3bd40978e7a1fac911d5989e6b09d8f64921865a45822d8b09e815eaa726a651",
30 | "https://bcr.bazel.build/modules/bazel_skylib/1.5.0/MODULE.bazel": "32880f5e2945ce6a03d1fbd588e9198c0a959bb42297b2cfaf1685b7bc32e138",
31 | "https://bcr.bazel.build/modules/bazel_skylib/1.6.1/MODULE.bazel": "8fdee2dbaace6c252131c00e1de4b165dc65af02ea278476187765e1a617b917",
32 | "https://bcr.bazel.build/modules/bazel_skylib/1.7.0/MODULE.bazel": "0db596f4563de7938de764cc8deeabec291f55e8ec15299718b93c4423e9796d",
33 | "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/MODULE.bazel": "3120d80c5861aa616222ec015332e5f8d3171e062e3e804a2a0253e1be26e59b",
34 | "https://bcr.bazel.build/modules/bazel_skylib/1.7.1/source.json": "f121b43eeefc7c29efbd51b83d08631e2347297c95aac9764a701f2a6a2bb953",
35 | "https://bcr.bazel.build/modules/buildozer/7.1.2/MODULE.bazel": "2e8dd40ede9c454042645fd8d8d0cd1527966aa5c919de86661e62953cd73d84",
36 | "https://bcr.bazel.build/modules/buildozer/7.1.2/source.json": "c9028a501d2db85793a6996205c8de120944f50a0d570438fcae0457a5f9d1f8",
37 | "https://bcr.bazel.build/modules/google_benchmark/1.8.2/MODULE.bazel": "a70cf1bba851000ba93b58ae2f6d76490a9feb74192e57ab8e8ff13c34ec50cb",
38 | "https://bcr.bazel.build/modules/googletest/1.11.0/MODULE.bazel": "3a83f095183f66345ca86aa13c58b59f9f94a2f81999c093d4eeaa2d262d12f4",
39 | "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/MODULE.bazel": "22c31a561553727960057361aa33bf20fb2e98584bc4fec007906e27053f80c6",
40 | "https://bcr.bazel.build/modules/googletest/1.14.0.bcr.1/source.json": "41e9e129f80d8c8bf103a7acc337b76e54fad1214ac0a7084bf24f4cd924b8b4",
41 | "https://bcr.bazel.build/modules/googletest/1.14.0/MODULE.bazel": "cfbcbf3e6eac06ef9d85900f64424708cc08687d1b527f0ef65aa7517af8118f",
42 | "https://bcr.bazel.build/modules/jsoncpp/1.9.5/MODULE.bazel": "31271aedc59e815656f5736f282bb7509a97c7ecb43e927ac1a37966e0578075",
43 | "https://bcr.bazel.build/modules/jsoncpp/1.9.5/source.json": "4108ee5085dd2885a341c7fab149429db457b3169b86eb081fa245eadf69169d",
44 | "https://bcr.bazel.build/modules/libpfm/4.11.0/MODULE.bazel": "45061ff025b301940f1e30d2c16bea596c25b176c8b6b3087e92615adbd52902",
45 | "https://bcr.bazel.build/modules/platforms/0.0.10/MODULE.bazel": "8cb8efaf200bdeb2150d93e162c40f388529a25852b332cec879373771e48ed5",
46 | "https://bcr.bazel.build/modules/platforms/0.0.10/source.json": "f22828ff4cf021a6b577f1bf6341cb9dcd7965092a439f64fc1bb3b7a5ae4bd5",
47 | "https://bcr.bazel.build/modules/platforms/0.0.4/MODULE.bazel": "9b328e31ee156f53f3c416a64f8491f7eb731742655a47c9eec4703a71644aee",
48 | "https://bcr.bazel.build/modules/platforms/0.0.5/MODULE.bazel": "5733b54ea419d5eaf7997054bb55f6a1d0b5ff8aedf0176fef9eea44f3acda37",
49 | "https://bcr.bazel.build/modules/platforms/0.0.6/MODULE.bazel": "ad6eeef431dc52aefd2d77ed20a4b353f8ebf0f4ecdd26a807d2da5aa8cd0615",
50 | "https://bcr.bazel.build/modules/platforms/0.0.7/MODULE.bazel": "72fd4a0ede9ee5c021f6a8dd92b503e089f46c227ba2813ff183b71616034814",
51 | "https://bcr.bazel.build/modules/platforms/0.0.8/MODULE.bazel": "9f142c03e348f6d263719f5074b21ef3adf0b139ee4c5133e2aa35664da9eb2d",
52 | "https://bcr.bazel.build/modules/protobuf/21.7/MODULE.bazel": "a5a29bb89544f9b97edce05642fac225a808b5b7be74038ea3640fae2f8e66a7",
53 | "https://bcr.bazel.build/modules/protobuf/27.0/MODULE.bazel": "7873b60be88844a0a1d8f80b9d5d20cfbd8495a689b8763e76c6372998d3f64c",
54 | "https://bcr.bazel.build/modules/protobuf/27.1/MODULE.bazel": "703a7b614728bb06647f965264967a8ef1c39e09e8f167b3ca0bb1fd80449c0d",
55 | "https://bcr.bazel.build/modules/protobuf/29.0-rc2/MODULE.bazel": "6241d35983510143049943fc0d57937937122baf1b287862f9dc8590fc4c37df",
56 | "https://bcr.bazel.build/modules/protobuf/29.0-rc3/MODULE.bazel": "33c2dfa286578573afc55a7acaea3cada4122b9631007c594bf0729f41c8de92",
57 | "https://bcr.bazel.build/modules/protobuf/29.0/MODULE.bazel": "319dc8bf4c679ff87e71b1ccfb5a6e90a6dbc4693501d471f48662ac46d04e4e",
58 | "https://bcr.bazel.build/modules/protobuf/29.0/source.json": "b857f93c796750eef95f0d61ee378f3420d00ee1dd38627b27193aa482f4f981",
59 | "https://bcr.bazel.build/modules/protobuf/3.19.0/MODULE.bazel": "6b5fbb433f760a99a22b18b6850ed5784ef0e9928a72668b66e4d7ccd47db9b0",
60 | "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/MODULE.bazel": "88af1c246226d87e65be78ed49ecd1e6f5e98648558c14ce99176da041dc378e",
61 | "https://bcr.bazel.build/modules/pybind11_bazel/2.11.1/source.json": "be4789e951dd5301282729fe3d4938995dc4c1a81c2ff150afc9f1b0504c6022",
62 | "https://bcr.bazel.build/modules/re2/2023-09-01/MODULE.bazel": "cb3d511531b16cfc78a225a9e2136007a48cf8a677e4264baeab57fe78a80206",
63 | "https://bcr.bazel.build/modules/re2/2023-09-01/source.json": "e044ce89c2883cd957a2969a43e79f7752f9656f6b20050b62f90ede21ec6eb4",
64 | "https://bcr.bazel.build/modules/rules_android/0.1.1/MODULE.bazel": "48809ab0091b07ad0182defb787c4c5328bd3a278938415c00a7b69b50c4d3a8",
65 | "https://bcr.bazel.build/modules/rules_android/0.1.1/source.json": "e6986b41626ee10bdc864937ffb6d6bf275bb5b9c65120e6137d56e6331f089e",
66 | "https://bcr.bazel.build/modules/rules_cc/0.0.1/MODULE.bazel": "cb2aa0747f84c6c3a78dad4e2049c154f08ab9d166b1273835a8174940365647",
67 | "https://bcr.bazel.build/modules/rules_cc/0.0.10/MODULE.bazel": "ec1705118f7eaedd6e118508d3d26deba2a4e76476ada7e0e3965211be012002",
68 | "https://bcr.bazel.build/modules/rules_cc/0.0.13/MODULE.bazel": "0e8529ed7b323dad0775ff924d2ae5af7640b23553dfcd4d34344c7e7a867191",
69 | "https://bcr.bazel.build/modules/rules_cc/0.0.14/MODULE.bazel": "5e343a3aac88b8d7af3b1b6d2093b55c347b8eefc2e7d1442f7a02dc8fea48ac",
70 | "https://bcr.bazel.build/modules/rules_cc/0.0.15/MODULE.bazel": "6704c35f7b4a72502ee81f61bf88706b54f06b3cbe5558ac17e2e14666cd5dcc",
71 | "https://bcr.bazel.build/modules/rules_cc/0.0.16/MODULE.bazel": "7661303b8fc1b4d7f532e54e9d6565771fea666fbdf839e0a86affcd02defe87",
72 | "https://bcr.bazel.build/modules/rules_cc/0.0.17/MODULE.bazel": "2ae1d8f4238ec67d7185d8861cb0a2cdf4bc608697c331b95bf990e69b62e64a",
73 | "https://bcr.bazel.build/modules/rules_cc/0.0.17/source.json": "4db99b3f55c90ab28d14552aa0632533e3e8e5e9aea0f5c24ac0014282c2a7c5",
74 | "https://bcr.bazel.build/modules/rules_cc/0.0.2/MODULE.bazel": "6915987c90970493ab97393024c156ea8fb9f3bea953b2f3ec05c34f19b5695c",
75 | "https://bcr.bazel.build/modules/rules_cc/0.0.6/MODULE.bazel": "abf360251023dfe3efcef65ab9d56beefa8394d4176dd29529750e1c57eaa33f",
76 | "https://bcr.bazel.build/modules/rules_cc/0.0.8/MODULE.bazel": "964c85c82cfeb6f3855e6a07054fdb159aced38e99a5eecf7bce9d53990afa3e",
77 | "https://bcr.bazel.build/modules/rules_cc/0.0.9/MODULE.bazel": "836e76439f354b89afe6a911a7adf59a6b2518fafb174483ad78a2a2fde7b1c5",
78 | "https://bcr.bazel.build/modules/rules_foreign_cc/0.9.0/MODULE.bazel": "c9e8c682bf75b0e7c704166d79b599f93b72cfca5ad7477df596947891feeef6",
79 | "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/MODULE.bazel": "40c97d1144356f52905566c55811f13b299453a14ac7769dfba2ac38192337a8",
80 | "https://bcr.bazel.build/modules/rules_fuzzing/0.5.2/source.json": "c8b1e2c717646f1702290959a3302a178fb639d987ab61d548105019f11e527e",
81 | "https://bcr.bazel.build/modules/rules_java/4.0.0/MODULE.bazel": "5a78a7ae82cd1a33cef56dc578c7d2a46ed0dca12643ee45edbb8417899e6f74",
82 | "https://bcr.bazel.build/modules/rules_java/5.3.5/MODULE.bazel": "a4ec4f2db570171e3e5eb753276ee4b389bae16b96207e9d3230895c99644b86",
83 | "https://bcr.bazel.build/modules/rules_java/6.0.0/MODULE.bazel": "8a43b7df601a7ec1af61d79345c17b31ea1fedc6711fd4abfd013ea612978e39",
84 | "https://bcr.bazel.build/modules/rules_java/6.4.0/MODULE.bazel": "e986a9fe25aeaa84ac17ca093ef13a4637f6107375f64667a15999f77db6c8f6",
85 | "https://bcr.bazel.build/modules/rules_java/6.5.2/MODULE.bazel": "1d440d262d0e08453fa0c4d8f699ba81609ed0e9a9a0f02cd10b3e7942e61e31",
86 | "https://bcr.bazel.build/modules/rules_java/7.10.0/MODULE.bazel": "530c3beb3067e870561739f1144329a21c851ff771cd752a49e06e3dc9c2e71a",
87 | "https://bcr.bazel.build/modules/rules_java/7.12.2/MODULE.bazel": "579c505165ee757a4280ef83cda0150eea193eed3bef50b1004ba88b99da6de6",
88 | "https://bcr.bazel.build/modules/rules_java/7.2.0/MODULE.bazel": "06c0334c9be61e6cef2c8c84a7800cef502063269a5af25ceb100b192453d4ab",
89 | "https://bcr.bazel.build/modules/rules_java/7.3.2/MODULE.bazel": "50dece891cfdf1741ea230d001aa9c14398062f2b7c066470accace78e412bc2",
90 | "https://bcr.bazel.build/modules/rules_java/7.6.1/MODULE.bazel": "2f14b7e8a1aa2f67ae92bc69d1ec0fa8d9f827c4e17ff5e5f02e91caa3b2d0fe",
91 | "https://bcr.bazel.build/modules/rules_java/8.11.0/MODULE.bazel": "c3d280bc5ff1038dcb3bacb95d3f6b83da8dd27bba57820ec89ea4085da767ad",
92 | "https://bcr.bazel.build/modules/rules_java/8.11.0/source.json": "302b52a39259a85aa06ca3addb9787864ca3e03b432a5f964ea68244397e7544",
93 | "https://bcr.bazel.build/modules/rules_java/8.3.2/MODULE.bazel": "7336d5511ad5af0b8615fdc7477535a2e4e723a357b6713af439fe8cf0195017",
94 | "https://bcr.bazel.build/modules/rules_java/8.5.1/MODULE.bazel": "d8a9e38cc5228881f7055a6079f6f7821a073df3744d441978e7a43e20226939",
95 | "https://bcr.bazel.build/modules/rules_jvm_external/4.4.2/MODULE.bazel": "a56b85e418c83eb1839819f0b515c431010160383306d13ec21959ac412d2fe7",
96 | "https://bcr.bazel.build/modules/rules_jvm_external/5.1/MODULE.bazel": "33f6f999e03183f7d088c9be518a63467dfd0be94a11d0055fe2d210f89aa909",
97 | "https://bcr.bazel.build/modules/rules_jvm_external/5.2/MODULE.bazel": "d9351ba35217ad0de03816ef3ed63f89d411349353077348a45348b096615036",
98 | "https://bcr.bazel.build/modules/rules_jvm_external/5.3/MODULE.bazel": "bf93870767689637164657731849fb887ad086739bd5d360d90007a581d5527d",
99 | "https://bcr.bazel.build/modules/rules_jvm_external/6.1/MODULE.bazel": "75b5fec090dbd46cf9b7d8ea08cf84a0472d92ba3585b476f44c326eda8059c4",
100 | "https://bcr.bazel.build/modules/rules_jvm_external/6.3/MODULE.bazel": "c998e060b85f71e00de5ec552019347c8bca255062c990ac02d051bb80a38df0",
101 | "https://bcr.bazel.build/modules/rules_jvm_external/6.3/source.json": "6f5f5a5a4419ae4e37c35a5bb0a6ae657ed40b7abc5a5189111b47fcebe43197",
102 | "https://bcr.bazel.build/modules/rules_kotlin/1.9.0/MODULE.bazel": "ef85697305025e5a61f395d4eaede272a5393cee479ace6686dba707de804d59",
103 | "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/MODULE.bazel": "d269a01a18ee74d0335450b10f62c9ed81f2321d7958a2934e44272fe82dcef3",
104 | "https://bcr.bazel.build/modules/rules_kotlin/1.9.6/source.json": "2faa4794364282db7c06600b7e5e34867a564ae91bda7cae7c29c64e9466b7d5",
105 | "https://bcr.bazel.build/modules/rules_license/0.0.3/MODULE.bazel": "627e9ab0247f7d1e05736b59dbb1b6871373de5ad31c3011880b4133cafd4bd0",
106 | "https://bcr.bazel.build/modules/rules_license/0.0.7/MODULE.bazel": "088fbeb0b6a419005b89cf93fe62d9517c0a2b8bb56af3244af65ecfe37e7d5d",
107 | "https://bcr.bazel.build/modules/rules_license/1.0.0/MODULE.bazel": "a7fda60eefdf3d8c827262ba499957e4df06f659330bbe6cdbdb975b768bb65c",
108 | "https://bcr.bazel.build/modules/rules_license/1.0.0/source.json": "a52c89e54cc311196e478f8382df91c15f7a2bfdf4c6cd0e2675cc2ff0b56efb",
109 | "https://bcr.bazel.build/modules/rules_pkg/0.7.0/MODULE.bazel": "df99f03fc7934a4737122518bb87e667e62d780b610910f0447665a7e2be62dc",
110 | "https://bcr.bazel.build/modules/rules_pkg/1.0.1/MODULE.bazel": "5b1df97dbc29623bccdf2b0dcd0f5cb08e2f2c9050aab1092fd39a41e82686ff",
111 | "https://bcr.bazel.build/modules/rules_pkg/1.0.1/source.json": "bd82e5d7b9ce2d31e380dd9f50c111d678c3bdaca190cb76b0e1c71b05e1ba8a",
112 | "https://bcr.bazel.build/modules/rules_proto/4.0.0/MODULE.bazel": "a7a7b6ce9bee418c1a760b3d84f83a299ad6952f9903c67f19e4edd964894e06",
113 | "https://bcr.bazel.build/modules/rules_proto/5.3.0-21.7/MODULE.bazel": "e8dff86b0971688790ae75528fe1813f71809b5afd57facb44dad9e8eca631b7",
114 | "https://bcr.bazel.build/modules/rules_proto/6.0.2/MODULE.bazel": "ce916b775a62b90b61888052a416ccdda405212b6aaeb39522f7dc53431a5e73",
115 | "https://bcr.bazel.build/modules/rules_proto/7.0.2/MODULE.bazel": "bf81793bd6d2ad89a37a40693e56c61b0ee30f7a7fdbaf3eabbf5f39de47dea2",
116 | "https://bcr.bazel.build/modules/rules_proto/7.0.2/source.json": "1e5e7260ae32ef4f2b52fd1d0de8d03b606a44c91b694d2f1afb1d3b28a48ce1",
117 | "https://bcr.bazel.build/modules/rules_python/0.10.2/MODULE.bazel": "cc82bc96f2997baa545ab3ce73f196d040ffb8756fd2d66125a530031cd90e5f",
118 | "https://bcr.bazel.build/modules/rules_python/0.23.1/MODULE.bazel": "49ffccf0511cb8414de28321f5fcf2a31312b47c40cc21577144b7447f2bf300",
119 | "https://bcr.bazel.build/modules/rules_python/0.25.0/MODULE.bazel": "72f1506841c920a1afec76975b35312410eea3aa7b63267436bfb1dd91d2d382",
120 | "https://bcr.bazel.build/modules/rules_python/0.28.0/MODULE.bazel": "cba2573d870babc976664a912539b320cbaa7114cd3e8f053c720171cde331ed",
121 | "https://bcr.bazel.build/modules/rules_python/0.31.0/MODULE.bazel": "93a43dc47ee570e6ec9f5779b2e64c1476a6ce921c48cc9a1678a91dd5f8fd58",
122 | "https://bcr.bazel.build/modules/rules_python/0.4.0/MODULE.bazel": "9208ee05fd48bf09ac60ed269791cf17fb343db56c8226a720fbb1cdf467166c",
123 | "https://bcr.bazel.build/modules/rules_python/0.40.0/MODULE.bazel": "9d1a3cd88ed7d8e39583d9ffe56ae8a244f67783ae89b60caafc9f5cf318ada7",
124 | "https://bcr.bazel.build/modules/rules_python/1.0.0-rc2/MODULE.bazel": "7e755005ffab33a24df8af746781ee3d0ba2afad1617d97aaaf81a0a287e2a22",
125 | "https://bcr.bazel.build/modules/rules_python/1.0.0-rc2/source.json": "0e7ded29166858fcf92c13efa358327943bc5008720dfd7c39f9630ddbb4516d",
126 | "https://bcr.bazel.build/modules/rules_shell/0.2.0/MODULE.bazel": "fda8a652ab3c7d8fee214de05e7a9916d8b28082234e8d2c0094505c5268ed3c",
127 | "https://bcr.bazel.build/modules/rules_shell/0.2.0/source.json": "7f27af3c28037d9701487c4744b5448d26537cc66cdef0d8df7ae85411f8de95",
128 | "https://bcr.bazel.build/modules/stardoc/0.5.1/MODULE.bazel": "1a05d92974d0c122f5ccf09291442580317cdd859f07a8655f1db9a60374f9f8",
129 | "https://bcr.bazel.build/modules/stardoc/0.5.3/MODULE.bazel": "c7f6948dae6999bf0db32c1858ae345f112cacf98f174c7a8bb707e41b974f1c",
130 | "https://bcr.bazel.build/modules/stardoc/0.5.6/MODULE.bazel": "c43dabc564990eeab55e25ed61c07a1aadafe9ece96a4efabb3f8bf9063b71ef",
131 | "https://bcr.bazel.build/modules/stardoc/0.7.0/MODULE.bazel": "05e3d6d30c099b6770e97da986c53bd31844d7f13d41412480ea265ac9e8079c",
132 | "https://bcr.bazel.build/modules/stardoc/0.7.1/MODULE.bazel": "3548faea4ee5dda5580f9af150e79d0f6aea934fc60c1cc50f4efdd9420759e7",
133 | "https://bcr.bazel.build/modules/stardoc/0.7.2/MODULE.bazel": "fc152419aa2ea0f51c29583fab1e8c99ddefd5b3778421845606ee628629e0e5",
134 | "https://bcr.bazel.build/modules/stardoc/0.7.2/source.json": "58b029e5e901d6802967754adf0a9056747e8176f017cfe3607c0851f4d42216",
135 | "https://bcr.bazel.build/modules/upb/0.0.0-20220923-a547704/MODULE.bazel": "7298990c00040a0e2f121f6c32544bab27d4452f80d9ce51349b1a28f3005c43",
136 | "https://bcr.bazel.build/modules/zlib/1.2.11/MODULE.bazel": "07b389abc85fdbca459b69e2ec656ae5622873af3f845e1c9d80fe179f3effa0",
137 | "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.3/MODULE.bazel": "af322bc08976524477c79d1e45e241b6efbeb918c497e8840b8ab116802dda79",
138 | "https://bcr.bazel.build/modules/zlib/1.3.1.bcr.3/source.json": "2be409ac3c7601245958cd4fcdff4288be79ed23bd690b4b951f500d54ee6e7d",
139 | "https://bcr.bazel.build/modules/zlib/1.3.1/MODULE.bazel": "751c9940dcfe869f5f7274e1295422a34623555916eb98c174c1e945594bf198"
140 | },
141 | "selectedYankedVersions": {},
142 | "moduleExtensions": {
143 | "@@platforms//host:extension.bzl%host_platform": {
144 | "general": {
145 | "bzlTransitiveDigest": "xelQcPZH8+tmuOHVjL9vDxMnnQNMlwj0SlvgoqBkm4U=",
146 | "usagesDigest": "SeQiIN/f8/Qt9vYQk7qcXp4I4wJeEC0RnQDiaaJ4tb8=",
147 | "recordedFileInputs": {},
148 | "recordedDirentsInputs": {},
149 | "envVariables": {},
150 | "generatedRepoSpecs": {
151 | "host_platform": {
152 | "repoRuleId": "@@platforms//host:extension.bzl%host_platform_repo",
153 | "attributes": {}
154 | }
155 | },
156 | "recordedRepoMappingEntries": []
157 | }
158 | },
159 | "@@rules_kotlin+//src/main/starlark/core/repositories:bzlmod_setup.bzl%rules_kotlin_extensions": {
160 | "general": {
161 | "bzlTransitiveDigest": "sFhcgPbDQehmbD1EOXzX4H1q/CD5df8zwG4kp4jbvr8=",
162 | "usagesDigest": "QI2z8ZUR+mqtbwsf2fLqYdJAkPOHdOV+tF2yVAUgRzw=",
163 | "recordedFileInputs": {},
164 | "recordedDirentsInputs": {},
165 | "envVariables": {},
166 | "generatedRepoSpecs": {
167 | "com_github_jetbrains_kotlin_git": {
168 | "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_compiler_git_repository",
169 | "attributes": {
170 | "urls": [
171 | "https://github.com/JetBrains/kotlin/releases/download/v1.9.23/kotlin-compiler-1.9.23.zip"
172 | ],
173 | "sha256": "93137d3aab9afa9b27cb06a824c2324195c6b6f6179d8a8653f440f5bd58be88"
174 | }
175 | },
176 | "com_github_jetbrains_kotlin": {
177 | "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:compiler.bzl%kotlin_capabilities_repository",
178 | "attributes": {
179 | "git_repository_name": "com_github_jetbrains_kotlin_git",
180 | "compiler_version": "1.9.23"
181 | }
182 | },
183 | "com_github_google_ksp": {
184 | "repoRuleId": "@@rules_kotlin+//src/main/starlark/core/repositories:ksp.bzl%ksp_compiler_plugin_repository",
185 | "attributes": {
186 | "urls": [
187 | "https://github.com/google/ksp/releases/download/1.9.23-1.0.20/artifacts.zip"
188 | ],
189 | "sha256": "ee0618755913ef7fd6511288a232e8fad24838b9af6ea73972a76e81053c8c2d",
190 | "strip_version": "1.9.23-1.0.20"
191 | }
192 | },
193 | "com_github_pinterest_ktlint": {
194 | "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_file",
195 | "attributes": {
196 | "sha256": "01b2e0ef893383a50dbeb13970fe7fa3be36ca3e83259e01649945b09d736985",
197 | "urls": [
198 | "https://github.com/pinterest/ktlint/releases/download/1.3.0/ktlint"
199 | ],
200 | "executable": true
201 | }
202 | },
203 | "rules_android": {
204 | "repoRuleId": "@@bazel_tools//tools/build_defs/repo:http.bzl%http_archive",
205 | "attributes": {
206 | "sha256": "cd06d15dd8bb59926e4d65f9003bfc20f9da4b2519985c27e190cddc8b7a7806",
207 | "strip_prefix": "rules_android-0.1.1",
208 | "urls": [
209 | "https://github.com/bazelbuild/rules_android/archive/v0.1.1.zip"
210 | ]
211 | }
212 | }
213 | },
214 | "recordedRepoMappingEntries": [
215 | [
216 | "rules_kotlin+",
217 | "bazel_tools",
218 | "bazel_tools"
219 | ]
220 | ]
221 | }
222 | }
223 | }
224 | }
225 |
--------------------------------------------------------------------------------
/requirements_lock.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by pip-compile with Python 3.13
3 | # by the following command:
4 | #
5 | # bazel run //:requirements.update
6 | #
7 | blinker==1.9.0 \
8 | --hash=sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf \
9 | --hash=sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc
10 | # via flask
11 | click==8.3.1 \
12 | --hash=sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a \
13 | --hash=sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6
14 | # via flask
15 | coverage==7.12.0 \
16 | --hash=sha256:01d24af36fedda51c2b1aca56e4330a3710f83b02a5ff3743a6b015ffa7c9384 \
17 | --hash=sha256:04a79245ab2b7a61688958f7a855275997134bc84f4a03bc240cf64ff132abf6 \
18 | --hash=sha256:083631eeff5eb9992c923e14b810a179798bb598e6a0dd60586819fc23be6e60 \
19 | --hash=sha256:099d11698385d572ceafb3288a5b80fe1fc58bf665b3f9d362389de488361d3d \
20 | --hash=sha256:09a86acaaa8455f13d6a99221d9654df249b33937b4e212b4e5a822065f12aa7 \
21 | --hash=sha256:159d50c0b12e060b15ed3d39f87ed43d4f7f7ad40b8a534f4dd331adbb51104a \
22 | --hash=sha256:172cf3a34bfef42611963e2b661302a8931f44df31629e5b1050567d6b90287d \
23 | --hash=sha256:22a7aade354a72dff3b59c577bfd18d6945c61f97393bc5fb7bd293a4237024b \
24 | --hash=sha256:24cff9d1f5743f67db7ba46ff284018a6e9aeb649b67aa1e70c396aa1b7cb23c \
25 | --hash=sha256:29644c928772c78512b48e14156b81255000dcfd4817574ff69def189bcb3647 \
26 | --hash=sha256:297bc2da28440f5ae51c845a47c8175a4db0553a53827886e4fb25c66633000c \
27 | --hash=sha256:2fd8354ed5d69775ac42986a691fbf68b4084278710cee9d7c3eaa0c28fa982a \
28 | --hash=sha256:313672140638b6ddb2c6455ddeda41c6a0b208298034544cfca138978c6baed6 \
29 | --hash=sha256:31b8b2e38391a56e3cea39d22a23faaa7c3fc911751756ef6d2621d2a9daf742 \
30 | --hash=sha256:32b75c2ba3f324ee37af3ccee5b30458038c50b349ad9b88cee85096132a575b \
31 | --hash=sha256:33baadc0efd5c7294f436a632566ccc1f72c867f82833eb59820ee37dc811c6f \
32 | --hash=sha256:3ff651dcd36d2fea66877cd4a82de478004c59b849945446acb5baf9379a1b64 \
33 | --hash=sha256:40c867af715f22592e0d0fb533a33a71ec9e0f73a6945f722a0c85c8c1cbe3a2 \
34 | --hash=sha256:42435d46d6461a3b305cdfcad7cdd3248787771f53fe18305548cba474e6523b \
35 | --hash=sha256:459443346509476170d553035e4a3eed7b860f4fe5242f02de1010501956ce87 \
36 | --hash=sha256:4648158fd8dd9381b5847622df1c90ff314efbfc1df4550092ab6013c238a5fc \
37 | --hash=sha256:47324fffca8d8eae7e185b5bb20c14645f23350f870c1649003618ea91a78941 \
38 | --hash=sha256:473dc45d69694069adb7680c405fb1e81f60b2aff42c81e2f2c3feaf544d878c \
39 | --hash=sha256:4b59b501455535e2e5dde5881739897967b272ba25988c89145c12d772810ccb \
40 | --hash=sha256:4c589361263ab2953e3c4cd2a94db94c4ad4a8e572776ecfbad2389c626e4507 \
41 | --hash=sha256:51777647a749abdf6f6fd8c7cffab12de68ab93aab15efc72fbbb83036c2a068 \
42 | --hash=sha256:52ca620260bd8cd6027317bdd8b8ba929be1d741764ee765b42c4d79a408601e \
43 | --hash=sha256:5560c7e0d82b42eb1951e4f68f071f8017c824ebfd5a6ebe42c60ac16c6c2434 \
44 | --hash=sha256:5734b5d913c3755e72f70bf6cc37a0518d4f4745cde760c5d8e12005e62f9832 \
45 | --hash=sha256:583f9adbefd278e9de33c33d6846aa8f5d164fa49b47144180a0e037f0688bb9 \
46 | --hash=sha256:58c1c6aa677f3a1411fe6fb28ec3a942e4f665df036a3608816e0847fad23296 \
47 | --hash=sha256:5b3c889c0b8b283a24d721a9eabc8ccafcfc3aebf167e4cd0d0e23bf8ec4e339 \
48 | --hash=sha256:5bcead88c8423e1855e64b8057d0544e33e4080b95b240c2a355334bb7ced937 \
49 | --hash=sha256:5ea5a9f7dc8877455b13dd1effd3202e0bca72f6f3ab09f9036b1bcf728f69ac \
50 | --hash=sha256:5f3738279524e988d9da2893f307c2093815c623f8d05a8f79e3eff3a7a9e553 \
51 | --hash=sha256:68b0d0a2d84f333de875666259dadf28cc67858bc8fd8b3f1eae84d3c2bec455 \
52 | --hash=sha256:6d907ddccbca819afa2cd014bc69983b146cca2735a0b1e6259b2a6c10be1e70 \
53 | --hash=sha256:6e1a8c066dabcde56d5d9fed6a66bc19a2883a3fe051f0c397a41fc42aedd4cc \
54 | --hash=sha256:6ff7651cc01a246908eac162a6a86fc0dbab6de1ad165dfb9a1e2ec660b44984 \
55 | --hash=sha256:737c3814903be30695b2de20d22bcc5428fdae305c61ba44cdc8b3252984c49c \
56 | --hash=sha256:73f9e7fbd51a221818fd11b7090eaa835a353ddd59c236c57b2199486b116c6d \
57 | --hash=sha256:76336c19a9ef4a94b2f8dc79f8ac2da3f193f625bb5d6f51a328cd19bfc19933 \
58 | --hash=sha256:7670d860e18b1e3ee5930b17a7d55ae6287ec6e55d9799982aa103a2cc1fa2ef \
59 | --hash=sha256:79a44421cd5fba96aa57b5e3b5a4d3274c449d4c622e8f76882d76635501fd13 \
60 | --hash=sha256:7c1059b600aec6ef090721f8f633f60ed70afaffe8ecab85b59df748f24b31fe \
61 | --hash=sha256:8638cbb002eaa5d7c8d04da667813ce1067080b9a91099801a0053086e52b736 \
62 | --hash=sha256:874fe69a0785d96bd066059cd4368022cebbec1a8958f224f0016979183916e6 \
63 | --hash=sha256:8787b0f982e020adb732b9f051f3e49dd5054cebbc3f3432061278512a2b1360 \
64 | --hash=sha256:8bb5b894b3ec09dcd6d3743229dc7f2c42ef7787dc40596ae04c0edda487371e \
65 | --hash=sha256:907e0df1b71ba77463687a74149c6122c3f6aac56c2510a5d906b2f368208560 \
66 | --hash=sha256:90d58ac63bc85e0fb919f14d09d6caa63f35a5512a2205284b7816cafd21bb03 \
67 | --hash=sha256:9157a5e233c40ce6613dead4c131a006adfda70e557b6856b97aceed01b0e27a \
68 | --hash=sha256:91b810a163ccad2e43b1faa11d70d3cf4b6f3d83f9fd5f2df82a32d47b648e0d \
69 | --hash=sha256:950411f1eb5d579999c5f66c62a40961f126fc71e5e14419f004471957b51508 \
70 | --hash=sha256:99d5415c73ca12d558e07776bd957c4222c687b9f1d26fa0e1b57e3598bdcde8 \
71 | --hash=sha256:9b57e2d0ddd5f0582bae5437c04ee71c46cd908e7bc5d4d0391f9a41e812dd12 \
72 | --hash=sha256:9bb44c889fb68004e94cab71f6a021ec83eac9aeabdbb5a5a88821ec46e1da73 \
73 | --hash=sha256:a00594770eb715854fb1c57e0dea08cce6720cfbc531accdb9850d7c7770396c \
74 | --hash=sha256:a1783ed5bd0d5938d4435014626568dc7f93e3cb99bc59188cc18857c47aa3c4 \
75 | --hash=sha256:a1c59b7dc169809a88b21a936eccf71c3895a78f5592051b1af8f4d59c2b4f92 \
76 | --hash=sha256:aa124a3683d2af98bd9d9c2bfa7a5076ca7e5ab09fdb96b81fa7d89376ae928f \
77 | --hash=sha256:aa7d48520a32cb21c7a9b31f81799e8eaec7239db36c3b670be0fa2403828d1d \
78 | --hash=sha256:b1518ecbad4e6173f4c6e6c4a46e49555ea5679bf3feda5edb1b935c7c44e8a0 \
79 | --hash=sha256:b1aab7302a87bafebfe76b12af681b56ff446dc6f32ed178ff9c092ca776e6bc \
80 | --hash=sha256:b2089cc445f2dc0af6f801f0d1355c025b76c24481935303cf1af28f636688f0 \
81 | --hash=sha256:b365adc70a6936c6b0582dc38746b33b2454148c02349345412c6e743efb646d \
82 | --hash=sha256:b527a08cdf15753279b7afb2339a12073620b761d79b81cbe2cdebdb43d90daa \
83 | --hash=sha256:bc13baf85cd8a4cfcf4a35c7bc9d795837ad809775f782f697bf630b7e200211 \
84 | --hash=sha256:bcec6f47e4cb8a4c2dc91ce507f6eefc6a1b10f58df32cdc61dff65455031dfc \
85 | --hash=sha256:c406a71f544800ef7e9e0000af706b88465f3573ae8b8de37e5f96c59f689ad1 \
86 | --hash=sha256:c5a6f20bf48b8866095c6820641e7ffbe23f2ac84a2efc218d91235e404c7777 \
87 | --hash=sha256:c87395744f5c77c866d0f5a43d97cc39e17c7f1cb0115e54a2fe67ca75c5d14d \
88 | --hash=sha256:ca8ecfa283764fdda3eae1bdb6afe58bf78c2c3ec2b2edcb05a671f0bba7b3f9 \
89 | --hash=sha256:cb2a1b6ab9fe833714a483a915de350abc624a37149649297624c8d57add089c \
90 | --hash=sha256:ccf3b2ede91decd2fb53ec73c1f949c3e034129d1e0b07798ff1d02ea0c8fa4a \
91 | --hash=sha256:ce61969812d6a98a981d147d9ac583a36ac7db7766f2e64a9d4d059c2fe29d07 \
92 | --hash=sha256:d6c2e26b481c9159c2773a37947a9718cfdc58893029cdfb177531793e375cfc \
93 | --hash=sha256:d7e0d0303c13b54db495eb636bc2465b2fb8475d4c8bcec8fe4b5ca454dfbae8 \
94 | --hash=sha256:d8842f17095b9868a05837b7b1b73495293091bed870e099521ada176aa3e00e \
95 | --hash=sha256:d93fbf446c31c0140208dcd07c5d882029832e8ed7891a39d6d44bd65f2316c3 \
96 | --hash=sha256:dcbb630ab034e86d2a0f79aefd2be07e583202f41e037602d438c80044957baa \
97 | --hash=sha256:e0d68c1f7eabbc8abe582d11fa393ea483caf4f44b0af86881174769f185c94d \
98 | --hash=sha256:e0f483ab4f749039894abaf80c2f9e7ed77bbf3c737517fb88c8e8e305896a17 \
99 | --hash=sha256:e71bba6a40883b00c6d571599b4627f50c360b3d0d02bfc658168936be74027b \
100 | --hash=sha256:e84da3a0fd233aeec797b981c51af1cabac74f9bd67be42458365b30d11b5291 \
101 | --hash=sha256:e949ebf60c717c3df63adb4a1a366c096c8d7fd8472608cd09359e1bd48ef59f \
102 | --hash=sha256:f3433ffd541380f3a0e423cff0f4926d55b0cc8c1d160fdc3be24a4c03aa65f7 \
103 | --hash=sha256:f7ba9da4726e446d8dd8aae5a6cd872511184a5d861de80a86ef970b5dacce3e \
104 | --hash=sha256:f7bbb321d4adc9f65e402c677cd1c8e4c2d0105d3ce285b51b4d87f1d5db5245 \
105 | --hash=sha256:f999813dddeb2a56aab5841e687b68169da0d3f6fc78ccf50952fa2463746022 \
106 | --hash=sha256:fc11e0a4e372cb5f282f16ef90d4a585034050ccda536451901abfb19a57f40c \
107 | --hash=sha256:fdba9f15849534594f60b47c9a30bc70409b54947319a7c4fd0e8e3d8d2f355d
108 | # via -r requirements.txt
109 | flask==3.1.2 \
110 | --hash=sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87 \
111 | --hash=sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c
112 | # via
113 | # -r requirements.txt
114 | # flask-mqtt
115 | flask-mqtt==1.1.1 \
116 | --hash=sha256:6d37c5ebb8e180f6aaa409d776a04f6dcf2fcd61cc22d67341bc146db36641e0
117 | # via -r requirements.txt
118 | ipaddress==1.0.23 \
119 | --hash=sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc \
120 | --hash=sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2
121 | # via -r requirements.txt
122 | itsdangerous==2.2.0 \
123 | --hash=sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef \
124 | --hash=sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173
125 | # via flask
126 | jinja2==3.1.6 \
127 | --hash=sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d \
128 | --hash=sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67
129 | # via flask
130 | markupsafe==3.0.3 \
131 | --hash=sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f \
132 | --hash=sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a \
133 | --hash=sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf \
134 | --hash=sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19 \
135 | --hash=sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf \
136 | --hash=sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c \
137 | --hash=sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175 \
138 | --hash=sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219 \
139 | --hash=sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb \
140 | --hash=sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6 \
141 | --hash=sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab \
142 | --hash=sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26 \
143 | --hash=sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1 \
144 | --hash=sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce \
145 | --hash=sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218 \
146 | --hash=sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634 \
147 | --hash=sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695 \
148 | --hash=sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad \
149 | --hash=sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73 \
150 | --hash=sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c \
151 | --hash=sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe \
152 | --hash=sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa \
153 | --hash=sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559 \
154 | --hash=sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa \
155 | --hash=sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37 \
156 | --hash=sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758 \
157 | --hash=sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f \
158 | --hash=sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8 \
159 | --hash=sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d \
160 | --hash=sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c \
161 | --hash=sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97 \
162 | --hash=sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a \
163 | --hash=sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19 \
164 | --hash=sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9 \
165 | --hash=sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9 \
166 | --hash=sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc \
167 | --hash=sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2 \
168 | --hash=sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4 \
169 | --hash=sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354 \
170 | --hash=sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50 \
171 | --hash=sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698 \
172 | --hash=sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9 \
173 | --hash=sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b \
174 | --hash=sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc \
175 | --hash=sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115 \
176 | --hash=sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e \
177 | --hash=sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485 \
178 | --hash=sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f \
179 | --hash=sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12 \
180 | --hash=sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025 \
181 | --hash=sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009 \
182 | --hash=sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d \
183 | --hash=sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b \
184 | --hash=sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a \
185 | --hash=sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5 \
186 | --hash=sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f \
187 | --hash=sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d \
188 | --hash=sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1 \
189 | --hash=sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287 \
190 | --hash=sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6 \
191 | --hash=sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f \
192 | --hash=sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581 \
193 | --hash=sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed \
194 | --hash=sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b \
195 | --hash=sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c \
196 | --hash=sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026 \
197 | --hash=sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8 \
198 | --hash=sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676 \
199 | --hash=sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6 \
200 | --hash=sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e \
201 | --hash=sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d \
202 | --hash=sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d \
203 | --hash=sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01 \
204 | --hash=sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7 \
205 | --hash=sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419 \
206 | --hash=sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795 \
207 | --hash=sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1 \
208 | --hash=sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5 \
209 | --hash=sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d \
210 | --hash=sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42 \
211 | --hash=sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe \
212 | --hash=sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda \
213 | --hash=sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e \
214 | --hash=sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737 \
215 | --hash=sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523 \
216 | --hash=sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591 \
217 | --hash=sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc \
218 | --hash=sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a \
219 | --hash=sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50
220 | # via
221 | # flask
222 | # jinja2
223 | # werkzeug
224 | mock==5.2.0 \
225 | --hash=sha256:4e460e818629b4b173f32d08bf30d3af8123afbb8e04bb5707a1fd4799e503f0 \
226 | --hash=sha256:7ba87f72ca0e915175596069dbbcc7c75af7b5e9b9bc107ad6349ede0819982f
227 | # via -r requirements.txt
228 | netlink==0.1 \
229 | --hash=sha256:b8d6f54887d023d4e9e55fecab53410f33a2b1bc76298b22348af8a78a48f43b \
230 | --hash=sha256:d9fcc15add8c48145f16d6a3a2ce91703c32af7412871759057b5ec16c734fdd
231 | # via -r requirements.txt
232 | paho-mqtt==2.1.0 \
233 | --hash=sha256:12d6e7511d4137555a3f6ea167ae846af2c7357b10bc6fa4f7c3968fc1723834 \
234 | --hash=sha256:6db9ba9b34ed5bc6b6e3812718c7e06e2fd7444540df2455d2c51bd58808feee
235 | # via
236 | # -r requirements.txt
237 | # flask-mqtt
238 | pyroute2==0.9.5 \
239 | --hash=sha256:a198ccbe545b031b00b10da4b44df33d548db04af944be8107c05a215ba03872 \
240 | --hash=sha256:e7d485ce8274cbf473e9092fa65585faf8a3df5fe05ecf497cb9c5b1516ba09f
241 | # via -r requirements.txt
242 | pyyaml==6.0.3 \
243 | --hash=sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c \
244 | --hash=sha256:0150219816b6a1fa26fb4699fb7daa9caf09eb1999f3b70fb6e786805e80375a \
245 | --hash=sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3 \
246 | --hash=sha256:02ea2dfa234451bbb8772601d7b8e426c2bfa197136796224e50e35a78777956 \
247 | --hash=sha256:0f29edc409a6392443abf94b9cf89ce99889a1dd5376d94316ae5145dfedd5d6 \
248 | --hash=sha256:10892704fc220243f5305762e276552a0395f7beb4dbf9b14ec8fd43b57f126c \
249 | --hash=sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65 \
250 | --hash=sha256:1d37d57ad971609cf3c53ba6a7e365e40660e3be0e5175fa9f2365a379d6095a \
251 | --hash=sha256:1ebe39cb5fc479422b83de611d14e2c0d3bb2a18bbcb01f229ab3cfbd8fee7a0 \
252 | --hash=sha256:214ed4befebe12df36bcc8bc2b64b396ca31be9304b8f59e25c11cf94a4c033b \
253 | --hash=sha256:2283a07e2c21a2aa78d9c4442724ec1eb15f5e42a723b99cb3d822d48f5f7ad1 \
254 | --hash=sha256:22ba7cfcad58ef3ecddc7ed1db3409af68d023b7f940da23c6c2a1890976eda6 \
255 | --hash=sha256:27c0abcb4a5dac13684a37f76e701e054692a9b2d3064b70f5e4eb54810553d7 \
256 | --hash=sha256:28c8d926f98f432f88adc23edf2e6d4921ac26fb084b028c733d01868d19007e \
257 | --hash=sha256:2e71d11abed7344e42a8849600193d15b6def118602c4c176f748e4583246007 \
258 | --hash=sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310 \
259 | --hash=sha256:37503bfbfc9d2c40b344d06b2199cf0e96e97957ab1c1b546fd4f87e53e5d3e4 \
260 | --hash=sha256:3c5677e12444c15717b902a5798264fa7909e41153cdf9ef7ad571b704a63dd9 \
261 | --hash=sha256:3ff07ec89bae51176c0549bc4c63aa6202991da2d9a6129d7aef7f1407d3f295 \
262 | --hash=sha256:41715c910c881bc081f1e8872880d3c650acf13dfa8214bad49ed4cede7c34ea \
263 | --hash=sha256:418cf3f2111bc80e0933b2cd8cd04f286338bb88bdc7bc8e6dd775ebde60b5e0 \
264 | --hash=sha256:44edc647873928551a01e7a563d7452ccdebee747728c1080d881d68af7b997e \
265 | --hash=sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac \
266 | --hash=sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9 \
267 | --hash=sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7 \
268 | --hash=sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35 \
269 | --hash=sha256:5498cd1645aa724a7c71c8f378eb29ebe23da2fc0d7a08071d89469bf1d2defb \
270 | --hash=sha256:5cf4e27da7e3fbed4d6c3d8e797387aaad68102272f8f9752883bc32d61cb87b \
271 | --hash=sha256:5e0b74767e5f8c593e8c9b5912019159ed0533c70051e9cce3e8b6aa699fcd69 \
272 | --hash=sha256:5ed875a24292240029e4483f9d4a4b8a1ae08843b9c54f43fcc11e404532a8a5 \
273 | --hash=sha256:5fcd34e47f6e0b794d17de1b4ff496c00986e1c83f7ab2fb8fcfe9616ff7477b \
274 | --hash=sha256:5fdec68f91a0c6739b380c83b951e2c72ac0197ace422360e6d5a959d8d97b2c \
275 | --hash=sha256:6344df0d5755a2c9a276d4473ae6b90647e216ab4757f8426893b5dd2ac3f369 \
276 | --hash=sha256:64386e5e707d03a7e172c0701abfb7e10f0fb753ee1d773128192742712a98fd \
277 | --hash=sha256:652cb6edd41e718550aad172851962662ff2681490a8a711af6a4d288dd96824 \
278 | --hash=sha256:66291b10affd76d76f54fad28e22e51719ef9ba22b29e1d7d03d6777a9174198 \
279 | --hash=sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065 \
280 | --hash=sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c \
281 | --hash=sha256:79005a0d97d5ddabfeeea4cf676af11e647e41d81c9a7722a193022accdb6b7c \
282 | --hash=sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764 \
283 | --hash=sha256:7f047e29dcae44602496db43be01ad42fc6f1cc0d8cd6c83d342306c32270196 \
284 | --hash=sha256:8098f252adfa6c80ab48096053f512f2321f0b998f98150cea9bd23d83e1467b \
285 | --hash=sha256:850774a7879607d3a6f50d36d04f00ee69e7fc816450e5f7e58d7f17f1ae5c00 \
286 | --hash=sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac \
287 | --hash=sha256:8da9669d359f02c0b91ccc01cac4a67f16afec0dac22c2ad09f46bee0697eba8 \
288 | --hash=sha256:8dc52c23056b9ddd46818a57b78404882310fb473d63f17b07d5c40421e47f8e \
289 | --hash=sha256:9149cad251584d5fb4981be1ecde53a1ca46c891a79788c0df828d2f166bda28 \
290 | --hash=sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3 \
291 | --hash=sha256:96b533f0e99f6579b3d4d4995707cf36df9100d67e0c8303a0c55b27b5f99bc5 \
292 | --hash=sha256:9c57bb8c96f6d1808c030b1687b9b5fb476abaa47f0db9c0101f5e9f394e97f4 \
293 | --hash=sha256:9c7708761fccb9397fe64bbc0395abcae8c4bf7b0eac081e12b809bf47700d0b \
294 | --hash=sha256:9f3bfb4965eb874431221a3ff3fdcddc7e74e3b07799e0e84ca4a0f867d449bf \
295 | --hash=sha256:a33284e20b78bd4a18c8c2282d549d10bc8408a2a7ff57653c0cf0b9be0afce5 \
296 | --hash=sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702 \
297 | --hash=sha256:b30236e45cf30d2b8e7b3e85881719e98507abed1011bf463a8fa23e9c3e98a8 \
298 | --hash=sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788 \
299 | --hash=sha256:b865addae83924361678b652338317d1bd7e79b1f4596f96b96c77a5a34b34da \
300 | --hash=sha256:b8bb0864c5a28024fac8a632c443c87c5aa6f215c0b126c449ae1a150412f31d \
301 | --hash=sha256:ba1cc08a7ccde2d2ec775841541641e4548226580ab850948cbfda66a1befcdc \
302 | --hash=sha256:bdb2c67c6c1390b63c6ff89f210c8fd09d9a1217a465701eac7316313c915e4c \
303 | --hash=sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba \
304 | --hash=sha256:c2514fceb77bc5e7a2f7adfaa1feb2fb311607c9cb518dbc378688ec73d8292f \
305 | --hash=sha256:c3355370a2c156cffb25e876646f149d5d68f5e0a3ce86a5084dd0b64a994917 \
306 | --hash=sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5 \
307 | --hash=sha256:d0eae10f8159e8fdad514efdc92d74fd8d682c933a6dd088030f3834bc8e6b26 \
308 | --hash=sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f \
309 | --hash=sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b \
310 | --hash=sha256:eda16858a3cab07b80edaf74336ece1f986ba330fdb8ee0d6c0d68fe82bc96be \
311 | --hash=sha256:ee2922902c45ae8ccada2c5b501ab86c36525b883eff4255313a253a3160861c \
312 | --hash=sha256:efd7b85f94a6f21e4932043973a7ba2613b059c4a000551892ac9f1d11f5baf3 \
313 | --hash=sha256:f7057c9a337546edc7973c0d3ba84ddcdf0daa14533c2065749c9075001090e6 \
314 | --hash=sha256:fa160448684b4e94d80416c0fa4aac48967a969efe22931448d853ada8baf926 \
315 | --hash=sha256:fc09d0aa354569bc501d4e787133afc08552722d3ab34836a80547331bb5d4a0
316 | # via -r requirements.txt
317 | waitress==3.0.2 \
318 | --hash=sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f \
319 | --hash=sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e
320 | # via -r requirements.txt
321 | werkzeug==3.1.4 \
322 | --hash=sha256:2ad50fb9ed09cc3af22c54698351027ace879a0b60a3b5edf5730b2f7d876905 \
323 | --hash=sha256:cd3cd98b1b92dc3b7b3995038826c68097dcb16f9baa63abe35f20eafeb9fe5e
324 | # via flask
325 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 | Preamble
9 |
10 | The GNU General Public License is a free, copyleft license for
11 | software and other kinds of works.
12 |
13 | The licenses for most software and other practical works are designed
14 | to take away your freedom to share and change the works. By contrast,
15 | the GNU General Public License is intended to guarantee your freedom to
16 | share and change all versions of a program--to make sure it remains free
17 | software for all its users. We, the Free Software Foundation, use the
18 | GNU General Public License for most of our software; it applies also to
19 | any other work released this way by its authors. You can apply it to
20 | your programs, too.
21 |
22 | When we speak of free software, we are referring to freedom, not
23 | price. Our General Public Licenses are designed to make sure that you
24 | have the freedom to distribute copies of free software (and charge for
25 | them if you wish), that you receive source code or can get it if you
26 | want it, that you can change the software or use pieces of it in new
27 | free programs, and that you know you can do these things.
28 |
29 | To protect your rights, we need to prevent others from denying you
30 | these rights or asking you to surrender the rights. Therefore, you have
31 | certain responsibilities if you distribute copies of the software, or if
32 | you modify it: responsibilities to respect the freedom of others.
33 |
34 | For example, if you distribute copies of such a program, whether
35 | gratis or for a fee, you must pass on to the recipients the same
36 | freedoms that you received. You must make sure that they, too, receive
37 | or can get the source code. And you must show them these terms so they
38 | know their rights.
39 |
40 | Developers that use the GNU GPL protect your rights with two steps:
41 | (1) assert copyright on the software, and (2) offer you this License
42 | giving you legal permission to copy, distribute and/or modify it.
43 |
44 | For the developers' and authors' protection, the GPL clearly explains
45 | that there is no warranty for this free software. For both users' and
46 | authors' sake, the GPL requires that modified versions be marked as
47 | changed, so that their problems will not be attributed erroneously to
48 | authors of previous versions.
49 |
50 | Some devices are designed to deny users access to install or run
51 | modified versions of the software inside them, although the manufacturer
52 | can do so. This is fundamentally incompatible with the aim of
53 | protecting users' freedom to change the software. The systematic
54 | pattern of such abuse occurs in the area of products for individuals to
55 | use, which is precisely where it is most unacceptable. Therefore, we
56 | have designed this version of the GPL to prohibit the practice for those
57 | products. If such problems arise substantially in other domains, we
58 | stand ready to extend this provision to those domains in future versions
59 | of the GPL, as needed to protect the freedom of users.
60 |
61 | Finally, every program is threatened constantly by software patents.
62 | States should not allow patents to restrict development and use of
63 | software on general-purpose computers, but in those that do, we wish to
64 | avoid the special danger that patents applied to a free program could
65 | make it effectively proprietary. To prevent this, the GPL assures that
66 | patents cannot be used to render the program non-free.
67 |
68 | The precise terms and conditions for copying, distribution and
69 | modification follow.
70 |
71 | TERMS AND CONDITIONS
72 |
73 | 0. Definitions.
74 |
75 | "This License" refers to version 3 of the GNU General Public License.
76 |
77 | "Copyright" also means copyright-like laws that apply to other kinds of
78 | works, such as semiconductor masks.
79 |
80 | "The Program" refers to any copyrightable work licensed under this
81 | License. Each licensee is addressed as "you". "Licensees" and
82 | "recipients" may be individuals or organizations.
83 |
84 | To "modify" a work means to copy from or adapt all or part of the work
85 | in a fashion requiring copyright permission, other than the making of an
86 | exact copy. The resulting work is called a "modified version" of the
87 | earlier work or a work "based on" the earlier work.
88 |
89 | A "covered work" means either the unmodified Program or a work based
90 | on the Program.
91 |
92 | To "propagate" a work means to do anything with it that, without
93 | permission, would make you directly or secondarily liable for
94 | infringement under applicable copyright law, except executing it on a
95 | computer or modifying a private copy. Propagation includes copying,
96 | distribution (with or without modification), making available to the
97 | public, and in some countries other activities as well.
98 |
99 | To "convey" a work means any kind of propagation that enables other
100 | parties to make or receive copies. Mere interaction with a user through
101 | a computer network, with no transfer of a copy, is not conveying.
102 |
103 | An interactive user interface displays "Appropriate Legal Notices"
104 | to the extent that it includes a convenient and prominently visible
105 | feature that (1) displays an appropriate copyright notice, and (2)
106 | tells the user that there is no warranty for the work (except to the
107 | extent that warranties are provided), that licensees may convey the
108 | work under this License, and how to view a copy of this License. If
109 | the interface presents a list of user commands or options, such as a
110 | menu, a prominent item in the list meets this criterion.
111 |
112 | 1. Source Code.
113 |
114 | The "source code" for a work means the preferred form of the work
115 | for making modifications to it. "Object code" means any non-source
116 | form of a work.
117 |
118 | A "Standard Interface" means an interface that either is an official
119 | standard defined by a recognized standards body, or, in the case of
120 | interfaces specified for a particular programming language, one that
121 | is widely used among developers working in that language.
122 |
123 | The "System Libraries" of an executable work include anything, other
124 | than the work as a whole, that (a) is included in the normal form of
125 | packaging a Major Component, but which is not part of that Major
126 | Component, and (b) serves only to enable use of the work with that
127 | Major Component, or to implement a Standard Interface for which an
128 | implementation is available to the public in source code form. A
129 | "Major Component", in this context, means a major essential component
130 | (kernel, window system, and so on) of the specific operating system
131 | (if any) on which the executable work runs, or a compiler used to
132 | produce the work, or an object code interpreter used to run it.
133 |
134 | The "Corresponding Source" for a work in object code form means all
135 | the source code needed to generate, install, and (for an executable
136 | work) run the object code and to modify the work, including scripts to
137 | control those activities. However, it does not include the work's
138 | System Libraries, or general-purpose tools or generally available free
139 | programs which are used unmodified in performing those activities but
140 | which are not part of the work. For example, Corresponding Source
141 | includes interface definition files associated with source files for
142 | the work, and the source code for shared libraries and dynamically
143 | linked subprograms that the work is specifically designed to require,
144 | such as by intimate data communication or control flow between those
145 | subprograms and other parts of the work.
146 |
147 | The Corresponding Source need not include anything that users
148 | can regenerate automatically from other parts of the Corresponding
149 | Source.
150 |
151 | The Corresponding Source for a work in source code form is that
152 | same work.
153 |
154 | 2. Basic Permissions.
155 |
156 | All rights granted under this License are granted for the term of
157 | copyright on the Program, and are irrevocable provided the stated
158 | conditions are met. This License explicitly affirms your unlimited
159 | permission to run the unmodified Program. The output from running a
160 | covered work is covered by this License only if the output, given its
161 | content, constitutes a covered work. This License acknowledges your
162 | rights of fair use or other equivalent, as provided by copyright law.
163 |
164 | You may make, run and propagate covered works that you do not
165 | convey, without conditions so long as your license otherwise remains
166 | in force. You may convey covered works to others for the sole purpose
167 | of having them make modifications exclusively for you, or provide you
168 | with facilities for running those works, provided that you comply with
169 | the terms of this License in conveying all material for which you do
170 | not control copyright. Those thus making or running the covered works
171 | for you must do so exclusively on your behalf, under your direction
172 | and control, on terms that prohibit them from making any copies of
173 | your copyrighted material outside their relationship with you.
174 |
175 | Conveying under any other circumstances is permitted solely under
176 | the conditions stated below. Sublicensing is not allowed; section 10
177 | makes it unnecessary.
178 |
179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
180 |
181 | No covered work shall be deemed part of an effective technological
182 | measure under any applicable law fulfilling obligations under article
183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or
184 | similar laws prohibiting or restricting circumvention of such
185 | measures.
186 |
187 | When you convey a covered work, you waive any legal power to forbid
188 | circumvention of technological measures to the extent such circumvention
189 | is effected by exercising rights under this License with respect to
190 | the covered work, and you disclaim any intention to limit operation or
191 | modification of the work as a means of enforcing, against the work's
192 | users, your or third parties' legal rights to forbid circumvention of
193 | technological measures.
194 |
195 | 4. Conveying Verbatim Copies.
196 |
197 | You may convey verbatim copies of the Program's source code as you
198 | receive it, in any medium, provided that you conspicuously and
199 | appropriately publish on each copy an appropriate copyright notice;
200 | keep intact all notices stating that this License and any
201 | non-permissive terms added in accord with section 7 apply to the code;
202 | keep intact all notices of the absence of any warranty; and give all
203 | recipients a copy of this License along with the Program.
204 |
205 | You may charge any price or no price for each copy that you convey,
206 | and you may offer support or warranty protection for a fee.
207 |
208 | 5. Conveying Modified Source Versions.
209 |
210 | You may convey a work based on the Program, or the modifications to
211 | produce it from the Program, in the form of source code under the
212 | terms of section 4, provided that you also meet all of these conditions:
213 |
214 | a) The work must carry prominent notices stating that you modified
215 | it, and giving a relevant date.
216 |
217 | b) The work must carry prominent notices stating that it is
218 | released under this License and any conditions added under section
219 | 7. This requirement modifies the requirement in section 4 to
220 | "keep intact all notices".
221 |
222 | c) You must license the entire work, as a whole, under this
223 | License to anyone who comes into possession of a copy. This
224 | License will therefore apply, along with any applicable section 7
225 | additional terms, to the whole of the work, and all its parts,
226 | regardless of how they are packaged. This License gives no
227 | permission to license the work in any other way, but it does not
228 | invalidate such permission if you have separately received it.
229 |
230 | d) If the work has interactive user interfaces, each must display
231 | Appropriate Legal Notices; however, if the Program has interactive
232 | interfaces that do not display Appropriate Legal Notices, your
233 | work need not make them do so.
234 |
235 | A compilation of a covered work with other separate and independent
236 | works, which are not by their nature extensions of the covered work,
237 | and which are not combined with it such as to form a larger program,
238 | in or on a volume of a storage or distribution medium, is called an
239 | "aggregate" if the compilation and its resulting copyright are not
240 | used to limit the access or legal rights of the compilation's users
241 | beyond what the individual works permit. Inclusion of a covered work
242 | in an aggregate does not cause this License to apply to the other
243 | parts of the aggregate.
244 |
245 | 6. Conveying Non-Source Forms.
246 |
247 | You may convey a covered work in object code form under the terms
248 | of sections 4 and 5, provided that you also convey the
249 | machine-readable Corresponding Source under the terms of this License,
250 | in one of these ways:
251 |
252 | a) Convey the object code in, or embodied in, a physical product
253 | (including a physical distribution medium), accompanied by the
254 | Corresponding Source fixed on a durable physical medium
255 | customarily used for software interchange.
256 |
257 | b) Convey the object code in, or embodied in, a physical product
258 | (including a physical distribution medium), accompanied by a
259 | written offer, valid for at least three years and valid for as
260 | long as you offer spare parts or customer support for that product
261 | model, to give anyone who possesses the object code either (1) a
262 | copy of the Corresponding Source for all the software in the
263 | product that is covered by this License, on a durable physical
264 | medium customarily used for software interchange, for a price no
265 | more than your reasonable cost of physically performing this
266 | conveying of source, or (2) access to copy the
267 | Corresponding Source from a network server at no charge.
268 |
269 | c) Convey individual copies of the object code with a copy of the
270 | written offer to provide the Corresponding Source. This
271 | alternative is allowed only occasionally and noncommercially, and
272 | only if you received the object code with such an offer, in accord
273 | with subsection 6b.
274 |
275 | d) Convey the object code by offering access from a designated
276 | place (gratis or for a charge), and offer equivalent access to the
277 | Corresponding Source in the same way through the same place at no
278 | further charge. You need not require recipients to copy the
279 | Corresponding Source along with the object code. If the place to
280 | copy the object code is a network server, the Corresponding Source
281 | may be on a different server (operated by you or a third party)
282 | that supports equivalent copying facilities, provided you maintain
283 | clear directions next to the object code saying where to find the
284 | Corresponding Source. Regardless of what server hosts the
285 | Corresponding Source, you remain obligated to ensure that it is
286 | available for as long as needed to satisfy these requirements.
287 |
288 | e) Convey the object code using peer-to-peer transmission, provided
289 | you inform other peers where the object code and Corresponding
290 | Source of the work are being offered to the general public at no
291 | charge under subsection 6d.
292 |
293 | A separable portion of the object code, whose source code is excluded
294 | from the Corresponding Source as a System Library, need not be
295 | included in conveying the object code work.
296 |
297 | A "User Product" is either (1) a "consumer product", which means any
298 | tangible personal property which is normally used for personal, family,
299 | or household purposes, or (2) anything designed or sold for incorporation
300 | into a dwelling. In determining whether a product is a consumer product,
301 | doubtful cases shall be resolved in favor of coverage. For a particular
302 | product received by a particular user, "normally used" refers to a
303 | typical or common use of that class of product, regardless of the status
304 | of the particular user or of the way in which the particular user
305 | actually uses, or expects or is expected to use, the product. A product
306 | is a consumer product regardless of whether the product has substantial
307 | commercial, industrial or non-consumer uses, unless such uses represent
308 | the only significant mode of use of the product.
309 |
310 | "Installation Information" for a User Product means any methods,
311 | procedures, authorization keys, or other information required to install
312 | and execute modified versions of a covered work in that User Product from
313 | a modified version of its Corresponding Source. The information must
314 | suffice to ensure that the continued functioning of the modified object
315 | code is in no case prevented or interfered with solely because
316 | modification has been made.
317 |
318 | If you convey an object code work under this section in, or with, or
319 | specifically for use in, a User Product, and the conveying occurs as
320 | part of a transaction in which the right of possession and use of the
321 | User Product is transferred to the recipient in perpetuity or for a
322 | fixed term (regardless of how the transaction is characterized), the
323 | Corresponding Source conveyed under this section must be accompanied
324 | by the Installation Information. But this requirement does not apply
325 | if neither you nor any third party retains the ability to install
326 | modified object code on the User Product (for example, the work has
327 | been installed in ROM).
328 |
329 | The requirement to provide Installation Information does not include a
330 | requirement to continue to provide support service, warranty, or updates
331 | for a work that has been modified or installed by the recipient, or for
332 | the User Product in which it has been modified or installed. Access to a
333 | network may be denied when the modification itself materially and
334 | adversely affects the operation of the network or violates the rules and
335 | protocols for communication across the network.
336 |
337 | Corresponding Source conveyed, and Installation Information provided,
338 | in accord with this section must be in a format that is publicly
339 | documented (and with an implementation available to the public in
340 | source code form), and must require no special password or key for
341 | unpacking, reading or copying.
342 |
343 | 7. Additional Terms.
344 |
345 | "Additional permissions" are terms that supplement the terms of this
346 | License by making exceptions from one or more of its conditions.
347 | Additional permissions that are applicable to the entire Program shall
348 | be treated as though they were included in this License, to the extent
349 | that they are valid under applicable law. If additional permissions
350 | apply only to part of the Program, that part may be used separately
351 | under those permissions, but the entire Program remains governed by
352 | this License without regard to the additional permissions.
353 |
354 | When you convey a copy of a covered work, you may at your option
355 | remove any additional permissions from that copy, or from any part of
356 | it. (Additional permissions may be written to require their own
357 | removal in certain cases when you modify the work.) You may place
358 | additional permissions on material, added by you to a covered work,
359 | for which you have or can give appropriate copyright permission.
360 |
361 | Notwithstanding any other provision of this License, for material you
362 | add to a covered work, you may (if authorized by the copyright holders of
363 | that material) supplement the terms of this License with terms:
364 |
365 | a) Disclaiming warranty or limiting liability differently from the
366 | terms of sections 15 and 16 of this License; or
367 |
368 | b) Requiring preservation of specified reasonable legal notices or
369 | author attributions in that material or in the Appropriate Legal
370 | Notices displayed by works containing it; or
371 |
372 | c) Prohibiting misrepresentation of the origin of that material, or
373 | requiring that modified versions of such material be marked in
374 | reasonable ways as different from the original version; or
375 |
376 | d) Limiting the use for publicity purposes of names of licensors or
377 | authors of the material; or
378 |
379 | e) Declining to grant rights under trademark law for use of some
380 | trade names, trademarks, or service marks; or
381 |
382 | f) Requiring indemnification of licensors and authors of that
383 | material by anyone who conveys the material (or modified versions of
384 | it) with contractual assumptions of liability to the recipient, for
385 | any liability that these contractual assumptions directly impose on
386 | those licensors and authors.
387 |
388 | All other non-permissive additional terms are considered "further
389 | restrictions" within the meaning of section 10. If the Program as you
390 | received it, or any part of it, contains a notice stating that it is
391 | governed by this License along with a term that is a further
392 | restriction, you may remove that term. If a license document contains
393 | a further restriction but permits relicensing or conveying under this
394 | License, you may add to a covered work material governed by the terms
395 | of that license document, provided that the further restriction does
396 | not survive such relicensing or conveying.
397 |
398 | If you add terms to a covered work in accord with this section, you
399 | must place, in the relevant source files, a statement of the
400 | additional terms that apply to those files, or a notice indicating
401 | where to find the applicable terms.
402 |
403 | Additional terms, permissive or non-permissive, may be stated in the
404 | form of a separately written license, or stated as exceptions;
405 | the above requirements apply either way.
406 |
407 | 8. Termination.
408 |
409 | You may not propagate or modify a covered work except as expressly
410 | provided under this License. Any attempt otherwise to propagate or
411 | modify it is void, and will automatically terminate your rights under
412 | this License (including any patent licenses granted under the third
413 | paragraph of section 11).
414 |
415 | However, if you cease all violation of this License, then your
416 | license from a particular copyright holder is reinstated (a)
417 | provisionally, unless and until the copyright holder explicitly and
418 | finally terminates your license, and (b) permanently, if the copyright
419 | holder fails to notify you of the violation by some reasonable means
420 | prior to 60 days after the cessation.
421 |
422 | Moreover, your license from a particular copyright holder is
423 | reinstated permanently if the copyright holder notifies you of the
424 | violation by some reasonable means, this is the first time you have
425 | received notice of violation of this License (for any work) from that
426 | copyright holder, and you cure the violation prior to 30 days after
427 | your receipt of the notice.
428 |
429 | Termination of your rights under this section does not terminate the
430 | licenses of parties who have received copies or rights from you under
431 | this License. If your rights have been terminated and not permanently
432 | reinstated, you do not qualify to receive new licenses for the same
433 | material under section 10.
434 |
435 | 9. Acceptance Not Required for Having Copies.
436 |
437 | You are not required to accept this License in order to receive or
438 | run a copy of the Program. Ancillary propagation of a covered work
439 | occurring solely as a consequence of using peer-to-peer transmission
440 | to receive a copy likewise does not require acceptance. However,
441 | nothing other than this License grants you permission to propagate or
442 | modify any covered work. These actions infringe copyright if you do
443 | not accept this License. Therefore, by modifying or propagating a
444 | covered work, you indicate your acceptance of this License to do so.
445 |
446 | 10. Automatic Licensing of Downstream Recipients.
447 |
448 | Each time you convey a covered work, the recipient automatically
449 | receives a license from the original licensors, to run, modify and
450 | propagate that work, subject to this License. You are not responsible
451 | for enforcing compliance by third parties with this License.
452 |
453 | An "entity transaction" is a transaction transferring control of an
454 | organization, or substantially all assets of one, or subdividing an
455 | organization, or merging organizations. If propagation of a covered
456 | work results from an entity transaction, each party to that
457 | transaction who receives a copy of the work also receives whatever
458 | licenses to the work the party's predecessor in interest had or could
459 | give under the previous paragraph, plus a right to possession of the
460 | Corresponding Source of the work from the predecessor in interest, if
461 | the predecessor has it or can get it with reasonable efforts.
462 |
463 | You may not impose any further restrictions on the exercise of the
464 | rights granted or affirmed under this License. For example, you may
465 | not impose a license fee, royalty, or other charge for exercise of
466 | rights granted under this License, and you may not initiate litigation
467 | (including a cross-claim or counterclaim in a lawsuit) alleging that
468 | any patent claim is infringed by making, using, selling, offering for
469 | sale, or importing the Program or any portion of it.
470 |
471 | 11. Patents.
472 |
473 | A "contributor" is a copyright holder who authorizes use under this
474 | License of the Program or a work on which the Program is based. The
475 | work thus licensed is called the contributor's "contributor version".
476 |
477 | A contributor's "essential patent claims" are all patent claims
478 | owned or controlled by the contributor, whether already acquired or
479 | hereafter acquired, that would be infringed by some manner, permitted
480 | by this License, of making, using, or selling its contributor version,
481 | but do not include claims that would be infringed only as a
482 | consequence of further modification of the contributor version. For
483 | purposes of this definition, "control" includes the right to grant
484 | patent sublicenses in a manner consistent with the requirements of
485 | this License.
486 |
487 | Each contributor grants you a non-exclusive, worldwide, royalty-free
488 | patent license under the contributor's essential patent claims, to
489 | make, use, sell, offer for sale, import and otherwise run, modify and
490 | propagate the contents of its contributor version.
491 |
492 | In the following three paragraphs, a "patent license" is any express
493 | agreement or commitment, however denominated, not to enforce a patent
494 | (such as an express permission to practice a patent or covenant not to
495 | sue for patent infringement). To "grant" such a patent license to a
496 | party means to make such an agreement or commitment not to enforce a
497 | patent against the party.
498 |
499 | If you convey a covered work, knowingly relying on a patent license,
500 | and the Corresponding Source of the work is not available for anyone
501 | to copy, free of charge and under the terms of this License, through a
502 | publicly available network server or other readily accessible means,
503 | then you must either (1) cause the Corresponding Source to be so
504 | available, or (2) arrange to deprive yourself of the benefit of the
505 | patent license for this particular work, or (3) arrange, in a manner
506 | consistent with the requirements of this License, to extend the patent
507 | license to downstream recipients. "Knowingly relying" means you have
508 | actual knowledge that, but for the patent license, your conveying the
509 | covered work in a country, or your recipient's use of the covered work
510 | in a country, would infringe one or more identifiable patents in that
511 | country that you have reason to believe are valid.
512 |
513 | If, pursuant to or in connection with a single transaction or
514 | arrangement, you convey, or propagate by procuring conveyance of, a
515 | covered work, and grant a patent license to some of the parties
516 | receiving the covered work authorizing them to use, propagate, modify
517 | or convey a specific copy of the covered work, then the patent license
518 | you grant is automatically extended to all recipients of the covered
519 | work and works based on it.
520 |
521 | A patent license is "discriminatory" if it does not include within
522 | the scope of its coverage, prohibits the exercise of, or is
523 | conditioned on the non-exercise of one or more of the rights that are
524 | specifically granted under this License. You may not convey a covered
525 | work if you are a party to an arrangement with a third party that is
526 | in the business of distributing software, under which you make payment
527 | to the third party based on the extent of your activity of conveying
528 | the work, and under which the third party grants, to any of the
529 | parties who would receive the covered work from you, a discriminatory
530 | patent license (a) in connection with copies of the covered work
531 | conveyed by you (or copies made from those copies), or (b) primarily
532 | for and in connection with specific products or compilations that
533 | contain the covered work, unless you entered into that arrangement,
534 | or that patent license was granted, prior to 28 March 2007.
535 |
536 | Nothing in this License shall be construed as excluding or limiting
537 | any implied license or other defenses to infringement that may
538 | otherwise be available to you under applicable patent law.
539 |
540 | 12. No Surrender of Others' Freedom.
541 |
542 | If conditions are imposed on you (whether by court order, agreement or
543 | otherwise) that contradict the conditions of this License, they do not
544 | excuse you from the conditions of this License. If you cannot convey a
545 | covered work so as to satisfy simultaneously your obligations under this
546 | License and any other pertinent obligations, then as a consequence you may
547 | not convey it at all. For example, if you agree to terms that obligate you
548 | to collect a royalty for further conveying from those to whom you convey
549 | the Program, the only way you could satisfy both those terms and this
550 | License would be to refrain entirely from conveying the Program.
551 |
552 | 13. Use with the GNU Affero General Public License.
553 |
554 | Notwithstanding any other provision of this License, you have
555 | permission to link or combine any covered work with a work licensed
556 | under version 3 of the GNU Affero General Public License into a single
557 | combined work, and to convey the resulting work. The terms of this
558 | License will continue to apply to the part which is the covered work,
559 | but the special requirements of the GNU Affero General Public License,
560 | section 13, concerning interaction through a network will apply to the
561 | combination as such.
562 |
563 | 14. Revised Versions of this License.
564 |
565 | The Free Software Foundation may publish revised and/or new versions of
566 | the GNU General Public License from time to time. Such new versions will
567 | be similar in spirit to the present version, but may differ in detail to
568 | address new problems or concerns.
569 |
570 | Each version is given a distinguishing version number. If the
571 | Program specifies that a certain numbered version of the GNU General
572 | Public License "or any later version" applies to it, you have the
573 | option of following the terms and conditions either of that numbered
574 | version or of any later version published by the Free Software
575 | Foundation. If the Program does not specify a version number of the
576 | GNU General Public License, you may choose any version ever published
577 | by the Free Software Foundation.
578 |
579 | If the Program specifies that a proxy can decide which future
580 | versions of the GNU General Public License can be used, that proxy's
581 | public statement of acceptance of a version permanently authorizes you
582 | to choose that version for the Program.
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 |
 635 |     Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
 648 |     along with this program.  If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
 655 |     <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
 667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
 674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------