├── .github
│   └── workflows
│       ├── ci-cd.yml
│       ├── ci-cd.yml.trouble1
│       └── ci-cd.yml.trouble2
├── .markdownlint.json
├── Dockerfile
├── LICENSE
├── README.md
├── examples
│   ├── blocky
│   │   ├── blocky_config
│   │   │   ├── config.yml
│   │   │   ├── local.allow.txt
│   │   │   └── local.block.txt
│   │   ├── docker-compose.blocky.yml
│   │   └── gluetun_config
│   │       └── post-rules.txt
│   ├── docker-compose
│   │   ├── docker-compose.freevpn.us.yml
│   │   ├── docker-compose.gluetun_basic.yml
│   │   └── docker-compose.yml
│   ├── env.example
│   ├── haproxy
│   │   └── haproxy.cfg
│   ├── systemd
│   │   └── randomizer.service
│   └── unbound
│       ├── docker-compose.unbound.yml
│       └── unbound_config
│           ├── conf.d
│           │   ├── README
│           │   └── local.conf
│           └── unbound.conf
├── images
│   ├── gluetun_vpn_randomizer.png
│   └── random_image.png
├── randomizer
├── randomizer.yml
├── requirements-ci.txt
└── requirements.txt
/.github/workflows/ci-cd.yml:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline - randomizer
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 |
9 | permissions: read-all
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 |
15 | # permissions:
16 | # contents: read
17 | # packages: read
18 | # # To report GitHub Actions status checks
19 | # # statuses: write
20 |
21 | env:
22 | DOCKER_USERNAME: ingestbot
23 | DOCKER_IMAGE_NAME: randomizer
24 |
25 | steps:
26 | - name: get user with UID 1000
27 | run: |
28 | getent passwd 1000
29 |
30 | - name: who am i and id
31 | run: |
32 | echo "$USER"
33 | whoami
34 | id "$USER"
35 |
36 | - name: Get Docker group ID
37 | id: get_docker_gid
38 | run: |
39 | DOCKER_GID=$(getent group docker | cut -d: -f3)
40 | echo "DOCKER_GID=${DOCKER_GID}" >> "$GITHUB_ENV"
41 |
42 | - name: Check Docker Socket
43 | run: ls -l /var/run/docker.sock || echo "Docker socket not found"
44 |
45 | - name: Set up environment variables
46 | run: echo "SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)" >> "$GITHUB_ENV"
47 |
48 | - name: Check out code
49 | uses: actions/checkout@v4
50 | with:
51 | fetch-depth: 0
52 |
53 | - name: Install Dependencies
54 | # run: pip install -r requirements-ci.txt
55 | run: |
56 | python -m venv venv
57 | source venv/bin/activate
58 | pip install --upgrade pip
59 | pip install -r requirements-ci.txt
60 |
61 | - name: TruffleHog OSS
62 | uses: trufflesecurity/trufflehog@v3.82.8
63 |
64 | - name: Super-Linter
65 | uses: super-linter/super-linter@v7.1.0
66 | env:
67 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
68 | VALIDATE_PYTHON_PYLINT: false
69 | VALIDATE_JSCPD: false
70 | VALIDATE_YAML: false
71 |
72 | #- name: Run tests
73 | # run: |
74 | # source venv/bin/activate
75 | # PYTHONPATH=. pytest tests/test_main.py -vv
76 |
77 | - name: Docker Login
78 | uses: docker/login-action@v3.3.0
79 | with:
80 | username: ${{ secrets.DOCKER_USERNAME }}
81 | # password: ${{ secrets.DOCKER_PASSWORD }}
82 | password: ${{ secrets.DOCKER_HUB_TOKEN }}
83 |
84 | #
85 | # https://github.com/marketplace/actions/docker-setup-buildx
86 | #
87 | - name: Set up Docker Buildx
88 | uses: docker/setup-buildx-action@v3
89 |
90 | - name: build local container
91 | uses: docker/build-push-action@v4
92 | with:
93 | tags: localbuild/testimage
94 | push: false
95 | load: true
96 |
97 | - name: Anchore Container Scan
98 | uses: anchore/scan-action@v5.0.0
99 | with:
100 | image: "localbuild/testimage"
101 |
102 | - name: Run the Docker container | capture logs | verify logs
103 | run: |
104 | # container_id=$(docker run -d --group-add 118 -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
105 | container_id=$(docker run -d --group-add "$DOCKER_GID" -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
106 | timeout=180
107 | interval=5
108 | elapsed=0
109 |
110 | while [ "$elapsed" -lt "$timeout" ]; do
111 | if docker logs "$container_id" 2>&1 | grep -q "Starting randomizer"; then
112 | echo "PASS"
113 | exit 0
114 | fi
115 | sleep "$interval"
116 | elapsed=$((elapsed + interval))
117 | done
118 |
119 | echo "FAIL: expected logging not found within the timeout period."
120 | docker logs "$container_id"
121 | exit 1
122 |
123 | - name: Build and push Docker image
124 | uses: docker/build-push-action@v4
125 | with:
126 | context: .
127 | file: Dockerfile
128 | push: true
129 | tags: |
130 | ${{ env.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE_NAME }}:latest
131 | ${{ env.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE_NAME }}:${{ env.SHORT_SHA }}
132 |
--------------------------------------------------------------------------------
/.github/workflows/ci-cd.yml.trouble1:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline - randomizer
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 |
9 | permissions: read-all
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 |
15 | # permissions:
16 | # contents: read
17 | # packages: read
18 | # # To report GitHub Actions status checks
19 | # # statuses: write
20 |
21 | env:
22 | DOCKER_USERNAME: ingestbot
23 | DOCKER_IMAGE_NAME: randomizer
24 |
25 | #services:
26 | # docker:
27 | # image: docker:19.03.12
28 | # options: >-
29 | # --privileged
30 | # --name docker
31 | # -v /var/run/docker.sock:/var/run/docker.sock
32 |
33 | steps:
34 | - name: Set up environment variables
35 | run: echo "SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)" >> "$GITHUB_ENV"
36 |
37 | - name: Check out code
38 | uses: actions/checkout@v4
39 | with:
40 | fetch-depth: 0
41 |
42 | - name: Install Dependencies
43 | # run: pip install -r requirements-ci.txt
44 | run: |
45 | python -m venv venv
46 | source venv/bin/activate
47 | pip install --upgrade pip
48 | pip install -r requirements-ci.txt
49 |
50 | - name: TruffleHog OSS
51 | uses: trufflesecurity/trufflehog@v3.82.8
52 |
53 | #- name: Super-Linter
54 | # uses: super-linter/super-linter@v7.1.0
55 | # env:
56 | # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
57 | # VALIDATE_PYTHON_PYLINT: false
58 | # VALIDATE_JSCPD: false
59 | # # VALIDATE_MARKDOWN: false
60 |
61 | #- name: Run tests
62 | # run: |
63 | # source venv/bin/activate
64 | # PYTHONPATH=. pytest tests/test_main.py -vv
65 |
66 | - name: Docker Login
67 | uses: docker/login-action@v3.3.0
68 | with:
69 | username: ${{ secrets.DOCKER_USERNAME }}
70 | password: ${{ secrets.DOCKER_PASSWORD }}
71 |
72 | #
73 | # https://github.com/marketplace/actions/docker-setup-buildx
74 | #
75 | - name: Set up Docker Buildx
76 | uses: docker/setup-buildx-action@v3
77 |
78 | - name: build local container
79 | uses: docker/build-push-action@v4
80 | with:
81 | tags: localbuild/testimage
82 | push: false
83 | load: true
84 |
85 | #- name: Anchore Container Scan
86 | # uses: anchore/scan-action@v5.0.0
87 | # with:
88 | # image: "localbuild/testimage"
89 |
90 | #- name: Run Docker Container
91 | # id: run_container
92 | # run: |
93 | # # container_id=$(docker run -d localbuild/testimage)
94 | # container_id=$(docker run -d -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
95 | # echo "Container ID: $container_id"
96 | # echo "::set-output name=id::$container_id"
97 |
98 | - name: Check Docker Socket
99 | run: ls -l /var/run/docker.sock || echo "Docker socket not found"
100 |
101 | - name: Run Docker Container with Logs Capture
102 | id: run_container
103 | run: |
104 | set +e # Disable immediate exit on error
105 | container_id=$(docker run -d -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
106 | echo "Container ID: $container_id"
107 |
108 | # Wait for the container to exit
109 | exit_code=$(docker wait "$container_id")
110 |
111 | echo "Container exited with status $exit_code"
112 | # Capture logs if the container exited with an error
113 | if [ "$exit_code" -ne 0 ]; then
114 | echo "Fetching logs from container $container_id"
115 | docker logs "$container_id" || true
116 | fi
117 |
118 | - name: Run the Docker container | capture logs | verify logging
119 | run: |
120 | # container_id=$(docker run -d localbuild/testimage)
121 | timeout=180
122 | interval=5
123 | elapsed=0
124 | container_id=${{ steps.run_container.outputs.id }}
125 |
126 | while [ "$elapsed" -lt "$timeout" ]; do
127 | if docker logs "$container_id" 2>&1 | grep -q "Starting randomizer"; then
128 | echo "PASS"
129 | exit 0
130 | fi
131 | sleep "$interval"
132 | elapsed=$((elapsed + interval))
133 | done
134 |
135 | echo "FAIL: Expected logging not found within the timeout period."
136 | docker logs "$container_id"
137 | exit 1
138 |
139 | #- name: Show Container Logs
140 | # run: |
141 | # container_id=${{ steps.run_container.outputs.id }}
142 | # echo "Container Logs:"
143 | # docker logs "$container_id" || true
144 |
145 | - name: Build and push Docker image
146 | uses: docker/build-push-action@v4
147 | with:
148 | context: .
149 | file: Dockerfile
150 | push: true
151 | tags: |
152 | ${{ env.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE_NAME }}:latest
153 | ${{ env.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE_NAME }}:${{ env.SHORT_SHA }}
154 |
--------------------------------------------------------------------------------
/.github/workflows/ci-cd.yml.trouble2:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline - randomizer
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 |
9 | permissions: read-all
10 |
11 | jobs:
12 | build:
13 | runs-on: ubuntu-latest
14 |
15 | # permissions:
16 | # contents: read
17 | # packages: read
18 | # # To report GitHub Actions status checks
19 | # # statuses: write
20 |
21 | env:
22 | DOCKER_USERNAME: ingestbot
23 | DOCKER_IMAGE_NAME: randomizer
24 |
25 | #services:
26 | # docker:
27 | # image: docker:19.03.12
28 | # options: >-
29 | # --privileged
30 | # --name docker
31 | # -v /var/run/docker.sock:/var/run/docker.sock
32 |
33 | steps:
34 | - name: Set up environment variables
35 | run: echo "SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)" >> "$GITHUB_ENV"
36 |
37 | - name: Check out code
38 | uses: actions/checkout@v4
39 | with:
40 | fetch-depth: 0
41 |
42 | - name: Install Dependencies
43 | # run: pip install -r requirements-ci.txt
44 | run: |
45 | python -m venv venv
46 | source venv/bin/activate
47 | pip install --upgrade pip
48 | pip install -r requirements-ci.txt
49 |
50 | #- name: get user with UID 1000
51 | # run: |
52 | # getent passwd 1000
53 |
54 | #- name: who am i and id
55 | # run: |
56 | # echo $USER
57 | # whoami
58 | # id $USER
59 |
60 | #- name: TruffleHog OSS
61 | # uses: trufflesecurity/trufflehog@v3.82.8
62 |
63 | #- name: Super-Linter
64 | # uses: super-linter/super-linter@v7.1.0
65 | # env:
66 | # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
67 | # VALIDATE_PYTHON_PYLINT: false
68 | # VALIDATE_JSCPD: false
69 | # # VALIDATE_MARKDOWN: false
70 |
71 | #- name: Run tests
72 | # run: |
73 | # source venv/bin/activate
74 | # PYTHONPATH=. pytest tests/test_main.py -vv
75 |
76 | #
77 | # https://github.com/marketplace/actions/docker-setup-buildx
78 | #
79 | - name: Set up Docker Buildx
80 | uses: docker/setup-buildx-action@v3
81 |
82 | - name: build local container
83 | uses: docker/build-push-action@v4
84 | with:
85 | tags: localbuild/testimage
86 | push: false
87 | load: true
88 |
89 | #- name: Anchore Container Scan
90 | # uses: anchore/scan-action@v5.0.0
91 | # with:
92 | # image: "localbuild/testimage"
93 |
94 | #- name: Run Docker Container
95 | # id: run_container
96 | # run: |
97 | # # container_id=$(docker run -d localbuild/testimage)
98 | # container_id=$(docker run -d -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
99 | # echo "Container ID: $container_id"
100 | # echo "::set-output name=id::$container_id"
101 |
102 | #- name: Check Docker Socket
103 | # run: ls -l /var/run/docker.sock || echo "Docker socket not found"
104 |
105 | - name: Run Docker Container
106 | run: |
107 | set +e # Disable immediate exit on error
108 |
109 | container_id=$(docker run -d --group-add 118 -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
110 |
111 | # container_id=$(docker run -d --privileged --group-add docker -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
112 |
113 | # docker run -dit --name randomizer -v /var/run/docker.sock:/var/run/docker.sock randomizer
114 | # container_id=$(docker run -dit -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
115 | # container_id=$(docker run -dit --group-add 118 -v /var/run/docker.sock:/var/run/docker.sock localbuild/testimage)
116 |
117 | # docker exec -i $container_id ls -ld /var/run/docker.sock
118 | # docker exec -i $container_id id
119 | # docker exec -i $container_id docker ps
120 |
121 | echo "Container ID: $container_id"
122 | exit_code=$(docker wait "$container_id")
123 | echo "Container exited with status $exit_code"
124 | if [ "$exit_code" -ne 0 ]; then
125 | echo "Fetching logs from container $container_id"
126 | docker logs "$container_id" || true
127 | fi
128 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": true,
3 | "MD033": {
4 | "allowed_elements": ["div", "br", "img"]
5 | },
6 | "MD041": false
7 | }
8 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:alpine
2 |
3 | ENV RANDOMIZER_CONFIG=/app/randomizer.yml
4 |
5 | USER root
6 | RUN echo "https://dl-cdn.alpinelinux.org/alpine/edge/community" >> /etc/apk/repositories && \
7 | apk update && \
8 | apk add --no-cache docker-cli-compose=2.29.7-r0
9 |
10 | RUN addgroup -g 988 docker
11 | RUN adduser -D -H randomizer && \
12 | adduser randomizer docker
13 | RUN mkdir /app && chown randomizer:randomizer /app
14 |
15 | USER randomizer
16 | WORKDIR /app
17 | HEALTHCHECK --interval=30s --timeout=5s --retries=3 CMD pgrep -f "randomizer" || exit 1
18 |
19 | RUN python -m venv /app/venv
20 | ENV PATH="/app/venv/bin:${PATH}"
21 | COPY requirements.txt .
22 | RUN pip install --no-cache-dir -r requirements.txt
23 |
24 | COPY randomizer .
25 | COPY randomizer.yml .
26 |
27 | ENTRYPOINT ["sh", "-c"]
28 |
29 | CMD ["python /app/randomizer --config $RANDOMIZER_CONFIG"]
30 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 ingestbot
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |

3 |
4 | # Randomizer
5 |
6 | A Gluetun VPN Randomizer
7 |
8 |
9 | [](https://hub.docker.com/r/ingestbot/randomizer)
10 | [](https://github.com/ingestbot/randomizer/actions/workflows/ci-cd.yml)
11 |
12 |
13 |
14 | ## Gluetun VPN Randomizer
15 |
16 | Gluetun VPN Randomizer is a Python application and Docker container developed
17 | for the purpose of rotating VPN gateways from one or more VPN providers. This
18 | will change your outbound IP address and optionally your VPN provider on a
19 | random basis within a given time period (eg, every 2-4 hours).
20 |
21 | Some familiarity with Docker, VPN (Wireguard), and Python may be helpful.
22 |
23 | ## Requirements and Dependencies
24 |
25 | - VPN Service (see [Notes](#notes) for options)
26 | - [Gluetun VPN client](https://github.com/qdm12/gluetun)
27 | - [Docker](https://docs.docker.com/engine/install)
28 | - [Docker Compose V2](https://docs.docker.com/compose/migrate)
29 |
30 | - Additional Requirements and Dependencies noted in `requirements.txt`
31 |
32 | ## Quick Start
33 |
34 | ### For both systemd and Docker
35 |
36 | - Get a functional
37 | [docker-compose.yml](examples/docker-compose/docker-compose.gluetun_basic.yml)
38 | working with Gluetun (see the provided example).
39 | - eg, `/usr/local/docker/gluetun/docker-compose.yml`
40 | - Modify the provided [env.example](examples/env.example) and copy to the
41 | Gluetun path as `.env`.
42 | This provides values to the `environment:` parameters in
43 | [docker-compose.yml](examples/docker-compose/docker-compose.gluetun_basic.yml).
44 | - eg, `/usr/local/docker/gluetun/.env`
45 | - Create a `gluetun_config` directory. This should be the same path defined in
46 | `volumes:`
47 | - eg, `/usr/local/docker/gluetun/gluetun_config` (see the layout sketch after this list)
48 | - If multiple providers are being used, create one `.env.<provider>` file per
49 | provider, named after a [supported Gluetun
50 | provider](https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers)
51 | - eg, `.env.ivpn`, `.env.mullvad`, etc.
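
Putting the steps above together, the Gluetun directory used throughout the examples
might look like the sketch below (`/usr/local/docker/gluetun` is only the example
path; any location works as long as it matches the compose file and its `volumes:`):

```text
/usr/local/docker/gluetun/
├── .env                 # or .env.mullvad, .env.ivpn, ... when using multiple providers
├── docker-compose.yml
└── gluetun_config/
```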
52 |
53 | ### Docker
54 |
55 | - See [docker-compose.yml](examples/docker-compose/docker-compose.yml)
56 | - Modify `randomizer` options (see [Options](#options) below)
57 | - Set `RANDOMIZER_CONFIG` to the location of `randomizer.yml` (see the snippet after this list)
58 | - Bind mount files must be readable by user `randomizer/1000`
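
For example, the provided
[docker-compose.yml](examples/docker-compose/docker-compose.yml) ships with this
setting commented out; uncommenting it (or adding an equivalent line) points the
container at a copy of `randomizer.yml` placed in the bind-mounted Gluetun path:

```yaml
    environment:
      # assumes randomizer.yml was copied to /usr/local/docker/gluetun, which the
      # example compose file bind mounts at /app/gluetun
      - RANDOMIZER_CONFIG=/app/gluetun/randomizer.yml
```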
59 |
60 | ### systemd
61 |
62 | - Copy [randomizer](randomizer) to a desired location and make it executable
63 | - `/usr/local/bin/randomizer`
64 | - `chmod 750 /usr/local/bin/randomizer`
65 | - Modify `randomizer` options (see [Options](#options) below)
66 | - To run as a systemd service, use the provided systemd unit file
67 | [randomizer.service](examples/systemd/randomizer.service)
68 | - `/etc/systemd/system/randomizer.service`
69 | - `systemctl daemon-reload`
70 | - `systemctl status|stop|start randomizer`
71 | - Test/Verify Connection:
72 | - `docker logs gluetun`
73 | - `curl -x localhost:8008 ipinfo.io`
74 |
75 | ## Options
76 |
77 | Options are defined in a separate YAML configuration file (see
78 | [randomizer.yml](randomizer.yml)). The location of this file is passed to
79 | [randomizer](randomizer) via its `--config` argument. For the Docker
80 | container, set `RANDOMIZER_CONFIG` instead (see
81 | [docker-compose.yml](examples/docker-compose/docker-compose.yml)). A partial sketch follows the option list below.
82 |
83 | - `is_docker_container`: Set to `True` if running as the Docker container
84 | - `hostname_fqdn`: Set to `localhost` if using systemd. Set to the Docker host if running as a container.
85 | - `debug`: Increases verbosity of logging and frequency of rotation (see also
86 | the Logging section and `ttl_min`, `ttl_max`, `ttl_min_debug`, `ttl_max_debug`)
87 | - `shuffle_vpn_provider`: True to support multiple VPN providers. False if
88 | single. Multiple VPN providers require multiple `.env.*` files
89 | - `mqtt_enable`: True to support MQTT.
90 | - `influxdb_enable`: True to support InfluxDB.
91 | - `container_name`: This name should match `container_name` given in `docker-compose.yml`
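
The keys below are those read by [randomizer](randomizer); the values are
illustrative placeholders only, and the list is not exhaustive (see
[randomizer.yml](randomizer.yml) for the full, authoritative set):

```yaml
# Partial sketch of randomizer.yml - placeholder values only.
debug: False
is_docker_container: True               # running as the Docker container
is_docker_container_multi: False
instance_name: vpn01                    # placeholder
hostname_fqdn: docker-host.example.net  # placeholder
container_name: gluetun                 # must match container_name in docker-compose.yml
gluetun_proxy_port: 8008
gluetun_control_port: 8000
gluetun_healthcheck_port: 9999
shuffle_vpn_provider: False
mqtt_enable: False
influxdb_enable: False
mysql_enable: False
```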
92 |
93 | ## Example Architecture
94 |
95 | This is one of many examples of using multiple VPN instances in a given
96 | environment.
97 |
98 | - LAN: 192.168.1.0/24
99 | - Consists of a mix of mobile devices, desktops/laptops, and virtual machines
100 | - All devices in this network use an HTTP/HTTPS proxy via the assigned frontend
101 | in HAProxy (eg, 192.168.1.100:8118)
102 | - HAProxy: 192.168.1.100
103 | - Acts as load balancer and distribution to multiple Gluetun VPN instances
104 | - In this example port 8118 is the listening HTTP/HTTPS proxy
105 | - See the configuration example [haproxy.cfg](examples/haproxy/haproxy.cfg)
106 | - Virtual Machines: 192.168.20.10, 192.168.30.10
107 | - Separate subnets of 192.168.20/24 and 192.168.30/24 are optional
108 | - Each virtual machine contains a running randomizer and Docker container(s)
109 | - Outbound IP addresses 23.11.82.103, 201.32.11.201 are randomly assigned
110 | based on VPN provider(s) configured in gluetun and frequency of rotation
111 |
112 | 
113 |
114 | ## DNS
115 |
116 | ### Unbound
117 |
118 | An example of using Gluetun with
119 | [Unbound](https://nlnetlabs.nl/projects/unbound/about/) is shown in
120 | [unbound](examples/unbound)
121 |
122 | ### Blocky
123 |
124 | An example of using Gluetun with [Blocky](https://0xerr0r.github.io/blocky/) is
125 | shown in [blocky](examples/blocky)
126 |
127 | ## Notes
128 |
129 | - VPN Service: A temporary VPN service can be obtained via
130 | <https://freevpn.us>. See [docker-compose.freevpn.us.yml](examples/docker-compose/docker-compose.freevpn.us.yml)
131 | for details.
132 | - Currently, randomizer is biased towards Wireguard. OpenVPN may be supported in
133 | the future, particularly if there is a demand for it.
134 | - If multiple VPN instances are being used, it's highly recommended to use MQTT.
135 | This prevents conflicting gateways from being used.
136 | - Docker Secrets will be adopted... soon!
137 | -
138 | -
139 | - [VPN Comparison
140 | Table](https://www.reddit.com/r/VPN/comments/m736zt/vpn_comparison_table)
141 |
--------------------------------------------------------------------------------
/examples/blocky/blocky_config/config.yml:
--------------------------------------------------------------------------------
1 | ##
2 | ## https://0xerr0r.github.io/blocky/v0.23/configuration/
3 | ##
4 | #
5 | #log:
6 | # level: debug
7 | # level: trace
8 | #
9 | upstreams:
10 | init:
11 | strategy: fast
12 | strategy: parallel_best
13 | groups:
14 | default:
15 | ##
16 | ## https://www.ipfire.org/docs/dns/public-servers
17 | ## https://dnsprivacy.org/public_resolvers
18 | ##
19 | #
20 | # https://res3.digitale-gesellschaft.ch/
21 | #
22 | - tcp-tls:dns.digitale-gesellschaft.ch:853
23 | - https://dns.digitale-gesellschaft.ch/dns-query
24 | #
25 | # cloudflare - https://developers.cloudflare.com/1.1.1.1/
26 | #
27 | - tcp-tls:1.1.1.1:853
28 | - https://cloudflare-dns.com/dns-query
29 | #
30 | # https://www.quad9.net
31 | #
32 | - https://dns.quad9.net/dns-query
33 | - tcp-tls:dns.quad9.net
34 | #
35 | # https://digitalcourage.de
36 | #
37 | - tcp-tls:dns3.digitalcourage.de:853
38 | #
39 | # https://www.dns-ga.de/server-info.html
40 | #
41 | - tcp-tls:dot.dns-ga.com
42 | - https://doh.dns-ga.com/query
43 | #
44 | # https://blog.uncensoreddns.org/dns-servers/
45 | #
46 | - tcp-tls:anycast.uncensoreddns.org:853
47 | - https://anycast.uncensoreddns.org/dns-query
48 | #
49 | # https://controld.com
50 | #
51 | - tcp-tls:p0.freedns.controld.com
52 | - https://freedns.controld.com/p0
53 | #
54 | # https://my.nextdns.io
55 | #
56 | - tcp-tls:bd4842.dns.nextdns.io
57 | - https://dns.nextdns.io/bd4842
58 |
59 | blocking:
60 | blackLists:
61 | ads:
62 | - local.blocks.txt
63 | - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
64 | - http://sysctl.org/cameleon/hosts
65 | - https://big.oisd.nl/domainswild
66 | whiteLists:
67 | ads:
68 | - local.allow.txt
69 | clientGroupsBlock:
70 | default:
71 | - ads
72 | loading:
73 | concurrency: 10
74 |
75 | bootstrapDns: 1.1.1.1
76 |
77 | prometheus:
78 | enable: true
79 |
80 | filtering:
81 | queryTypes:
82 | - AAAA
83 |
84 | fqdnOnly:
85 | enable: true
86 |
87 | ports:
88 | dns: 53
89 | http: 4000
90 |
91 | caching:
92 | minTime: 25m
93 | maxTime: 40m
94 | prefetching: true
95 |
96 | redis:
97 | address: 192.168.1.25:6379
98 |
99 | queryLog:
100 | type: mysql
101 | target: db_user:db_password@tcp(db_host_or_ip:3306)/db_user?charset=utf8mb4&parseTime=True&loc=Local
102 | logRetentionDays: 7
103 |
--------------------------------------------------------------------------------
/examples/blocky/blocky_config/local.allow.txt:
--------------------------------------------------------------------------------
1 | recaptcha.google.com
2 | www.recaptcha.net
3 | www.google.com
4 | maps.google.com
5 | youtube.com
6 |
--------------------------------------------------------------------------------
/examples/blocky/blocky_config/local.block.txt:
--------------------------------------------------------------------------------
1 | iprofiles.apple.com
2 | mdmenrollment.apple.com
3 | deviceenrollment.apple.com
4 | gdmf.apple.com
5 | acmdm.apple.com
6 | albert.apple.com
7 |
--------------------------------------------------------------------------------
/examples/blocky/docker-compose.blocky.yml:
--------------------------------------------------------------------------------
1 | services:
2 | gluetun:
3 | image: qmcgaw/gluetun
4 | container_name: gluetun
5 | restart: always
6 | dns:
7 | - 127.0.0.1
8 | networks:
9 | - gluetun_net
10 | cap_add:
11 | - NET_ADMIN
12 | devices:
13 | - /dev/net/tun:/dev/net/tun
14 | ports:
15 | - 8000:8000/tcp # control server - https://github.com/qdm12/gluetun-wiki/blob/main/setup/advanced/control-server.md
16 | - 8008:8888/tcp # HTTP proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
17 | - 6666:9999/tcp # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/faq/healthcheck.md#internal-healthcheck
18 | - 8053:53/tcp # blocky
19 | - 8053:53/udp # blocky
20 | - 9110:4000/tcp # blocky prometheus exporter
21 | volumes:
22 | - /usr/local/docker/gluetun/gluetun_config:/gluetun
23 | - /usr/local/docker/gluetun/gluetun_config/post-rules.txt:/iptables/post-rules.txt
24 |
25 | sysctls:
26 | - net.ipv6.conf.all.disable_ipv6=1
27 |
28 | environment:
29 | # - LOG_LEVEL=debug
30 | - TZ=Etc/UTC
31 | #
32 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/firewall.md
33 | #
34 | - FIREWALL_OUTBOUND_SUBNETS=192.168.1.0/24
35 | # - FIREWALL_DEBUG=on
36 | #
37 | # https://github.com/qdm12/gluetun/issues/2047
38 | #
39 | ### - PUBLICIP_API=ip2location
40 | #
41 | # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/healthcheck.md
42 | #
43 | - HEALTH_TARGET_ADDRESS=1.1.1.1:443
44 | - HEALTH_VPN_DURATION_INITIAL=10s
45 | - HEALTH_VPN_DURATION_ADDITION=7s
46 | - HEALTH_SUCCESS_WAIT_DURATION=7s
47 | - HEALTH_SERVER_ADDRESS=0.0.0.0:9999
48 | #
49 | # providers - https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers
50 | #
51 | - VPN_TYPE=wireguard
52 | - VPN_SERVICE_PROVIDER=${VPN_SERVICE_PROVIDER}
53 | - SERVER_COUNTRIES=${SERVER_COUNTRIES}
54 | - WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
55 | - WIREGUARD_ADDRESSES=${WIREGUARD_ADDRESSES}
56 | - VPN_ENDPOINT_PORT=${VPN_ENDPOINT_PORT}
57 | #
58 | # server list update - https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list
59 | #
60 | - UPDATER_PERIOD=3h
61 | #
62 | # http proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
63 | #
64 | - HTTPPROXY=on
65 | - HTTPPROXY_STEALTH=on
66 | #
67 | # DNS
68 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/dns.md
69 | #
70 | - DOT=off
71 | - DNS_KEEP_NAMESERVER=on
72 |
73 | blocky:
74 | image: spx01/blocky
75 | container_name: blocky
76 | #
77 | # https://github.com/0xERR0R/blocky/discussions/1417
78 | #
79 | restart: always
80 | network_mode: "service:gluetun"
81 |
82 | volumes:
83 | # Optional to synchronize the log timestamp with host
84 | - /etc/localtime:/etc/localtime:ro
85 | # config file
86 | - ./blocky_config/config.yml:/app/config.yml
87 | - ./blocky_config/local.block.txt:/app/local.blocks.txt
88 | - ./blocky_config/local.allow.txt:/app/local.allow.txt
89 |
90 | networks:
91 | gluetun_net:
92 | name: gluetun_net
93 | driver_opts:
94 | com.docker.network.bridge.name: br-gluetun
95 | ipam:
96 | config:
97 | - subnet: 10.10.10.0/24
98 |
--------------------------------------------------------------------------------
/examples/blocky/gluetun_config/post-rules.txt:
--------------------------------------------------------------------------------
1 | iptables -A OUTPUT -p tcp -d 192.168.1.25 --dport 3306 -j ACCEPT
2 | iptables -A INPUT -p tcp -s 192.168.1.25 --sport 3306 -j ACCEPT
3 | iptables -A OUTPUT -p tcp -d 192.168.1.25 --dport 6379 -j ACCEPT
4 | iptables -A INPUT -p tcp -s 192.168.1.25 --sport 6379 -j ACCEPT
5 | iptables -A OUTPUT -d 192.168.1.0/24 -j DROP
6 | iptables -A INPUT -s 192.168.1.0/24 -j DROP
7 |
--------------------------------------------------------------------------------
/examples/docker-compose/docker-compose.freevpn.us.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | gluetun:
4 | image: qmcgaw/gluetun
5 | container_name: gluetun
6 | cap_add:
7 | - NET_ADMIN
8 | devices:
9 | - /dev/net/tun:/dev/net/tun
10 | ports:
11 | - 8000:8000/tcp # control server - https://github.com/qdm12/gluetun-wiki/blob/main/setup/advanced/control-server.md
12 | - 8008:8888/tcp # HTTP proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
13 | - 9999:9999/tcp # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/faq/healthcheck.md#internal-healthcheck
14 | volumes:
15 | - /usr/local/docker/gluetun/gluetun_config:/gluetun
16 | environment:
17 | - TZ=Etc/UTC
18 | #
19 | # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/healthcheck.md
20 | #
21 | - HEALTH_SERVER_ADDRESS=0.0.0.0:9999
22 | #
23 | # custom - https://github.com/qdm12/gluetun-wiki/blob/main/setup/providers/custom.md
24 | #
25 | - VPN_SERVICE_PROVIDER=custom
26 | - VPN_TYPE=wireguard
27 | - VPN_ENDPOINT_IP=209.212.312.23
28 | - VPN_ENDPOINT_PORT=51820
29 | - WIREGUARD_PUBLIC_KEY=QaYrZvnCasdfweiowerCu+HwliJDU=
30 | - WIREGUARD_PRIVATE_KEY=+AvPIWEKSLKDOdkweoiwWERSDWEWEKksd0=
31 | - WIREGUARD_ADDRESSES=10.71.0.77/32
32 | #
33 | # http proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
34 | #
35 | - HTTPPROXY=on
36 | - HTTPPROXY_STEALTH=on
37 |
--------------------------------------------------------------------------------
/examples/docker-compose/docker-compose.gluetun_basic.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | gluetun:
4 | image: qmcgaw/gluetun
5 | container_name: gluetun
6 | cap_add:
7 | - NET_ADMIN
8 | devices:
9 | - /dev/net/tun:/dev/net/tun
10 | ports:
11 | - 8000:8000/tcp # control server - https://github.com/qdm12/gluetun-wiki/blob/main/setup/advanced/control-server.md
12 | - 8008:8888/tcp # HTTP proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
13 | - 9999:9999/tcp # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/faq/healthcheck.md#internal-healthcheck
14 | volumes:
15 | - /usr/local/docker/gluetun/gluetun_config:/gluetun
16 | environment:
17 | - TZ=Etc/UTC
18 | #
19 | # healthcheck - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/healthcheck.md
20 | #
21 | - HEALTH_SERVER_ADDRESS=0.0.0.0:9999
22 | #
23 | # providers - https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers
24 | #
25 | - VPN_TYPE=wireguard
26 | - VPN_SERVICE_PROVIDER=${VPN_SERVICE_PROVIDER}
27 | - SERVER_COUNTRIES=${SERVER_COUNTRIES}
28 | - WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
29 | - WIREGUARD_ADDRESSES=${WIREGUARD_ADDRESSES}
30 | - VPN_ENDPOINT_PORT=${VPN_ENDPOINT_PORT}
31 | #
32 | # server list update - https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list
33 | #
34 | - UPDATER_PERIOD=1h
35 | #
36 | # http proxy - https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
37 | #
38 | - HTTPPROXY=on
39 | - HTTPPROXY_STEALTH=on
40 |
--------------------------------------------------------------------------------
/examples/docker-compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | ##
2 | ## https://github.com/ingestbot/randomizer
3 | ##
4 | services:
5 | randomizer:
6 | image: randomizer
7 | container_name: randomizer
8 | network_mode: bridge
9 | volumes:
10 | - /usr/local/docker/gluetun:/app/gluetun
11 | - /var/run/docker.sock:/var/run/docker.sock
12 | environment:
13 | - TZ=America/Los_Angeles
14 |
15 | # - RANDOMIZER_CONFIG=/app/gluetun/randomizer.yml
16 |
17 | #
18 | # proxy is optional
19 | #
20 | - HTTP_PROXY=${http_proxy}
21 | - HTTPS_PROXY=${https_proxy}
22 | - NO_PROXY=${no_proxy}
23 |
24 | restart: always
25 |
--------------------------------------------------------------------------------
/examples/env.example:
--------------------------------------------------------------------------------
1 | VPN_SERVICE_PROVIDER=mullvad
2 | SERVER_COUNTRIES="Canada,USA"
3 | WIREGUARD_PRIVATE_KEY=WiweWETEWRRWEWsdfwioirweoiSDF
4 | WIREGUARD_ADDRESSES=10.12.70.11/32
5 | VPN_ENDPOINT_PORT=2049
6 |
--------------------------------------------------------------------------------
/examples/haproxy/haproxy.cfg:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | stats socket /var/lib/haproxy/stats level admin
5 | chroot /var/lib/haproxy
6 | user haproxy
7 | group haproxy
8 | daemon
9 |
10 | defaults
11 | log global
12 | mode http
13 | option httplog
14 | option dontlognull
15 | timeout connect 5000
16 | timeout client 50000
17 | timeout server 50000
18 | errorfile 400 /etc/haproxy/errors/400.http
19 | errorfile 403 /etc/haproxy/errors/403.http
20 | errorfile 408 /etc/haproxy/errors/408.http
21 | errorfile 500 /etc/haproxy/errors/500.http
22 | errorfile 502 /etc/haproxy/errors/502.http
23 | errorfile 503 /etc/haproxy/errors/503.http
24 | errorfile 504 /etc/haproxy/errors/504.http
25 |
26 | frontend proxy-front
27 | bind *:8118
28 | mode http
29 | default_backend proxy-back
30 |
31 | backend proxy-back
32 | mode http
33 | balance leastconn
34 | option forwardfor
35 | option httpchk
36 | ##
37 | ## This is running a health check against the Gluetun Container
38 | ##
39 | http-check connect port 6666
40 | http-check send meth GET uri /
41 | http-check expect status 200-399
42 | cookie SERVERID insert indirect
43 | server proxy01 192.168.20.10:8008 check cookie proxy01
44 | server proxy02 192.168.30.10:8008 check cookie proxy02
45 |
--------------------------------------------------------------------------------
/examples/systemd/randomizer.service:
--------------------------------------------------------------------------------
1 | ##
2 | ## see: Systemd Unit configuration logic -- http://man7.org/linux/man-pages/man5/systemd.unit.5.html for more details
3 | ##
4 | ## /etc/systemd/system/randomizer.service
5 | ## (adjust the --config path in ExecStart below to the location of randomizer.yml)
6 |
7 | [Service]
8 | Type=simple
9 | ExecStart=/usr/local/bin/randomizer --config /usr/local/etc/randomizer.yml
10 | User=root
11 | Group=root
12 | StandardError=syslog
13 |
14 | [Install]
15 | WantedBy=multi-user.target
16 |
--------------------------------------------------------------------------------
/examples/unbound/docker-compose.unbound.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | gluetun:
4 | image: qmcgaw/gluetun
5 | container_name: gluetun
6 | dns:
7 | - 127.0.0.1
8 | cap_add:
9 | - NET_ADMIN
10 | devices:
11 | - /dev/net/tun:/dev/net/tun
12 | ports:
13 | # control server
14 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/advanced/control-server.md
15 | - 8000:8000/tcp
16 | # HTTP proxy
17 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
18 | - 8008:8888/tcp
19 | # healthcheck
20 | # https://github.com/qdm12/gluetun-wiki/blob/main/faq/healthcheck.md#internal-healthcheck
21 | - 9999:9999/tcp
22 | # unbound
23 | - 8053:53/tcp
24 | # unbound
25 | - 8053:53/udp
26 | volumes:
27 | - /usr/local/docker/gluetun/gluetun_config:/gluetun
28 | environment:
29 | - TZ=Etc/UTC
30 | #
31 | # healthcheck
32 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/healthcheck.md
33 | #
34 | - HEALTH_SERVER_ADDRESS=0.0.0.0:9999
35 | #
36 | # providers
37 | # https://github.com/qdm12/gluetun-wiki/tree/main/setup/providers
38 | #
39 | - VPN_TYPE=wireguard
40 | - VPN_SERVICE_PROVIDER=${VPN_SERVICE_PROVIDER}
41 | - SERVER_COUNTRIES=${SERVER_COUNTRIES}
42 | - WIREGUARD_PRIVATE_KEY=${WIREGUARD_PRIVATE_KEY}
43 | - WIREGUARD_ADDRESSES=${WIREGUARD_ADDRESSES}
44 | - VPN_ENDPOINT_PORT=${VPN_ENDPOINT_PORT}
45 | #
46 | # server list update
47 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/servers.md#update-the-vpn-servers-list
48 | #
49 | - UPDATER_PERIOD=1h
50 | #
51 | # http proxy
52 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/http-proxy.md
53 | #
54 | - HTTPPROXY=on
55 | - HTTPPROXY_STEALTH=on
56 | #
57 | # DNS
58 | # https://github.com/qdm12/gluetun-wiki/blob/main/setup/options/dns.md
59 | #
60 | - DOT=off
61 | - DNS_KEEP_NAMESERVER=on
62 |
63 | unbound:
64 | image: "mvance/unbound:latest"
65 | container_name: unbound
66 | network_mode: "service:gluetun"
67 | volumes:
68 | - /usr/local/docker/gluetun/unbound_config/unbound.conf:/opt/unbound/etc/unbound/unbound.conf
69 | - /usr/local/docker/gluetun/unbound_config/conf.d/:/opt/unbound/etc/unbound/conf.d/
70 |
--------------------------------------------------------------------------------
/examples/unbound/unbound_config/conf.d/README:
--------------------------------------------------------------------------------
1 |
2 | Block list from https://big.oisd.nl/unbound
3 |
4 | curl -o big.oisd.nl.conf https://big.oisd.nl/unbound
5 |
--------------------------------------------------------------------------------
/examples/unbound/unbound_config/conf.d/local.conf:
--------------------------------------------------------------------------------
1 | #
2 | # https://github.com/NLnetLabs/unbound/issues/960
3 | #
4 | interface-automatic: yes
5 |
--------------------------------------------------------------------------------
/examples/unbound/unbound_config/unbound.conf:
--------------------------------------------------------------------------------
1 | server:
2 | ###########################################################################
3 | # BASIC SETTINGS
4 | ###########################################################################
5 | # Time to live maximum for RRsets and messages in the cache. If the maximum
6 | # kicks in, responses to clients still get decrementing TTLs based on the
7 | # original (larger) values. When the internal TTL expires, the cache item
8 | # has expired. Can be set lower to force the resolver to query for data
9 | # often, and not trust (very large) TTL values.
10 | cache-max-ttl: 86400
11 |
12 | # Time to live minimum for RRsets and messages in the cache. If the minimum
13 | # kicks in, the data is cached for longer than the domain owner intended,
14 | # and thus less queries are made to look up the data. Zero makes sure the
15 | # data in the cache is as the domain owner intended, higher values,
16 | # especially more than an hour or so, can lead to trouble as the data in
17 | # the cache does not match up with the actual data any more.
18 | cache-min-ttl: 300
19 |
20 | # Set the working directory for the program.
21 | directory: "/opt/unbound/etc/unbound"
22 |
23 | # If enabled, Unbound will respond with Extended DNS Error codes (RFC 8914).
24 | # These EDEs attach informative error messages to a response for various
25 | # errors.
26 | # When the val-log-level: option is also set to 2, responses with Extended
27 | # DNS Errors concerning DNSSEC failures that are not served from cache, will
28 | # also contain a descriptive text message about the reason for the failure.
29 | ede: yes
30 |
31 | # If enabled, Unbound will attach an Extended DNS Error (RFC 8914)
32 | # Code 3 - Stale Answer as EDNS0 option to the expired response.
33 | # This will not attach the EDE code without setting ede: yes as well.
34 | ede-serve-expired: yes
35 |
36 | # RFC 6891. Number of bytes size to advertise as the EDNS reassembly buffer
37 | # size. This is the value put into datagrams over UDP towards peers.
38 | # The actual buffer size is determined by msg-buffer-size (both for TCP and
39 | # UDP). Do not set higher than that value.
40 | # Default is 1232 which is the DNS Flag Day 2020 recommendation.
41 | # Setting to 512 bypasses even the most stringent path MTU problems, but
42 | # is seen as extreme, since the amount of TCP fallback generated is
43 | # excessive (probably also for this resolver, consider tuning the outgoing
44 | # tcp number).
45 | edns-buffer-size: 1232
46 |
47 | # Listen to for queries from clients and answer from this network interface
48 | # and port.
49 | interface: 0.0.0.0@53
50 |
51 | # Rotates RRSet order in response (the pseudo-random number is taken from
52 | # the query ID, for speed and thread safety).
53 | rrset-roundrobin: yes
54 |
55 | # Drop user privileges after binding the port.
56 | username: "_unbound"
57 |
58 | ###########################################################################
59 | # LOGGING
60 | ###########################################################################
61 |
62 | # Do not print log lines to inform about local zone actions
63 | log-local-actions: no
64 |
65 | # Do not print one line per query to the log
66 | log-queries: no
67 |
68 | # Do not print one line per reply to the log
69 | log-replies: no
70 |
71 | # Do not print log lines that say why queries return SERVFAIL to clients
72 | log-servfail: no
73 |
74 | # If you want to log to a file, use:
75 | # logfile: /opt/unbound/etc/unbound/unbound.log
76 | # Set log location (using /dev/null further limits logging)
77 | logfile: /dev/null
78 |
79 | # Set logging level
80 | # Level 0: No verbosity, only errors.
81 | # Level 1: Gives operational information.
82 | # Level 2: Gives detailed operational information including short information per query.
83 | # Level 3: Gives query level information, output per query.
84 | # Level 4: Gives algorithm level information.
85 | # Level 5: Logs client identification for cache misses.
86 | verbosity: 0
87 |
88 | ###########################################################################
89 | # PRIVACY SETTINGS
90 | ###########################################################################
91 |
92 | # RFC 8198. Use the DNSSEC NSEC chain to synthesize NXDOMAIN and other
93 | # denials, using information from previous NXDOMAIN answers. In other
94 | # words, use cached NSEC records to generate negative answers within a
95 | # range and positive answers from wildcards. This increases performance,
96 | # decreases latency and resource utilization on both authoritative and
97 | # recursive servers, and increases privacy. Also, it may help increase
98 | # resilience to certain DoS attacks in some circumstances.
99 | aggressive-nsec: yes
100 |
101 | # Extra delay for timeouted UDP ports before they are closed, in msec.
102 | # This prevents very delayed answer packets from the upstream (recursive)
103 | # servers from bouncing against closed ports and setting off all sort of
104 | # close-port counters, with eg. 1500 msec. When timeouts happen you need
105 | # extra sockets, it checks the ID and remote IP of packets, and unwanted
106 | # packets are added to the unwanted packet counter.
107 | delay-close: 10000
108 |
109 | # Prevent the unbound server from forking into the background as a daemon
110 | do-daemonize: no
111 |
112 | # Add localhost to the do-not-query-address list.
113 | do-not-query-localhost: no
114 |
115 | # Number of bytes size of the aggressive negative cache.
116 | neg-cache-size: 4M
117 |
118 | # Send minimum amount of information to upstream servers to enhance
119 | # privacy (best privacy).
120 | qname-minimisation: yes
121 |
122 | ###########################################################################
123 | # SECURITY SETTINGS
124 | ###########################################################################
125 | # Only give access to recursion clients from LAN IPs
126 | access-control: 127.0.0.1/32 allow
127 | access-control: 192.168.0.0/16 allow
128 | access-control: 172.16.0.0/12 allow
129 | access-control: 10.0.0.0/8 allow
130 | # access-control: fc00::/7 allow
131 | # access-control: ::1/128 allow
132 |
133 | # File with trust anchor for one zone, which is tracked with RFC5011
134 | # probes.
135 | auto-trust-anchor-file: "var/root.key"
136 |
137 | # Enable chroot (i.e, change apparent root directory for the current
138 | # running process and its children)
139 | chroot: "/opt/unbound/etc/unbound"
140 |
141 | # Deny queries of type ANY with an empty response.
142 | deny-any: yes
143 |
144 | # Harden against algorithm downgrade when multiple algorithms are
145 | # advertised in the DS record.
146 | harden-algo-downgrade: yes
147 |
148 | # Harden against unknown records in the authority section and additional
149 | # section. If no, such records are copied from the upstream and presented
150 | # to the client together with the answer. If yes, it could hamper future
151 | # protocol developments that want to add records.
152 | harden-unknown-additional: yes
153 |
154 | # RFC 8020. returns nxdomain to queries for a name below another name that
155 | # is already known to be nxdomain.
156 | harden-below-nxdomain: yes
157 |
158 | # Require DNSSEC data for trust-anchored zones, if such data is absent, the
159 | # zone becomes bogus. If turned off you run the risk of a downgrade attack
160 | # that disables security for a zone.
161 | harden-dnssec-stripped: yes
162 |
163 | # Only trust glue if it is within the servers authority.
164 | harden-glue: yes
165 |
166 | # Ignore very large queries.
167 | harden-large-queries: yes
168 |
169 | # Perform additional queries for infrastructure data to harden the referral
170 | # path. Validates the replies if trust anchors are configured and the zones
171 | # are signed. This enforces DNSSEC validation on nameserver NS sets and the
172 | # nameserver addresses that are encountered on the referral path to the
173 | # answer. Experimental option.
174 | harden-referral-path: no
175 |
176 | # Ignore very small EDNS buffer sizes from queries.
177 | harden-short-bufsize: yes
178 |
179 | # If enabled the HTTP header User-Agent is not set. Use with caution
180 | # as some webserver configurations may reject HTTP requests lacking
181 | # this header. If needed, it is better to explicitly set the
182 | # the http-user-agent.
183 | hide-http-user-agent: no
184 |
185 | # Refuse id.server and hostname.bind queries
186 | hide-identity: yes
187 |
188 | # Refuse version.server and version.bind queries
189 | hide-version: yes
190 |
191 | # Set the HTTP User-Agent header for outgoing HTTP requests. If
192 | # set to "", the default, then the package name and version are
193 | # used.
194 | http-user-agent: "DNS"
195 |
196 | # Report this identity rather than the hostname of the server.
197 | identity: "DNS"
198 |
199 | # These private network addresses are not allowed to be returned for public
200 | # internet names. Any occurrence of such addresses are removed from DNS
201 | # answers. Additionally, the DNSSEC validator may mark the answers bogus.
202 | # This protects against DNS Rebinding
203 | private-address: 10.0.0.0/8
204 | private-address: 172.16.0.0/12
205 | private-address: 192.168.0.0/16
206 | private-address: 169.254.0.0/16
207 | # private-address: fd00::/8
208 | # private-address: fe80::/10
209 | # private-address: ::ffff:0:0/96
210 |
211 | # Enable ratelimiting of queries (per second) sent to nameserver for
212 | # performing recursion. More queries are turned away with an error
213 | # (servfail). This stops recursive floods (e.g., random query names), but
214 | # not spoofed reflection floods. Cached responses are not rate limited by
215 | # this setting. Experimental option.
216 | ratelimit: 1000
217 |
218 | # Use this certificate bundle for authenticating connections made to
219 | # outside peers (e.g., auth-zone urls, DNS over TLS connections).
220 | tls-cert-bundle: /etc/ssl/certs/ca-certificates.crt
221 |
222 | # Set the total number of unwanted replies to keep track of in every thread.
223 | # When it reaches the threshold, a defensive action of clearing the rrset
224 | # and message caches is taken, hopefully flushing away any poison.
225 | # Unbound suggests a value of 10 million.
226 | unwanted-reply-threshold: 10000
227 |
228 | # Use 0x20-encoded random bits in the query to foil spoof attempts. This
229 | # perturbs the lowercase and uppercase of query names sent to authority
230 | # servers and checks if the reply still has the correct casing.
231 | # This feature is an experimental implementation of draft dns-0x20.
232 | # Experimental option.
233 | use-caps-for-id: yes
234 |
235 | # Help protect users that rely on this validator for authentication from
236 | # potentially bad data in the additional section. Instruct the validator to
237 | # remove data from the additional section of secure messages that are not
238 | # signed properly. Messages that are insecure, bogus, indeterminate or
239 | # unchecked are not affected.
240 | val-clean-additional: yes
241 |
242 | ###########################################################################
243 | # PERFORMANCE SETTINGS
244 | ###########################################################################
245 | # https://nlnetlabs.nl/documentation/unbound/howto-optimise/
246 | # https://nlnetlabs.nl/news/2019/Feb/05/unbound-1.9.0-released/
247 |
248 | # Number of slabs in the infrastructure cache. Slabs reduce lock contention
249 | # by threads. Must be set to a power of 2.
250 | infra-cache-slabs: 2
251 |
252 | # Number of incoming TCP buffers to allocate per thread. Default
253 | # is 10. If set to 0, or if do-tcp is "no", no TCP queries from
254 | # clients are accepted. For larger installations increasing this
255 | # value is a good idea.
256 | incoming-num-tcp: 10
257 |
258 | # Number of slabs in the key cache. Slabs reduce lock contention by
259 | # threads. Must be set to a power of 2. Setting (close) to the number
260 | # of cpus is a reasonable guess.
261 | key-cache-slabs: 2
262 |
263 | # Number of bytes size of the message cache.
264 | # Unbound recommendation is to Use roughly twice as much rrset cache memory
265 | # as you use msg cache memory.
266 | msg-cache-size: 191406762
267 |
268 | # Number of slabs in the message cache. Slabs reduce lock contention by
269 | # threads. Must be set to a power of 2. Setting (close) to the number of
270 | # cpus is a reasonable guess.
271 | msg-cache-slabs: 2
272 |
273 | # The number of queries that every thread will service simultaneously. If
274 | # more queries arrive that need servicing, and no queries can be jostled
275 | # out (see jostle-timeout), then the queries are dropped.
276 | # This is best set at half the number of the outgoing-range.
277 | # This Unbound instance was compiled with libevent so it can efficiently
278 | # use more than 1024 file descriptors.
279 | num-queries-per-thread: 4096
280 |
281 | # The number of threads to create to serve clients.
282 | # This is set dynamically at run time to effectively use the available
283 | # CPU resources.
284 | num-threads: 1
285 |
286 | # Number of ports to open. This number of file descriptors can be opened
287 | # per thread.
288 | # This Unbound instance was compiled with libevent so it can efficiently
289 | # use more than 1024 file descriptors.
290 | outgoing-range: 8192
291 |
292 | # Number of bytes size of the RRset cache.
293 | # Use roughly twice as much rrset cache memory as msg cache memory
294 | rrset-cache-size: 382813525
295 |
296 | # Number of slabs in the RRset cache. Slabs reduce lock contention by
297 | # threads. Must be set to a power of 2.
298 | rrset-cache-slabs: 2
299 |
300 | # Do not insert authority/additional sections into response messages when
301 | # those sections are not required. This reduces response size
302 | # significantly, and may avoid TCP fallback for some responses. This may
303 | # cause a slight speedup.
304 | minimal-responses: yes
305 |
306 | # Fetch message cache elements before they expire, to keep the cache up
307 | # to date. This uses slightly more CPU and bandwidth, but popular items
308 | # do not expire from the cache.
309 | prefetch: yes
310 |
311 | # Fetch the DNSKEYs earlier in the validation process, when a DS record is
312 | # encountered. This lowers the latency of requests at the expense of little
313 | # more CPU usage.
314 | prefetch-key: yes
315 |
316 | # Have unbound attempt to serve old responses from cache with a TTL of 0 in
317 | # the response without waiting for the actual resolution to finish. The
318 | # actual resolution answer ends up in the cache later on.
319 | serve-expired: yes
320 |
321 | # UDP queries that have waited in the socket buffer for a long time can be
322 | # dropped. The time is set in seconds, 3 could be a good value to ignore old
323 | # queries that likely the client does not need a reply for any more. This
324 | # could happen if the host has not been able to service the queries for a
325 | # while, i.e. Unbound is not running, and then is enabled again. It uses
326 | # timestamp socket options.
327 | sock-queue-timeout: 3
328 |
329 | # Open dedicated listening sockets for incoming queries for each thread and
330 | # try to set the SO_REUSEPORT socket option on each socket. May distribute
331 | # incoming queries to threads more evenly.
332 | so-reuseport: yes
333 |
334 | ###########################################################################
335 | # LOCAL ZONE
336 | ###########################################################################
337 |
338 | # Include file for local-data and local-data-ptr
339 | # include: /opt/unbound/etc/unbound/a-records.conf
340 | # include: /opt/unbound/etc/unbound/srv-records.conf
341 |
342 | ###########################################################################
343 | # FORWARD ZONE
344 | ###########################################################################
345 |
346 | # include: /opt/unbound/etc/unbound/forward-records.conf
347 |
348 | ###########################################################################
349 | # WILDCARD INCLUDE
350 | ###########################################################################
351 | # include: "/opt/unbound/etc/unbound/*.conf"
352 | include: "/opt/unbound/etc/unbound/conf.d/*.conf"
353 |
354 | remote-control:
355 | control-enable: no
356 |
--------------------------------------------------------------------------------
/images/gluetun_vpn_randomizer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ingestbot/randomizer/495eed2ca66333792d31ac27d9cd391b809aa5dc/images/gluetun_vpn_randomizer.png
--------------------------------------------------------------------------------
/images/random_image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ingestbot/randomizer/495eed2ca66333792d31ac27d9cd391b809aa5dc/images/random_image.png
--------------------------------------------------------------------------------
/randomizer:
--------------------------------------------------------------------------------
1 | #!/usr/bin/python3
2 |
3 | ##
4 | ## Python Third-Party Modules
5 | ##
6 | ## - mqtt:
7 | ## - https://pypi.org/project/paho-mqtt
8 | ## - python3-paho-mqtt, pip install paho-mqtt
9 | ## - influx:
10 | ## - https://pypi.org/project/influxdb
11 | ## - python3-influxdb, pip install influxdb
12 | ## - docker:
13 | ## - https://docker-py.readthedocs.io
14 | ## - python3-docker, pip install docker
15 | ## - requests:
16 | ## - https://requests.readthedocs.io
17 | ## - python3-requests, pip install requests
18 | ##
19 | ## Docker Compose V2 - https://docs.docker.com/compose/migrate/
20 | ##
21 | ## 'docker compose version'
22 | ## Docker Compose version v2.*
23 | ##
24 | ## Gluetun VPN client - https://github.com/qdm12/gluetun
25 | ##
26 |
27 | import argparse
28 | import docker
29 | import glob
30 | import json
31 | import logging
32 | import logging.handlers
33 | import os
34 | import pymysql
35 | import random
36 | import re
37 | import requests
38 | import socket
39 | import subprocess
40 | import sys
41 | import threading
42 | import time
43 | import yaml
44 |
45 | import paho.mqtt.client as mqtt_client
46 |
47 | from datetime import datetime, timezone
48 | from influxdb import InfluxDBClient
49 | from requests import ConnectionError
50 |
51 |
52 | parser = argparse.ArgumentParser()
53 | parser.add_argument(
54 |     "--config", "-c", required=True, type=str, help="path to the YAML config file"
55 | )
56 | args = parser.parse_args()
57 |
58 | with open(args.config) as config_file:
59 |     config = yaml.safe_load(config_file)
60 | debug = config["debug"]
61 |
62 | instance_name = config["instance_name"]
63 |
64 | if config["is_docker_container_multi"]:
65 | hostname_fqdn = socket.gethostname()
66 | else:
67 | hostname_fqdn = config["hostname_fqdn"]
68 |
69 | gluetun_proxy_port = config["gluetun_proxy_port"]
70 |
71 | shuffle_vpn_provider = config["shuffle_vpn_provider"]
72 |
73 | mysql_enable = config["mysql_enable"]
74 |
75 | mqtt_enable = config["mqtt_enable"]
76 | mqtt_broker = config["mqtt_broker"]
77 | mqtt_topic_pub = config["mqtt_topic_pub"] + instance_name
78 | mqtt_topic_sub = config["mqtt_topic_sub"]
79 |
80 | ttime_mqtt_pub = config["ttime_mqtt_pub"]
81 | ttime_loop_sleep = ttime_mqtt_pub - 60
82 |
83 | influxdb_enable = config["influxdb_enable"]
84 | influxdb_host = config["influxdb_host"]
85 | influxdb_port = config["influxdb_port"]
86 | influxdb_user = config["influxdb_user"]
87 | influxdb_password = config["influxdb_password"]
88 | influxdb_database = config["influxdb_database"]
89 | influxdb_measurement = config["influxdb_measurement"]
90 |
91 | container_name = config["container_name"]
92 | gluetun_healthcheck_port = config["gluetun_healthcheck_port"]
93 | gluetun_control_port = config["gluetun_control_port"]
94 |
95 | if config["is_docker_container"]:
96 | gluetun_path = "/app/gluetun"
97 | else:
98 | gluetun_path = config["gluetun_path"]
99 |
100 | gluetun_servers_json = os.path.join(gluetun_path, "gluetun_config", "servers.json")
101 |
102 | start_delay = config["start_delay"]
103 |
104 | ttl_min = config["ttl_min"]
105 | ttl_max = config["ttl_max"]
106 | ttl_min_debug = config["ttl_min_debug"]
107 | ttl_max_debug = config["ttl_max_debug"]
108 |
109 | checkup_and_pub_interval = ttime_mqtt_pub
110 |
111 | #
112 | # Docker
113 | #
114 | compose_file_path = os.path.join(gluetun_path, "docker-compose.yml")
115 | client = docker.from_env()
116 | #
117 | # Logging (and debugging)
118 | #
119 | # todo: allow logging to one or both of syslog/stdout
120 | # https://www.delftstack.com/howto/python/python-logging-to-file-and-console/
121 | # handler = [logging.handlers.SysLogHandler(address='/dev/log'), logging.StreamHandler(sys.stdout)]
122 | #
123 | l = logging.getLogger("randomizer")
124 |
125 | level = logging.DEBUG if debug else logging.INFO
126 | l.setLevel(level)
127 |
128 | if config["is_docker_container"]:
129 | handler = logging.StreamHandler(sys.stdout)
130 | formatter = logging.Formatter(
131 | "%(asctime)s %(name)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
132 | )
133 | else:
134 | handler = logging.handlers.SysLogHandler(address="/dev/log")
135 | formatter = logging.Formatter(
136 | "%(name)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
137 | )
138 |
139 | handler.setFormatter(formatter)
140 | l.addHandler(handler)
141 |
142 |
143 | def send_request(method=None, url=None, data=None, headers=None, proxy=None):
144 | #
145 | # https://stackoverflow.com/questions/44448625/how-to-handle-a-connection-error-gracefully-in-requests
146 | #
147 | r = None
148 | connection_timeout = 5
149 | start_time = time.time()
150 |
151 | proxies = (
152 | {
153 | "http": proxy,
154 | "https": proxy,
155 | }
156 | if proxy
157 | else None
158 | )
159 |
160 | while True:
161 | try:
162 | if method == "get":
163 | r = requests.get(url, proxies=proxies)
164 | if method == "put":
165 | r = requests.put(url, data=data, headers=headers, proxies=proxies)
166 | break
167 | except ConnectionError as e:
168 | if time.time() > start_time + connection_timeout:
169 | l.debug(f"All request attempts have failed.")
170 | l.debug(f"{e}")
171 | response = "NOTOK"
172 | break
173 | else:
174 | l.debug(f"Request timed out. Trying again.")
175 | time.sleep(1)
176 | if r and r.status_code == 200:
177 | if "application/json" in r.headers.get("Content-Type", ""):
178 | json_response = r.json()
179 | response = (
180 | json_response.get("ip")
181 | or json_response.get("ip_address")
182 | or json_response.get("ip_addr")
183 | )
184 | else:
185 | response = "OK"
186 | else:
187 | response = "NOTOK"
188 |
189 | return response
190 |
191 |
192 | def influxdb_pub(vpn_gw_name, ipaddr_public):
193 | l.info(f"Publishing details to influxdb...")
194 | time_now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
195 | ttime_now = time.time()
196 | d = {}
197 | d["measurement"] = influxdb_measurement
198 | d["time"] = time_now
199 | d["tags"] = {"vpn_tracker": "vpn_tracker"}
200 | vpn_provider = vpn().get_provider_name()
201 | d["fields"] = {
202 | "hostname": instance_name,
203 | "vpn_provider": vpn_provider,
204 | "gateway": vpn_gw_name,
205 | "ipaddr_public": ipaddr_public,
206 | }
207 | try:
208 | InfluxDBClient(
209 | influxdb_host,
210 | influxdb_port,
211 | influxdb_user,
212 | influxdb_password,
213 | influxdb_database,
214 | ssl=True,
215 | verify_ssl=True,
216 | ).write_points([d])
217 |     except Exception as e:
218 |         l.debug(f"{e}")
219 |         l.info("Publishing details to influxdb failed but continuing...")
220 |
221 |
222 | class Docker:
223 |
224 | def __init__(self):
225 | pass
226 |
227 | def docker_get_ready_state(self):
228 | ##
229 | ## Checking for valid ip is an additional state on top of passing healthcheck
230 | ##
231 | i = 15
232 | while i >= 1:
233 | my_ipaddr = vpn().get_public_ip()
234 | try:
235 | socket.inet_aton(my_ipaddr)
236 | return True
237 | except socket.error as e:
238 | l.info(f"Waiting for valid ip address...")
239 | l.debug(f"error: {e}")
240 | time.sleep(1)
241 | i -= 1
242 | return False
243 |
244 | def docker_container_start(self):
245 | if not os.path.exists(compose_file_path):
246 | l.info(f"No Docker compose file ({compose_file_path}). Exiting.")
247 | os._exit(1)
248 | l.info(f"Starting docker container...")
249 | try:
250 | subprocess.call(["docker", "compose", "-f", compose_file_path, "up", "-d"])
251 | time.sleep(5)
252 | except ValueError as e:
253 | l.debug(f"{e}")
254 | l.info(f"Start of docker container failed.")
255 | return False
256 |
257 | def docker_container_get(self):
258 | connection_timeout = 5
259 | start_time = time.time()
260 | while True:
261 | try:
262 | container = client.containers.get(container_name)
263 | break
264 | except docker.errors.NotFound:
265 | if time.time() > start_time + connection_timeout:
266 | l.info(f"Unable to get docker container details.")
267 | os._exit(1)
268 | else:
269 | self.docker_container_start()
270 | time.sleep(1)
271 | if container:
272 | return container
273 | else:
274 | l.info(f"Unable to get docker container details.")
275 | os._exit(1)
276 |
277 | def docker_checkup(self):
278 | ##
279 | ## If a healthcheck passes, we can just quickly return True. A healthcheck indicates
280 | ## all supporting mechanisms are functioning as needed.
281 | ##
282 | if vpn().get_healthcheck() == "OK":
283 | l.info(f"Healthcheck has passed. Continuing...")
284 | return True
285 | else:
286 | cstate = False
287 |
288 | counter = 15
289 | limit = counter
290 | while cstate is False:
291 | for x in range(counter):
292 | try:
293 |
294 | container = self.docker_container_get()
295 |
296 |                     # Read the container state unconditionally so that
297 |                     # 'running' and 'healthy' are always bound before the
298 |                     # checks further down in this loop.
299 |                     running = container.attrs["State"]["Running"]
300 |                     healthy = container.attrs["State"]["Health"]["Status"]
301 |                     l.debug(f"container_state_running: {running}")
302 |                     l.debug(f"container_state_healthy: {healthy}")
303 |
304 |                     if not running:
305 |                         l.info("Container is not running, restarting...")
306 |                         self.docker_container_start()
307 |
308 | if x == limit - 1:
309 |
310 | # Make one last ditch attempt to start this. If this doesn't work, we
311 | # will just bail on exit(1)
312 |
313 | # l.info(f'Attempting to start the container before exiting...')
314 | # self.docker_container_start()
315 |
316 | l.info(f"Cannot start container. Exiting.")
317 | os._exit(1)
318 | if running is True and healthy == "healthy":
319 | l.info(f"Container is running and healthy.")
320 | cstate = True
321 | break
322 |
323 | if running is True and (
324 | (healthy == "starting") or (healthy == "unhealthy")
325 | ):
326 |
327 | if counter < 10:
328 | self.docker_container_start()
329 |
330 |                             # A persistently unhealthy status should eventually
331 |                             # trigger a restart once the retry counter runs out.
332 |
333 |                             # See the 'x == limit - 1' branch above for a possible fix.
334 |
335 | l.debug(f"Counter is: {counter}")
336 | l.info(f"Waiting for container status...")
337 | time.sleep(5)
338 | if running is False:
339 | l.info(f"Container is not running, restarting...")
340 | self.docker_container_start()
341 |
342 | counter = counter - 1
343 | except ValueError as e:
344 | l.debug(f"{e}")
345 | os._exit(1)
346 |
347 | return True
348 |
349 | def docker_checkup_and_pub(self):
350 | try:
351 | if self.docker_checkup():
352 |                 # get_gw_new() is called for its side effect of rotating the
353 |                 # VPN until this instance's gateway is not in use elsewhere.
354 |                 vpn().get_gw_new()
355 |                 my_ipaddr = vpn().get_public_ip()
356 | try:
357 | socket.inet_aton(my_ipaddr)
358 | except socket.error as e:
359 | l.debug(f"error: {e}")
360 |
361 | vpn_gw_name = vpn().get_gw_name(vpn().get_gw_ip())
362 |
363 | if mqtt_enable:
364 | mqtt().mqtt_pub(vpn_gw_name, my_ipaddr)
365 | if mysql_enable:
366 | mysql().mysql_pub(vpn_gw_name, my_ipaddr)
367 | if influxdb_enable:
368 | influxdb_pub(vpn_gw_name, my_ipaddr)
369 |
370 | else:
371 | l.info("docker_checkup fail")
372 | except ValueError as e:
373 | l.debug(f"error: {e}")
374 | return False
375 |
376 |
377 | class vpn:
378 | def __init__(self):
379 | pass
380 |
381 | def get_healthcheck(self):
382 | l.debug(f"Running gluetun healthcheck against: {hostname_fqdn}")
383 | health = send_request(
384 | method="get", url=f"http://{hostname_fqdn}:" + str(gluetun_healthcheck_port)
385 | )
386 | return health
387 |
388 | def restart_vpn(self):
389 | ##
390 | ## gluetun control server: https://github.com/qdm12/gluetun-wiki/blob/main/setup/advanced/control-server.md
391 | ## gluetun healthcheck: https://github.com/qdm12/gluetun-wiki/blob/main/faq/healthcheck.md
392 | ##
393 | ## If only a stop were sent, the connection would auto-heal within 6 seconds. By sending
394 | ## a 'stopped' and immediately thereafter a 'running', we save some time in returning
395 | ## to service.
396 | ##
397 | ## Re: alternating between endpoint /v1/openvpn, /v1/updater, and /v1/vpn see: https://github.com/qdm12/gluetun/issues/2277
398 | ##
399 | payload = {"status": "stopped"}
400 | headers = {"Content-Type": "application/json"}
401 | send_request(
402 | method="put",
403 | url=f"http://{hostname_fqdn}:"
404 | + str(gluetun_control_port)
405 | + "/v1/vpn/status",
406 | data=json.dumps(payload),
407 | headers=headers,
408 | )
409 |
410 | payload = {"status": "running"}
411 | send_request(
412 | method="put",
413 | url=f"http://{hostname_fqdn}:"
414 | + str(gluetun_control_port)
415 | + "/v1/vpn/status",
416 | data=json.dumps(payload),
417 | headers=headers,
418 | )
419 |
420 | def get_public_ip(self):
421 | urllist_ipaddr = [
422 | "https://api.ivpn.net/v4/geo-lookup",
423 | "https://ipinfo.io/json",
424 | "https://api.ipify.org/?format=json",
425 | "https://ipconfig.io/json",
426 | "https://ifconfig.io/all.json",
427 | "https://ifconfig.me/all.json",
428 | ]
429 | urllist_rand = random.sample(urllist_ipaddr, len(urllist_ipaddr))
430 |
431 | ipaddr_public = send_request(
432 | method="get",
433 | url=urllist_rand[0],
434 | proxy=f"http://{hostname_fqdn}:{gluetun_proxy_port}",
435 | )
436 |
437 | l.debug(f"Public ipaddr: {ipaddr_public}")
438 |
439 | return ipaddr_public
440 |
441 | def get_gw_name(self, ipaddr):
442 | ##
443 | # Using the servers.json file provided by gluetun, this matches
444 |         # the gateway ip to its associated hostname.
445 | ##
446 | try:
447 | with open(gluetun_servers_json) as f:
448 | jblob = f.read()
449 | except ValueError as e:
450 | l.debug(f"{e}")
451 | os._exit(1)
452 | js = json.loads(jblob)
453 | vpn_provider = self.get_provider_name()
454 | if vpn_provider in js:
455 | servers = js[vpn_provider]["servers"]
456 | else:
457 |             l.debug(f"WARNING: no servers found for provider {vpn_provider}")
458 | servers = []
459 |
460 | #
461 | # This is an example of redefining servers with specified regions to exclude.
462 | #
463 | # excluded_regions = {'TX', 'FL', 'GA'}
464 | # servers = [ ep for ep in servers if ep.get('region') not in excluded_regions ]
465 | #
466 |
467 | for ep in servers:
468 | for ip in ep["ips"]:
469 | if ip == ipaddr:
470 | return ep["hostname"]
471 |
472 | def get_gw_ip(self):
473 | ##
474 | # This gets the ip address of the gateway we're connected to. Currently this is the
475 | # only means of doing this with gluetun.
476 | ##
477 | try:
478 | container = Docker().docker_container_get()
479 | container_logs = container.logs()
480 | except ValueError as e:
481 | l.debug(f"{e}")
482 | os._exit(1)
483 |         matches = []
484 |         for line in container_logs.decode().splitlines():
485 |             if "Connecting to" in line:
486 |                 matches.append(line)
487 |         if not matches:
488 |             l.info("No 'Connecting to' entry found in container logs.")
489 |             return None
490 |         ipaddr = re.findall(r"[0-9]+(?:\.[0-9]+){3}", matches[-1])[0]
491 |         return ipaddr
492 |
493 | def randomizer(self):
494 | if vpn().get_healthcheck() == "OK":
495 | l.debug(
496 | f"Healthcheck has passed. Restarting VPN via gluetun control server"
497 | )
498 | vpn().restart_vpn()
499 | time.sleep(3)
500 | l.debug(f"VPN has been restarted. VPN IP address renewed.")
501 | if Docker().docker_get_ready_state():
502 | l.debug(f"Ready state is OK. Proceeding to Docker checkup...")
503 | Docker().docker_checkup_and_pub()
504 | else:
505 | l.debug(f"Healthcheck has NOT passed. Trying a Docker checkup...")
506 | Docker().docker_checkup_and_pub()
507 |
508 | def get_provider_name(self):
509 |         container = Docker().docker_container_get()
510 |         provider_name = None
511 |         for item in container.attrs["Config"]["Env"]:
512 |             if item.startswith("VPN_SERVICE_PROVIDER="):
513 |                 provider_name = item.split("=", 1)[1]
514 | return provider_name
515 |
516 | def shuffle_provider(self):
517 | ##
518 | ## https://stackoverflow.com/questions/6648493/how-to-open-a-file-for-both-reading-and-writing
519 | ## https://stackoverflow.com/questions/34575336/should-we-open-the-file-more-than-once-to-read-it
520 | ##
521 | l.info(f"Shuffling provider...")
522 | provider_files = []
523 | provider_name_current = self.get_provider_name()
524 | env_file = os.path.join(gluetun_path, ".env")
525 | env_glob = os.path.join(gluetun_path, ".env.*")
526 | for f in glob.glob(env_glob):
527 | provider_files.append(f)
528 |         with open(random.choice(provider_files)) as provider_newfile:
529 |             new_env = provider_newfile.read()
530 |         with open(env_file, "w") as env:
531 |             env.write(new_env)
532 |         lines = new_env.splitlines()
533 | for line in lines:
534 | if "VPN_SERVICE_PROVIDER" in line:
535 | provider_name_new = line.split("=")[1]
536 | l.info(f"Current provider is: {provider_name_current}")
537 | l.info(f"New provider is: {provider_name_new}")
538 | #
539 | # container will reload changes to .env with 'docker compose up -d'
540 | # https://stackoverflow.com/questions/42149529/how-to-reload-environment-variables-in-docker-compose-container-with-minimum-dow
541 | #
542 |         if provider_name_current == provider_name_new:
543 |             # Nothing to do; the container keeps its current .env
544 |             l.debug("VPN providers are the same. No restart needed.")
545 | else:
546 | l.info(f"Provider has changed. Reloading container .env")
547 | Docker().docker_container_start()
548 |
549 | Docker().docker_checkup_and_pub()
550 |
551 | def get_gw_other(self):
552 |
553 | if mqtt_enable or mysql_enable:
554 | if mqtt_enable:
555 | mdict = mqtt().mqtt_get_messages()
556 | if mysql_enable:
557 | mdict = mysql().mysql_get_messages()
558 | mdict = {item["instance_name"]: item for item in mdict}
559 |
560 | l.debug(f"mdict: {mdict}")
561 |
562 | gw_other = []
563 |
564 | for key, val in mdict.items():
565 | if key == instance_name:
566 | continue
567 | elif val.get("vpn_gateway"):
568 | gw_other.append(val["vpn_gateway"])
569 |
570 | l.debug(f"Current gateway list shows: {gw_other}")
571 | return gw_other
572 | else:
573 | return None
574 |
575 | def get_gw_new(self):
576 | if mqtt_enable or mysql_enable:
577 | gw_other = self.get_gw_other()
578 | while True:
579 | gw_current = self.get_gw_name(self.get_gw_ip())
580 | if gw_current not in gw_other:
581 | break
582 | else:
583 | vpn().restart_vpn()
584 | time.sleep(15)
585 |
586 | l.debug(f"{gw_current} is not in this list: {gw_other}")
587 | return gw_current
588 | else:
589 | return None
590 |
591 |
592 | class mqtt:
593 | def __init__(self):
594 | self.mqtt_broker = mqtt_broker
595 | self.topic_pub = mqtt_topic_pub
596 | self.topic_sub = mqtt_topic_sub
597 |
598 | def mqtt_pub(self, vpn_gateway, vpn_ipaddr):
599 | #
600 | # This takes two strings and publishes to mqtt with datetime.
601 | #
602 | l.info(f"Publishing details to mqtt...")
603 | client = mqtt_client.Client(mqtt_client.CallbackAPIVersion.VERSION2)
604 | client.connect(self.mqtt_broker)
605 | now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
606 | vpn_provider = vpn().get_provider_name()
607 | status = dict(
608 | datetime=now,
609 | vpn_provider=vpn_provider,
610 | vpn_gateway=vpn_gateway,
611 | vpn_ipaddr=vpn_ipaddr,
612 | )
613 | payload = json.dumps(status)
614 | client.publish(self.topic_pub, payload, retain=True)
615 | client.disconnect()
616 |
617 | def mqtt_get_messages(self):
618 | #
619 | # Returns a dictionary containing all mqtt messages 'proxies/#' with hostname as key and associated details as vals
620 | #
621 | mdict = {}
622 |
623 | def on_connect(client, userdata, flags, rc, properties=None):
624 | client.subscribe(self.topic_sub)
625 |
626 | def on_message(client, userdata, msg):
627 | nonlocal mdict
628 | messages = msg.payload.decode().splitlines()
629 | for m in messages:
630 | h = msg.topic.split("/")[-1]
631 | m = json.loads(m)
632 | ddict = {h: m}
633 | mdict.update(ddict)
634 | return mdict
635 |
636 |         client = mqtt_client.Client(mqtt_client.CallbackAPIVersion.VERSION2)
637 |         # Register callbacks before connecting so the subscription in
638 |         # on_connect and any retained messages are not missed.
639 |         client.on_connect = on_connect
640 |         client.on_message = on_message
641 |         client.connect(self.mqtt_broker)
642 |         client.loop_start()
643 |         time.sleep(0.5)
644 |         client.disconnect()
645 |         client.loop_stop()
644 |
645 | return mdict
646 |
647 |
648 | class mysql:
649 | def __init__(self):
650 | self.db_config = {
651 | "host": config["mysql_host"],
652 | "user": config["mysql_user"],
653 | "password": config["mysql_password"],
654 | "database": config["mysql_database"],
655 | }
656 | self.connection = None
657 | self.cursor = None
658 |
659 | def mysql_connect(self):
660 | try:
661 | self.connection = pymysql.connect(**self.db_config)
662 | self.cursor = self.connection.cursor()
663 | except Exception as e:
664 | print(f"mysql: Error connecting to mysql: {e}")
665 |
666 | def mysql_close(self):
667 | if self.cursor:
668 | self.cursor.close()
669 | if self.connection:
670 | self.connection.close()
671 |
672 | def mysql_pub(self, vpn_gateway, vpn_ipaddr):
673 |
674 | l.info(f"Publishing details to mysql...")
675 | self.mysql_connect()
676 |
677 | if not self.connection:
678 | l.info(f"mysql: Error connecting to mysql")
679 | return
680 |
681 | now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
682 | vpn_provider = vpn().get_provider_name()
683 |
684 | insert_query = """
685 | INSERT INTO vpn_status (datetime, instance_name, vpn_provider, vpn_gateway, vpn_ipaddr)
686 | VALUES (%s, %s, %s, %s, %s)
687 | """
688 | status = (
689 | now,
690 | instance_name,
691 | vpn_provider,
692 | vpn_gateway,
693 | vpn_ipaddr,
694 | )
695 |
696 | try:
697 | self.cursor.execute(insert_query, status)
698 | self.connection.commit()
699 | except Exception as e:
700 | l.info(f"mysql: Error inserting record: {e}")
701 |
702 | self.mysql_close()
703 |
704 | def mysql_get_messages(self):
705 |
706 | self.mysql_connect()
707 |
708 | if not self.connection:
709 | l.info(f"mysql: Error connecting to mysql")
710 | return []
711 |
712 | query = """
713 | SELECT vpn_provider, vpn_gateway, vpn_ipaddr, instance_name FROM vpn_status
714 | WHERE id IN (
715 | SELECT MAX(id) FROM vpn_status
716 | GROUP BY instance_name
717 | )
718 | """
719 |
720 | try:
721 | self.cursor.execute(query)
722 | results = self.cursor.fetchall()
723 | l.debug(f"mysql_get_messages result: {results}")
724 | column_names = [desc[0] for desc in self.cursor.description]
725 | messages = [dict(zip(column_names, row)) for row in results]
726 | l.debug(f"mysql_get_messages messages: {messages}")
727 |
728 | return messages
729 |
730 |         except Exception as e:
731 |             l.info(f"mysql: Error retrieving records: {e}")
732 |             return []
733 |         finally:
734 |             self.mysql_close()
735 |
736 |
737 | class TimerThread(threading.Thread):
738 | def __init__(self, interval, function):
739 | threading.Thread.__init__(self)
740 | self.interval = interval
741 | self.function = function
742 | self.daemon = True
743 |
744 | def run(self):
745 | while True:
746 | l.info(f"{str(self.function)} is sleeping for: {self.interval}")
747 | time.sleep(self.interval)
748 | self.function()
749 |             # After each run, re-arm randomizer and shuffle_provider with a
750 |             # fresh random interval between ttl_min and ttl_max.
751 |             if self.function in (v.randomizer, v.shuffle_provider):
752 |                 self.interval = random.randint(ttl_min, ttl_max)
753 |
754 |
755 | if __name__ == "__main__":
756 | l.info(f"Starting randomizer...")
757 | d = Docker()
758 | v = vpn()
759 | timer1 = TimerThread(start_delay, v.randomizer)
760 | timer1.start()
761 | timer2 = TimerThread(checkup_and_pub_interval, d.docker_checkup_and_pub)
762 | timer2.start()
763 | if shuffle_vpn_provider:
764 | timer3 = TimerThread(random.randint(ttl_min, ttl_max), v.shuffle_provider)
765 | timer3.start()
766 |
767 | while True:
768 | time.sleep(ttime_loop_sleep)
769 |
--------------------------------------------------------------------------------
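
A note on the logging setup in /randomizer above: the script attaches either a stdout handler (when running as a container) or a syslog handler, and the todo comment in that section mentions allowing both at once. A minimal sketch of that idea, assuming the same 'randomizer' logger name and a local /dev/log syslog socket (illustrative only, not part of the repository):

import logging
import logging.handlers
import sys

# Attach both handlers to the same logger instead of choosing one
# based on is_docker_container.
logger = logging.getLogger("randomizer")
logger.setLevel(logging.INFO)

formatter = logging.Formatter(
    "%(asctime)s %(name)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)

handlers = [
    logging.handlers.SysLogHandler(address="/dev/log"),  # assumes a local syslog socket
    logging.StreamHandler(sys.stdout),
]
for handler in handlers:
    handler.setFormatter(formatter)
    logger.addHandler(handler)

logger.info("logging to both syslog and stdout")
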
/randomizer.yml:
--------------------------------------------------------------------------------
1 | #
2 | # debug increases verbosity of logging and frequency of rotation (i.e., ttl_min_debug, ttl_max_debug)
3 | #
4 | debug: True
5 | #
6 | # Is this a docker container?
7 | is_docker_container: True
8 | #
9 | # Not tested: If is_docker_container_multi is True, hostname_fqdn will be defined programmatically, overriding the
10 | # configured value below
11 | is_docker_container_multi: False
12 |
13 | # hostname_fqdn must be a resolvable, functional dns name which will be used
14 | # in http requests against the gluetun service
15 | hostname_fqdn: "haha90.sfio.win"
16 |
17 | # instance_name is used for superficial labeling against a shared database
18 | instance_name: "haha90"
19 |
20 | #
21 | # Rotate multiple VPN providers
22 | #
23 | shuffle_vpn_provider: True
24 | #
25 | # mysql - when mysql_enable is False, the consecutive mysql_* variables are not used
26 | #
27 | mysql_enable: False
28 | mysql_host: "mysql.example.com"
29 | mysql_user: "randomizer"
30 | mysql_password: "mysql123"
31 | mysql_database: "randomizer"
32 | #
33 | # MQTT - when mqtt_enable is False, the consecutive mqtt_* variables are not used
34 | #
35 | mqtt_enable: False
36 | mqtt_broker: "mqtt.example.com"
37 | mqtt_topic_pub: "proxies/"
38 | mqtt_topic_sub: "proxies/#"
39 | #
40 | # InfluxDB - when influxdb_enable is False, the consecutive influxdb_* variables are not used
41 | #
42 | influxdb_enable: False
43 | influxdb_host: "influxdb.example.com"
44 | influxdb_port: 8086
45 | influxdb_user: "influx_user"
46 | influxdb_password: "pa$$w0rd"
47 | influxdb_database: "randomizerdb"
48 | influxdb_measurement: "vpn_tracker"
49 | #
50 | # Gluetun
51 | #
52 | container_name: "gluetun"
53 | gluetun_healthcheck_port: 6666
54 | gluetun_control_port: 8000
55 | gluetun_proxy_port: 8008
56 | gluetun_path: "/usr/local/docker/gluetun"
57 | #
58 | # Control parameters for randomizer
59 | #
60 | # delay the start of this thing specified in seconds
61 | start_delay: 5
62 | #
63 | # min and max range for random execution (in seconds)
64 | #
65 | # ttl_min: 7200
66 | # ttl_max: 14400
67 |
68 | ttl_min: 30
69 | ttl_max: 120
70 |
71 | ttl_min_debug: 1200
72 | ttl_max_debug: 2700
73 | #
74 | # how often to publish details to mqtt (in seconds)
75 | #
76 | ttime_mqtt_pub: 900
77 | # checkup_and_pub_interval is set to ttime_mqtt_pub inside the randomizer script; it is not read from this file
78 |
--------------------------------------------------------------------------------
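
The debug comment at the top of this file suggests that ttl_min_debug and ttl_max_debug should drive the rotation interval when debug is True, while the script currently re-arms its timers from ttl_min and ttl_max only. A minimal sketch of how that selection could be applied, under the assumption that this is the intended behaviour (the names mirror the keys above; this is not code from the repository):

import random

# Values mirroring randomizer.yml; in the script these come from config[...]
debug = True
ttl_min, ttl_max = 30, 120
ttl_min_debug, ttl_max_debug = 1200, 2700

# Use the debug range when debug is enabled, otherwise the normal range.
low, high = (ttl_min_debug, ttl_max_debug) if debug else (ttl_min, ttl_max)
next_interval = random.randint(low, high)
print(f"next rotation in {next_interval} seconds")
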
/requirements-ci.txt:
--------------------------------------------------------------------------------
1 | requests
2 | pytest
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | docker
2 | pyyaml
3 | paho-mqtt
4 | influxdb
5 | requests
6 | pymysql
7 |
--------------------------------------------------------------------------------
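
For reference, the status message that the randomizer script publishes (retained) to mqtt_topic_pub + instance_name, and that get_gw_other() later reads back via mqtt_topic_sub, is a small JSON object. A sketch of its shape, using the field names from mqtt_pub() with placeholder values:

import json

# Placeholder values; the field names match the status dict built in mqtt_pub().
status = {
    "datetime": "2024-01-01 12:00:00",
    "vpn_provider": "example_provider",
    "vpn_gateway": "gateway01.example.net",
    "vpn_ipaddr": "192.0.2.10",
}

# Published retained to 'proxies/<instance_name>' so other instances can see
# which gateway this instance is currently using.
payload = json.dumps(status)
print(payload)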