├── .github └── workflows │ ├── node.yml │ └── python.yml ├── .gitignore ├── .husky ├── post-merge └── pre-commit ├── .lintstagedrc.yml ├── .load-env ├── .npmrc ├── .nvmrc ├── .prettierignore ├── .prettierrc.yml ├── .python-version ├── .terraform-version ├── INIT.md ├── Makefile ├── README.md ├── conftest.py ├── fixtures ├── .env ├── .gitignore ├── README.md └── templates │ ├── .gitignore │ ├── foo.yml │ ├── foo.yml.j2 │ └── nested │ ├── bar.yml │ └── bar.yml.j2 ├── package-lock.json ├── package.json ├── poetry.lock ├── poetry.toml ├── pyproject.toml ├── pytest.ini ├── renovate.json ├── scripts ├── check-git.sh ├── render_all.py ├── render_all_test.py ├── restore-all.sh └── wait-for ├── services ├── adblock │ ├── README.md │ └── docker-compose.adguard.yml ├── backup │ ├── README.md │ ├── docker-compose.restic.yml │ └── excludes.txt.j2 ├── blog │ ├── README.md │ ├── config.json │ └── docker-compose.ghost.yml ├── cache │ ├── README.md │ ├── docker-compose.redis.yml │ ├── dump.sh │ └── restore.sh ├── change-detection │ ├── README.md │ └── docker-compose.change-detection.yml ├── checkin │ ├── README.md │ ├── config.json5.j2 │ └── docker-compose.hoyolab-auto.yml ├── database │ ├── README.md │ ├── docker-compose.mysql.yml │ ├── dump.sh │ ├── init │ │ ├── 01-janejeon.dev.sql │ │ ├── 02-authelia.sql │ │ └── 03-uptime-kuma.sql │ ├── my.cnf │ └── restore.sh ├── debug │ ├── README.md │ └── docker-compose.whoami.yml ├── docker-gc │ ├── README.md │ └── docker-compose.gc.yml ├── docker-management │ ├── README.md │ └── docker-compose.portainer.yml ├── monitoring │ ├── README.md │ └── docker-compose.dozzle.yml ├── notification │ ├── README.md │ ├── alerts.cfg.j2 │ ├── docker-compose.apprise.yml │ └── notifications.cfg.j2 ├── reverse-proxy │ ├── README.md │ ├── docker-compose.traefik.yml │ ├── traefik.dynamic.yml.j2 │ └── traefik.static.yml.j2 ├── sso-proxy │ ├── README.md │ ├── config.yml.j2 │ └── docker-compose.authelia.yml ├── sso │ ├── README.md │ ├── config.toml │ └── docker-compose.lldap.yml └── status-page │ ├── README.md │ └── docker-compose.uptime-kuma.yml └── src ├── README.md ├── get_cloudflare_ips.py ├── get_cloudflare_ips_mock.py ├── get_cloudflare_ips_test.py ├── log_invocation.py ├── requests_session.py ├── requests_session_test.py ├── set_logging_defaults.py └── templating ├── get_env_context.py ├── get_env_context_test.py ├── get_rendered_name.py ├── get_rendered_name_test.py ├── get_template_paths.py ├── get_template_paths_test.py ├── render_template.py └── render_template_test.py /.github/workflows/node.yml: -------------------------------------------------------------------------------- 1 | name: Node CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | branches: 9 | - master 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 17 | with: 18 | submodules: true # need to checkout submodules for scanning dependencies 19 | - uses: actions/setup-node@v4 20 | with: 21 | node-version-file: .nvmrc 22 | cache: npm 23 | - name: Install Dependencies 24 | run: npm ci --prefer-offline 25 | - name: Lint 26 | run: npm run lint 27 | - name: Scan Ghost Themes for Compatibility 28 | run: npm run scan 29 | -------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Python CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 
branches: 9 | - master 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 17 | - name: Install Poetry 18 | run: pipx install poetry 19 | - name: Setup Python 20 | uses: actions/setup-python@v5 # it will resolve from the .python-version file 21 | with: 22 | cache: poetry # it will automatically detect poetry config and cache .venv 23 | # https://github.com/actions/setup-python/blob/main/docs/advanced-usage.md#caching-packages 24 | - run: poetry install 25 | - name: Lint 26 | run: make lint-py 27 | - name: Test 28 | run: make test 29 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Project-specific ignores 2 | **/*.env 3 | **/*.userfile 4 | **/*.pem 5 | 6 | # Logs 7 | logs 8 | *.log 9 | npm-debug.log* 10 | yarn-debug.log* 11 | yarn-error.log* 12 | lerna-debug.log* 13 | 14 | # Node.js ignores 15 | ## Diagnostic reports (https://nodejs.org/api/report.html) 16 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 17 | 18 | ## Runtime data 19 | pids 20 | *.pid 21 | *.seed 22 | *.pid.lock 23 | 24 | ## Directory for instrumented libs generated by jscoverage/JSCover 25 | lib-cov 26 | 27 | ## Coverage directory used by tools like istanbul 28 | coverage 29 | *.lcov 30 | 31 | ## nyc test coverage 32 | .nyc_output 33 | 34 | ## Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) 35 | .grunt 36 | 37 | ## Bower dependency directory (https://bower.io/) 38 | bower_components 39 | 40 | ## node-waf configuration 41 | .lock-wscript 42 | 43 | ## Compiled binary addons (https://nodejs.org/api/addons.html) 44 | build/Release 45 | 46 | ## Dependency directories 47 | node_modules/ 48 | jspm_packages/ 49 | 50 | ## TypeScript v1 declaration files 51 | typings/ 52 | 53 | ## TypeScript cache 54 | *.tsbuildinfo 55 | 56 | ## Optional npm cache directory 57 | .npm 58 | 59 | ## Optional eslint cache 60 | .eslintcache 61 | 62 | ## Microbundle cache 63 | .rpt2_cache/ 64 | .rts2_cache_cjs/ 65 | .rts2_cache_es/ 66 | .rts2_cache_umd/ 67 | 68 | ## Optional REPL history 69 | .node_repl_history 70 | 71 | ## Output of 'npm pack' 72 | *.tgz 73 | 74 | ## Yarn Integrity file 75 | .yarn-integrity 76 | 77 | **/.DS_Store 78 | 79 | # Byte-compiled / optimized / DLL files 80 | __pycache__/ 81 | *.py[cod] 82 | *$py.class 83 | 84 | # C extensions 85 | *.so 86 | 87 | # Distribution / packaging 88 | .Python 89 | build/ 90 | develop-eggs/ 91 | dist/ 92 | downloads/ 93 | eggs/ 94 | .eggs/ 95 | lib/ 96 | lib64/ 97 | parts/ 98 | sdist/ 99 | var/ 100 | wheels/ 101 | share/python-wheels/ 102 | *.egg-info/ 103 | .installed.cfg 104 | *.egg 105 | MANIFEST 106 | 107 | # PyInstaller 108 | # Usually these files are written by a python script from a template 109 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
110 | *.manifest 111 | *.spec 112 | 113 | # Installer logs 114 | pip-log.txt 115 | pip-delete-this-directory.txt 116 | 117 | # Unit test / coverage reports 118 | htmlcov/ 119 | .tox/ 120 | .nox/ 121 | .coverage 122 | .coverage.* 123 | .cache 124 | nosetests.xml 125 | coverage.xml 126 | *.cover 127 | *.py,cover 128 | .hypothesis/ 129 | .pytest_cache/ 130 | cover/ 131 | 132 | # Translations 133 | *.mo 134 | *.pot 135 | 136 | # Django stuff: 137 | *.log 138 | local_settings.py 139 | db.sqlite3 140 | db.sqlite3-journal 141 | 142 | # Flask stuff: 143 | instance/ 144 | .webassets-cache 145 | 146 | # Scrapy stuff: 147 | .scrapy 148 | 149 | # Sphinx documentation 150 | docs/_build/ 151 | 152 | # PyBuilder 153 | .pybuilder/ 154 | target/ 155 | 156 | # Jupyter Notebook 157 | .ipynb_checkpoints 158 | 159 | # IPython 160 | profile_default/ 161 | ipython_config.py 162 | 163 | # pyenv 164 | # For a library or package, you might want to ignore these files since the code is 165 | # intended to run in multiple environments; otherwise, check them in: 166 | # .python-version 167 | 168 | # pipenv 169 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 170 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 171 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 172 | # install all needed dependencies. 173 | #Pipfile.lock 174 | 175 | # poetry 176 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 177 | # This is especially recommended for binary packages to ensure reproducibility, and is more 178 | # commonly ignored for libraries. 179 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 180 | #poetry.lock 181 | 182 | # pdm 183 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 184 | #pdm.lock 185 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 186 | # in version control. 187 | # https://pdm.fming.dev/#use-with-ide 188 | .pdm.toml 189 | 190 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 191 | __pypackages__/ 192 | 193 | # Celery stuff 194 | celerybeat-schedule 195 | celerybeat.pid 196 | 197 | # SageMath parsed files 198 | *.sage.py 199 | 200 | # Environments 201 | .env 202 | .venv 203 | env/ 204 | venv/ 205 | ENV/ 206 | env.bak/ 207 | venv.bak/ 208 | 209 | # Spyder project settings 210 | .spyderproject 211 | .spyproject 212 | 213 | # Rope project settings 214 | .ropeproject 215 | 216 | # mkdocs documentation 217 | /site 218 | 219 | # mypy 220 | .mypy_cache/ 221 | .dmypy.json 222 | dmypy.json 223 | 224 | # Pyre type checker 225 | .pyre/ 226 | 227 | # pytype static type analyzer 228 | .pytype/ 229 | 230 | # Cython debug symbols 231 | cython_debug/ 232 | 233 | # PyCharm 234 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 235 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 236 | # and can be added to the global gitignore or merged into this file. For a more nuclear 237 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
238 | #.idea/ 239 | 240 | # Local .terraform directories 241 | **/.terraform/* 242 | 243 | # .tfstate files 244 | *.tfstate 245 | *.tfstate.* 246 | 247 | # Crash log files 248 | crash.log 249 | crash.*.log 250 | 251 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 252 | # password, private keys, and other secrets. These should not be part of version 253 | # control as they are data points which are potentially sensitive and subject 254 | # to change depending on the environment. 255 | *.tfvars 256 | *.tfvars.json 257 | 258 | # Ignore override files as they are usually used to override resources locally and so 259 | # are not checked in 260 | override.tf 261 | override.tf.json 262 | *_override.tf 263 | *_override.tf.json 264 | 265 | # Include override files you do wish to add to version control using negated pattern 266 | # !example_override.tf 267 | 268 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 269 | # example: *tfplan* 270 | 271 | # Ignore CLI configuration files 272 | .terraformrc 273 | terraform.rc 274 | 275 | # Intentionally gitignoring template rendered files! 276 | # Be sure to include the entries here in .prettierignore as well (see .prettierignore for why) :/ 277 | services/reverse-proxy/traefik.static.yml 278 | services/reverse-proxy/traefik.dynamic.yml 279 | services/backup/excludes.txt 280 | services/sso-proxy/config.yml 281 | services/checkin/config.json5 282 | services/notification/alerts.cfg 283 | services/notification/notifications.cfg 284 | -------------------------------------------------------------------------------- /.husky/post-merge: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . "$(dirname "$0")/_/husky.sh" 3 | 4 | npm ci 5 | -------------------------------------------------------------------------------- /.husky/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | . "$(dirname "$0")/_/husky.sh" 3 | 4 | npx lint-staged 5 | 6 | gitleaks protect --staged --no-banner 7 | -------------------------------------------------------------------------------- /.lintstagedrc.yml: -------------------------------------------------------------------------------- 1 | '*': 2 | - prettier --ignore-unknown --write 3 | 4 | '*.py': 5 | - black --quiet 6 | - isort --quiet --profile black 7 | 8 | '*.tf': 9 | - terraform fmt 10 | -------------------------------------------------------------------------------- /.load-env: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Loads the environment setup necessary to directly call `python` and `pytest` without relying on makefiles. 3 | # Activate using `source .load-env`. 4 | export PYTHONPATH=. 
5 | -------------------------------------------------------------------------------- /.npmrc: -------------------------------------------------------------------------------- 1 | audit=false 2 | fund=false 3 | -------------------------------------------------------------------------------- /.nvmrc: -------------------------------------------------------------------------------- 1 | lts/* 2 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | fixtures/ 2 | dependencies/ 3 | **/themes/ 4 | -------------------------------------------------------------------------------- /.prettierrc.yml: -------------------------------------------------------------------------------- 1 | '@janejeon/prettier-config' 2 | -------------------------------------------------------------------------------- /.python-version: -------------------------------------------------------------------------------- 1 | 3.12 2 | -------------------------------------------------------------------------------- /.terraform-version: -------------------------------------------------------------------------------- 1 | latest:^1.8 2 | -------------------------------------------------------------------------------- /INIT.md: -------------------------------------------------------------------------------- 1 | server init requirements: 2 | 3 | - create non-root sudo user https://www.digitalocean.com/community/tutorials/initial-server-setup-with-ubuntu-20-04 4 | - basic ufw config https://www.digitalocean.com/community/tutorials/how-to-set-up-a-firewall-with-ufw-on-ubuntu-20-04 https://wiki.archlinux.org/title/Uncomplicated_Firewall 5 | - disable password auth https://www.digitalocean.com/community/tutorials/how-to-set-up-ssh-keys-on-debian-11 6 | - unattended-upgrades https://haydenjames.io/how-to-enable-unattended-upgrades-on-ubuntu-debian/ 7 | - install docker & compose https://docs.docker.com/compose/install/ 8 | 9 | ```sh 10 | # (computer) SSH into box as root 11 | # (server): 12 | 13 | adduser jane # interactive prompts 14 | usermod -aG sudo jane # make myself sudo-er 15 | 16 | sudo update-alternatives --config editor # (enter /usr/bin/vim.basic prompt) 17 | sudo visudo # interactive prompt: add "jane ALL=(ALL:ALL) NOPASSWD: ALL" to the bottom of the file 18 | 19 | sudo ln -sf /usr/share/zoneinfo/America/New_York /etc/localtime 20 | 21 | sudo apt update && sudo apt upgrade && sudo apt install rsync make 22 | mkdir self-hosted 23 | 24 | sudo apt install unattended-upgrades 25 | 26 | sudo vim /etc/apt/apt.conf.d/50unattended-upgrades 27 | # enable the following: 28 | # 29 | # Unattended-Upgrade::Origins-Pattern { 30 | # origin=Debian,codename=${distro_codename}-updates; 31 | # origin=Debian,codename=${distro_codename},label=Debian; 32 | # origin=Debian,codename=${distro_codename},label=Debian-Security; 33 | # origin=Debian,codename=${distro_codename}-security,label=Debian-Security; 34 | # } 35 | # Unattended-Upgrade::Mail "asdf@example.com"; 36 | # Unattended-Upgrade::MailReport "on-change"; 37 | # Unattended-Upgrade::Remove-Unused-Kernel-Packages "true"; 38 | # Unattended-Upgrade::Remove-New-Unused-Dependencies "true"; 39 | # Unattended-Upgrade::Remove-Unused-Dependencies "true"; 40 | 41 | # (computer): 42 | ssh-copy-id jane@remote_host # (enter password prompt) 43 | 44 | # (server): 45 | sudo vim /etc/ssh/sshd_config # (interactive prompt, set the following:) 46 | # ChallengeResponseAuthentication no 47 
| # PasswordAuthentication no 48 | # UsePAM no 49 | # PermitRootLogin no 50 | /etc/init.d/ssh reload # interactive prompt: requires password 51 | 52 | # Installing docker 53 | curl -fsSL https://get.docker.com -o get-docker.sh 54 | sudo sh get-docker.sh 55 | sudo usermod -aG docker jane 56 | newgrp docker 57 | sudo service docker restart 58 | 59 | # Logging in 60 | echo $CR_PAT | docker login ghcr.io -u USERNAME --password-stdin 61 | 62 | # Installing docker-compose 63 | # See: https://docs.docker.com/engine/install/debian/#install-using-the-repository 64 | sudo apt-get install \ 65 | ca-certificates \ 66 | curl \ 67 | gnupg \ 68 | lsb-release 69 | sudo mkdir -p /etc/apt/keyrings 70 | curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg 71 | echo \ 72 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ 73 | $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 74 | sudo apt-get update 75 | 76 | sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 77 | sudo chmod +x /usr/local/bin/docker-compose 78 | 79 | # Rules 80 | sudo ufw default deny incoming 81 | sudo ufw default allow outgoing 82 | sudo ufw limit ssh # allow with rate-limiting! 83 | sudo ufw allow 'WWW Full' 84 | sudo ufw allow qBittorrent/dns/bonjour/'kerberos full' # (optional) 85 | sudo ufw enable 86 | 87 | # Need to write rules again for docker since it ignores it 88 | sudo wget -O /usr/local/bin/ufw-docker https://github.com/chaifeng/ufw-docker/raw/master/ufw-docker 89 | sudo chmod +x /usr/local/bin/ufw-docker 90 | sudo ufw-docker install 91 | 92 | # After starting containers: https://github.com/chaifeng/ufw-docker#solving-ufw-and-docker-issues 93 | sudo ufw-docker allow traefik 80/tcp 94 | sudo ufw-docker allow traefik 443/tcp 95 | 96 | sudo apt install jq 97 | 98 | # Swap 99 | ## Create Swap 100 | sudo dd if=/dev/zero of=/swapfile bs=1024 count=1048576 # 1GB 101 | sudo chmod 600 /swapfile 102 | sudo mkswap /swapfile 103 | sudo swapon /swapfile 104 | 105 | sudo vim /etc/fstab # add: /swapfile none swap sw 0 0 106 | 107 | ## Setting Swappiness 108 | sudo sysctl vm.swappiness=30 109 | sudo vim /etc/sysctl.conf # set vm.swappiness=30 110 | ``` 111 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL=deploy 2 | 3 | # use $(MAKE) where possible, but for remote commands, $(M) makes more sense 4 | M=make --no-print-directory 5 | D=docker 6 | DC=$(D) compose 7 | DC_ALL=--project-directory . $(shell ls services/*/docker-compose.*.yml | sed 's/.*/-f &/' | tr '\n' ' ') 8 | DIR=~/self-hosted 9 | SERVER=vultr 10 | 11 | ENV=. .env && 12 | 13 | PYTHON_PATH=PYTHONPATH=. 
# we want everything to be relative to the workspace root 14 | VENV=poetry run # the commands might run in a different shell (CI/local/make), so we have to activate venv every time to be safe 15 | PYTHON=$(PYTHON_PATH) $(VENV) python 16 | PYTEST=$(PYTHON_PATH) $(VENV) pytest 17 | 18 | # |------------------------- Commands to be run within the server -------------------------| 19 | up: 20 | @$(DC) $(DC_ALL) up -d --remove-orphans $(SERVICE) 21 | 22 | restart: 23 | @$(DC) $(DC_ALL) restart $(SERVICE) 24 | 25 | restart-hard: 26 | @$(DC) $(DC_ALL) up -d --remove-orphans --force-recreate --renew-anon-volumes $(SERVICE) 27 | 28 | down: 29 | @$(DC) $(DC_ALL) rm -sf $(SERVICE) 30 | 31 | down-everything: 32 | @echo "Are you sure? [y/N] " && read ans && [ $${ans:-N} = y ] 33 | @$(DC) $(DC_ALL) down --remove-orphans -v 34 | 35 | rm: 36 | @$(DC) $(DC_ALL) rm --stop $(SERVICE) 37 | 38 | rm-hard: 39 | @$(DC) $(DC_ALL) rm --stop -v $(SERVICE) 40 | 41 | prune: 42 | @$(D) system prune -f --filter "label!=com.docker.keep-container=true" 43 | 44 | pull: 45 | @$(DC) $(DC_ALL) pull $(SERVICE) 46 | 47 | exec: 48 | @$(DC) $(DC_ALL) exec $(SERVICE) $(COMMAND) 49 | 50 | run: 51 | @$(DC) $(DC_ALL) run --rm $(DC_RUN_OPTIONS) $(SERVICE) $(COMMAND) 52 | 53 | sh: 54 | @$(DC) $(DC_ALL) run --rm $(SERVICE) sh 55 | 56 | logs: 57 | @$(DC) $(DC_ALL) logs -f $(SERVICE) 58 | 59 | volumes: 60 | @$(D) inspect -f '{{json .Mounts}}' $(SERVICE) | jq 61 | 62 | ports: 63 | sudo netstat -tulpn | grep LISTEN 64 | 65 | # Spin up the backup container, which already has all of the configuration/commands "baked in". 66 | # Note that we're explicitly bypassing the existing entrypoint (which actually does all of the heavy lifting), 67 | # as we need to run this as a "one shot" and not schedule cron (which means this command would never exit). 68 | backup: 69 | @$(DC) $(DC_ALL) run --rm --entrypoint="" backup /usr/local/bin/backup 70 | 71 | list-snapshots: 72 | @$(DC) $(DC_ALL) run --rm backup snapshots 73 | 74 | restore: 75 | @./scripts/restore-all.sh $(SNAPSHOT) 76 | 77 | mem: 78 | free -h 79 | 80 | # |------------------------- Commands to be run locally -------------------------| 81 | tracked: 82 | git ls-tree -r master --name-only 83 | 84 | init: 85 | $(ENV) restic -r b2:$${B2_BUCKET} init 86 | 87 | check-config: 88 | $(DC) $(DC_ALL) config --quiet 89 | 90 | git-check: 91 | ./scripts/check-git.sh 92 | 93 | git-pull: # gotta remember to add the option so that I don't end up having to check the submodules in again 94 | git pull --recurse-submodules 95 | 96 | git-push: git-check 97 | git push 98 | 99 | push-files: 100 | rsync -avzP --delete --exclude=.git --exclude=volumes --exclude=node_modules --exclude=.venv --exclude=.vscode --exclude=**/__pycache__ --exclude=.pytest_cache . $(SERVER):$(DIR) 101 | 102 | render: 103 | $(PYTHON) ./scripts/render_all.py 104 | 105 | # Need to run `make restart` manually on config file changes 106 | deploy: check-config git-push render push-files 107 | $(M) ssh-command COMMAND='$(M) up prune' 108 | 109 | ssh: 110 | ssh $(SERVER) 111 | 112 | ssh-command: 113 | ssh $(SERVER) 'cd $(DIR) && $(COMMAND)' 114 | 115 | lint: lint-js lint-py 116 | 117 | lint-js: 118 | npm run lint 119 | 120 | lint-py: lint-py-black lint-py-isort 121 | 122 | lint-py-black: 123 | $(VENV) black --check . 124 | 125 | lint-py-isort: 126 | $(VENV) isort --profile black --check . 
127 | 128 | # Run all tests 129 | test: test-unit test-integration 130 | 131 | test-unit: 132 | $(PYTEST) -m unit $(OPTIONS) 133 | 134 | test-integration: 135 | $(PYTEST) -m integration $(OPTIONS) 136 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Python CI](https://github.com/JaneJeon/self-hosted/actions/workflows/python.yml/badge.svg)](https://github.com/JaneJeon/self-hosted/actions/workflows/python.yml) 2 | [![Node CI](https://github.com/JaneJeon/self-hosted/actions/workflows/node.yml/badge.svg)](https://github.com/JaneJeon/self-hosted/actions/workflows/node.yml) 3 | [![Secrets Scan](https://github.com/JaneJeon/self-hosted/actions/workflows/secrets.yml/badge.svg)](https://github.com/JaneJeon/self-hosted/actions/workflows/secrets.yml) 4 | 5 | ## Prerequisites 6 | 7 | Cloudflare account, domains, API token 8 | 9 | ## Deploy process 10 | 11 | For now, I'll deploy from my local computer - I just don't trust myself to properly set up a CI to not leak credentials and fuck up production systems! 12 | 13 | - (not yet implemented) run terraform to apply anything, generate secrets and dump them into gitignored files 14 | - run `make deploy` to rsync this entire goddamn folder to the VPS over SSH or something (requires me to set up rsync first, or just scp or whatever it's called) 15 | - run `make up` to reload all the goddamn things (note that docker-compose only reloads containers that have changed - either image or the actual docker-compose config) 16 | 17 | ## Creating a new service 18 | 19 | Checklist for web-facing services: 20 | 21 | - [ ] Traefik labels (incl. entrypoint) 22 | - [ ] Authelia middleware label 23 | - [ ] Restart policy 24 | - [ ] Networks 25 | 26 | Checklist for internal services: 27 | 28 | - [ ] Restart policy 29 | - [ ] Networks 30 | 31 | ## Connecting to the server 32 | 33 | Use `~/.ssh/config` to configure a specific host (named `vultr`) for the Makefile to connect to, with your server's HostName and User. 34 | 35 | Then tell the Makefile to use that server SSH configuration using the `SERVER` variable. 36 | 37 | ## Managing the Server 38 | 39 | ### Backup/Restore 40 | 41 | The way state is handled in this stack is that all stateful applications have volumes mounted onto where they need to read/write, and then we "aggregate" said volumes (living in the host) to backup/restore them in one go (i.e. we backup the "top-level" volumes folder containing all of the volume mounts in one go, and we restore that same folder so that the volume mounts are all restored in one go). 42 | 43 | For databases, there's an additional layer of complexity in that what's on the filesystem may not necessarily reflect the "current state" of the application, as databases commonly "flush" to the WAL but don't necessarily update their own state files, trusting the WAL as the thing to restore from when recovering from a shutdown. 44 | 45 | Therefore, for the databases, we forcefully "flush" their _entire_ state into something we _can_ backup/restore from (we'll call them "dump files" for now), and make sure to run the "dump state"/"restore state from the dump" whenever we backup/restore (in particular, the "dump state" should be run _before_ we run the backup, so the dump files are included in the backup, and the "restore from the dump" should be run _after_ we restore the folder, so that we actually have the dump files to work off of).
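To make that ordering concrete, here's a rough sketch of one backup/restore cycle. This is illustrative only - the real orchestration is baked into the backup container and `scripts/restore-all.sh`, and the `dump.sh`/`restore.sh` scripts shown here (which live under `services/database/` and `services/cache/`) are normally invoked by those rather than by hand:

```sh
# Backup: dump first, then snapshot, so the dump files end up inside the snapshot
./services/database/dump.sh     # flush mysql's state into dump files on its volume
./services/cache/dump.sh        # same idea for redis
make backup                     # restic takes an incremental snapshot of the volumes folder

# Restore: bring the snapshot back first, then re-hydrate from the dumps it contains
make list-snapshots             # find the snapshot ID you want to roll back to
make restore SNAPSHOT=$id       # restic restores the volumes folder (dump files included)
./services/database/restore.sh  # re-hydrate mysql from the restored dump files
```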
46 | 47 | Currently, we are backing up the `/var/lib/docker/volumes` folder [(which contains all Docker-managed volumes)](https://docs.docker.com/storage/#choose-the-right-type-of-mount), as we mount all stateful applications onto named docker volumes. 48 | 49 | The backup/restore process of that "top-level" volumes folder is handled by `restic`, which takes _incremental_ snapshots of that folder as a whole, letting you "time travel" back to past snapshots, with very minimal costs (in terms of the additional disk space). 50 | 51 | The actual backup is automatically done by the `restic-backup` container, which runs backup on startup (i.e. when you re-deploy that container, it will backup) and on schedule. The container already contains all of the scripts necessary for "dumping" databases. 52 | 53 | You can also run the `make backup` command, which uses that exact container to backup the same way it normally does during scheduled backups, with the only difference being that the command is a "one shot" (i.e. doesn't schedule further backups, and exits with a status code 0 upon a successful backup and a nonzero code + an alert to Slack upon failure). 54 | 55 | To restore, we first need to select the snapshot that we want to restore from (which will be especially relevant if you "fucked up" something and want to time travel to before you fucked up). 56 | 57 | You can either choose from the latest snapshot (`make restore`), or specify a snapshot to restore from. For the latter, you can figure out which snapshot you want to restore from by running `make list-snapshots` to list all of the snapshots you can choose from. Copy the ID of the snapshot you want, and pass it into `make restore SNAPSHOT=$id`. 58 | 59 | The restore script automatically handles "re-hydrating" from the database dump files. 60 | 61 | > One thing of note is that the restore should be done on an "empty state", so for databases like mysql, where its "running state" (and _not_ the dump) is stored in its own volume, we explicitly exclude those docker volumes from backup, so that we restore from the dump on an "empty state" for such containers. 62 | 63 | Once the restore is done, you can now start up the containers, given that the states are now restored and safe to be read. 64 | 65 | > Note: `restic` and all of its subcommands will need to secure an exclusive lock, and they do this by touching some files in your backup destination. However, sometimes it doesn't work (especially when you have multiple processes running at the same time), perhaps due to the "object storage" of choice being _eventually_ consistent. In that case, you need to break the lock (after making sure no other `restic` process is currently running) by running: 66 | > 67 | > ``` 68 | > restic unlock 69 | > ``` 70 | > 71 | > (this can be run inside any of the `restic` containers - backup/prune/check) 72 | 73 | ### Docker Resources 74 | 75 | We try to push as much of the stack onto Docker so that they are managed by it, and can have its lifecycle determined by it. For example, instead of creating the networks outside of the docker-compose stack and injecting them as "external: true", we let Docker itself create/destroy the networks as the stack is being created/deleted. 76 | 77 | This also serves as a way to "gc" the resources automatically. 78 | 79 | #### Networking 80 | 81 | Docker networks need special attention due to the way docker-compose works. 
82 | 83 | Basically, when you ask Docker to create a network of name `foo`, it will create a docker network of name `foo`. Simple enough. 84 | 85 | However, when you ask docker _compose_ to create a network of name `foo` while you're in a folder named `folder_name`, it will create a docker network of name `folder_name_foo`, because a. docker compose defaults its `COMPOSE_PROJECT_NAME` to the folder name, and b. docker compose prefixes the networks it creates/manages with the `COMPOSE_PROJECT_NAME`. 86 | 87 | Thus, we manually set `COMPOSE_PROJECT_NAME` in our `.env` (to override the docker-compose default), and tell Traefik to look not at the `public` network, but at the `${COMPOSE_PROJECT_NAME}_public` network instead (as Traefik doesn't know anything about docker compose prefixing network names). 88 | 89 | ### Service Dependencies 90 | 91 | Just as we push all "resources" to Docker/Compose to be managed by it, we also try to push as much of the service lifecycle onto the orchestrator as possible to make our lives easier. 92 | 93 | One of the ways we do it is by 1. explicitly adding Docker healthchecks to as many containers as possible, and 2. setting service dependencies on each other. 94 | 95 | For example, for a service that uses mysql and redis, we might mark it with the following: 96 | 97 | ```yml 98 | depends_on: 99 | mysql: 100 | condition: service_healthy 101 | redis: 102 | condition: service_healthy 103 | ``` 104 | 105 | Docker then uses this information to basically build a DAG of service dependencies, such that: 106 | 107 | - when starting everything from scratch (such is the case when you're restoring from a "blank slate"), the service won't be started until mysql and redis are up and running. 108 | - when shutting everything down, the service goes down first _before_ mysql and redis. 109 | - when restarting mysql or redis, the service, too, shuts down and waits until mysql/redis is up and running again before starting back up. 110 | 111 | All of this ensures that no matter what we do to the stack (take it all up/down, selectively destroy or recreate containers, etc), the orchestrator will always ensure that service dependencies are met. 112 | 113 | ## Local Development 114 | 115 | ### Managing Node Environment/Dependencies 116 | 117 | To get the benefits of DX tooling (including git hooks), you need to install the node dependencies. 118 | 119 | First, install [nvm](https://nvm.sh) and use the version of node specified in this repository (`nvm use`). 120 | 121 | Then, it's just a simple matter of running `npm install`, and all of the git hooks will be automatically installed.
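Concretely, the whole setup boils down to a few commands (a minimal sketch - `nvm install`/`nvm use` read the pinned version from `.nvmrc`, and the `prepare` script in `package.json` is what installs the husky git hooks):

```sh
nvm install  # install the node version pinned in .nvmrc (lts/*)
nvm use      # switch the current shell to that version
npm install  # install dependencies; the "prepare" script then installs the husky hooks
```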
122 | 123 | ### Managing Python Environment/Dependencies 124 | 125 | #### pyenv 126 | 127 | First, install pyenv to control the version of python used (mainly for consistency and reproducibility): https://github.com/pyenv/pyenv#installation 128 | 129 | (Optionally, install shell integrations: https://github.com/pyenv/pyenv#set-up-your-shell-environment-for-pyenv) 130 | 131 | If you don't already have the specific version listed in `.python-version`, install the version specified in the repository (in this case, 3.12): 132 | 133 | ```sh 134 | pyenv install 3.12 135 | ``` 136 | 137 | Then, to use the version of python specified in the repository (automatically), run: 138 | 139 | ```sh 140 | pyenv local # every time you open up this directory, pyenv will automatically switch to the repo's specified python version 141 | ``` 142 | 143 | #### poetry 144 | 145 | Now that we have pyenv set up for consistent python versions, we can install poetry for that specific python version: 146 | 147 | ```sh 148 | pip install poetry # note that you don't have to specify pip3 thanks to pyenv 149 | ``` 150 | 151 | Then, "activate" the poetry environment using our pyenv-provided python: 152 | 153 | ```sh 154 | poetry env use 3.12 # you may have to specify the full path to the pyenv python: https://python-poetry.org/docs/managing-environments/#switching-between-environments 155 | ``` 156 | 157 | Finally, we can install everything: 158 | 159 | ```sh 160 | poetry install 161 | ``` 162 | 163 | Hooray! 164 | 165 | > Note: by default, poetry installed via pyenv-provided python will install its dependencies inside the `.venv` folder, allowing your editor (like VS Code) to automatically pick up on the python dependencies when you use them in your code. 166 | > However, if it doesn't, you may have to set the `virtualenvs.in-project` option to configure poetry to install the dependencies locally: https://python-poetry.org/docs/configuration#virtualenvsin-project (and this requires destroying and recreating the poetry environment). 167 | 168 | #### Running Commands 169 | 170 | Because poetry controls the dependencies (and "regular" python can't see its virtualenv), you need to use poetry to run any python commands. 171 | 172 | You can either drop into a shell where all of that is already pre-configured: 173 | 174 | ```sh 175 | poetry shell 176 | ``` 177 | 178 | Or, alternatively, just run your python commands using poetry: 179 | 180 | ```sh 181 | poetry run python diagram.py 182 | ``` 183 | 184 | Either approach lets the pyenv-controlled python binary (which means it's going to use the "right" version) pick up on all of the virtualenv dependencies. 185 | 186 | Happy hacking! 187 | 188 | ### Templating 189 | 190 | Passing down command line arguments and bespoke environment variables can only get you so far, and to really alleviate the "there's 5000 points I need to configure everywhere" problem, we're using templating as a solution (which will further help in cutting down bespoke command line/environment configuration to only where it's needed). 191 | 192 | Here, we're using a "convention-over-configuration" approach to templating, and consuming the results of said templating. 193 | 194 | Any file whose name ends in `.j2` will be rendered down into a file with the same name, except with the `.j2` extension stripped away (e.g.
`services/$service/docker-compose.$container.yml.j2` will be rendered down into `services/$service/docker-compose.$container.yml`, and will be picked up by all of the Make commands as part of the docker-compose "fleet"). 195 | 196 | Since the rendered files 1. are auto-generated files (and thus don't belong in git histories), and 2. may contain sensitive secrets, we're intentionally choosing _not_ to commit the rendered files; you will be able to see which files will be rendered by the presence of a `.j2` file in the folder you're looking at. 197 | 198 | > **NOTE**: Since it's tricky to change the name in a way that's 1. obvious (e.g. if we were to generate `traefik.static.generated.yml` from `traefik.static.yml.j2` for the sake of always making sure the generated files had a `.generated` part in their file name so it'd be easier to grep all generated files and gitignore them, it would be confusing to try to _guess_ what the generated file name would be when rendering a template), and 2. doesn't disrupt the existing flow (e.g. if we were to generate `docker-compose.$service.yml.generated` from `docker-compose.$service.yml.j2`, existing workflows around grepping for `docker-compose.*.yml` would break), we're simply going to settle with stripping the `.j2` from the template file name. 199 | > 200 | > That means there is NO way for us to automatically detect generated files based on their filename! So take care to add each generated file to the `.gitignore` list! 201 | 202 | You can manually render the templates by running `make render` (mainly to inspect the generated files to see if they look correct). For deployment, to make the process as seamless as possible, it will automatically be run as part of `make deploy`, so there's no need to manually render down the templates before deployment to make the generated files reflect the latest values. 203 | 204 | ### Testing 205 | 206 | Right now, we're testing various pieces of "logic" (i.e. standalone functions that do not have external dependencies), but plan to expand the tests to cover more behaviours, such as e2e testing and integration testing (if I ever get to terraform modules), i.e. actually running the things and checking that the behaviour is what we expect. 207 | 208 | For now, simply run `make test` to run all the tests. 209 | 210 | #### Unit vs. Integration Tests 211 | 212 | You'll note that _all_ tests are marked either with `@pytest.mark.unit` or `@pytest.mark.integration` - appropriately, for unit tests and integration tests respectively. 213 | 214 | This allows us to run only unit tests and only integration tests (`make test-unit` and `make test-integration`) and separate them out. 215 | 216 | This is useful because unit tests, unlike integration tests, test only the specific bits of _logic_ in its purest form; and so, they are able to be tested in _complete_ isolation, with mocks provided to them so they don't hit the actual filesystem/make live network calls. 217 | 218 | > Note: you'll also note that in unit testing individual modules, I often mock out the actual I/O call that _would've_ been made: whether it's network calls via the `responses` library, or filesystem access via the `pyfakefs` library. Being able to fake I/O calls not only reduce the I/O latency in tests, but they also allow me to set up bespoke network/filesystem responses _for every test_ without having to setup a pre-canned response (e.g. as with the fixtures/ folder that I use for integration tests) that needs to be shared by every unit test. 
219 | 220 | In comparison, integration tests test the _executables_ that actually coordinate and run the bits of logic, interfacing with the "real world" (i.e. I/O, external dependencies). This means that it can't really be tested in isolation, though we can feed it fixtures (different from mocks) to keep the test results consistent. 221 | 222 | This fundamental difference between testing isolated bits of logic vs. "executables" is why it's so useful to separate testing them - because, by their very nature, the integration tests are more likely to fail (due to the I/O involved) and in general will take longer (again, due to the I/O). 223 | 224 | To mark the tests, we rely on yet another "convention over configuration": any tests that don't have explicit markings will be marked as a unit test. Any test with `integration` in its test name (i.e. `test_integration_*`) will be marked as an integration test. 225 | 226 | #### Debugging Tests 227 | 228 | You can debug tests by running only one of them, or forcing pytest to let log/print statements through. 229 | 230 | You can pass any options to the `make test-*` commands by setting the OPTIONS object. For example: 231 | 232 | ```sh 233 | make test OPTIONS='-s' # to print output 234 | ``` 235 | 236 | ## Security 237 | 238 | ### Scanning for Secrets 239 | 240 | We use Gitleaks for securing this repo, and you'll need to make sure you have it installed locally for scanning secrets on commit. You can install it by running `brew install gitleaks`. 241 | 242 | As for why Gitleaks... Trivy scanner doesn't match much of anything (see https://github.com/aquasecurity/trivy/blob/main/pkg/fanal/secret/builtin-rules.go), and while Trufflehog is awesome, it is not currently built out for "incremental" scans, such as scanning staged files. 243 | 244 | If Trufflehog ever supports scanning one file at a time (or just integrates scanning staged files outright like gitleaks), I will drop gitleaks in a heartbeat. Until then, integrating gitleaks into pre-commit is the only "fast enough" way to do local, incremental scanning. 245 | 246 | For CI, we do use the Trufflehog scanner because it scans all commits within a branch just fine. 247 | -------------------------------------------------------------------------------- /conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import tempfile 4 | from typing import List 5 | 6 | import pytest 7 | 8 | 9 | def pytest_collection_modifyitems(items: List[pytest.Item]) -> None: 10 | """ 11 | Automatically add pytest markers (based on test name) to differentiate between unit and integration tests. 12 | """ 13 | for item in items: 14 | if "integration" in item.nodeid: 15 | item.add_marker(pytest.mark.integration) 16 | else: 17 | item.add_marker(pytest.mark.unit) 18 | 19 | 20 | @pytest.fixture 21 | def fixtures_directory(): 22 | """ 23 | Creates a local, unique copy of the fixtures/ directory for each (integration) test 24 | that requires this fixture. 25 | 26 | This allows for multiple integration tests to interact with the fixture folder concurrently 27 | without having to worry about race conditions - because each test will be working 28 | with its own *copy* of its fixture folder! 
29 | """ 30 | 31 | # This folder is automatically deleted when the context is over, 32 | # so we don't need to worry about cleaning it up: 33 | # https://docs.python.org/3/library/tempfile.html#tempfile.TemporaryDirectory 34 | with tempfile.TemporaryDirectory() as newpath: 35 | # Workspace root 36 | old_cwd = os.getcwd() 37 | 38 | # Copy over the fixtures folder in the exact same placement 39 | original_fixture_path = os.path.join(old_cwd, "fixtures") 40 | copy_fixture_path = os.path.join(newpath, "fixtures") 41 | shutil.copytree(original_fixture_path, copy_fixture_path) 42 | 43 | os.chdir(newpath) 44 | 45 | yield 46 | 47 | os.chdir(old_cwd) 48 | -------------------------------------------------------------------------------- /fixtures/.env: -------------------------------------------------------------------------------- 1 | # Test environment file used for rendering down templates here, 2 | # and for any environment variable tests. 3 | ENVIRONMENT=test 4 | -------------------------------------------------------------------------------- /fixtures/.gitignore: -------------------------------------------------------------------------------- 1 | !.env 2 | -------------------------------------------------------------------------------- /fixtures/README.md: -------------------------------------------------------------------------------- 1 | This folder contains all of the fixtures needed either for automated testing (including python integration tests and end-to-end tests) or local testing. 2 | -------------------------------------------------------------------------------- /fixtures/templates/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore all files (and only files) within the directory. 2 | # Otherwise, the !exclusion doesn't work: https://stackoverflow.com/a/26469809 3 | # * 4 | # !*/ 5 | 6 | # !.gitignore 7 | # !*.j2 8 | 9 | # NOTE: this is the one place where we *will* check in the generated files, as we need them for testing. 10 | # Take care to update the generated files properly before committing! 
11 | -------------------------------------------------------------------------------- /fixtures/templates/foo.yml: -------------------------------------------------------------------------------- 1 | default: 2 | ENVIRONMENT: test 3 | 4 | cloudflare: 5 | ips: ['1.1.1.1/20', '2.2.2.2/18', '3.3.3.3/16', '4444:5555::/32', '6666:7777::/26'] -------------------------------------------------------------------------------- /fixtures/templates/foo.yml.j2: -------------------------------------------------------------------------------- 1 | default: 2 | ENVIRONMENT: {{ env.ENVIRONMENT }} 3 | 4 | cloudflare: 5 | ips: {{ cloudflare_ips }} 6 | -------------------------------------------------------------------------------- /fixtures/templates/nested/bar.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JaneJeon/self-hosted/98ee7ba7f0bb807de4936e00d7d74ade2c5bf52e/fixtures/templates/nested/bar.yml -------------------------------------------------------------------------------- /fixtures/templates/nested/bar.yml.j2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JaneJeon/self-hosted/98ee7ba7f0bb807de4936e00d7d74ade2c5bf52e/fixtures/templates/nested/bar.yml.j2 -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "type": "module", 4 | "scripts": { 5 | "lint": "run-s lint:*", 6 | "lint:prettier": "prettier --check .", 7 | "lint-fix": "run-s lint-fix:*", 8 | "lint-fix:prettier": "prettier --write .", 9 | "scan": "run-s scan:*", 10 | "scan:london": "gscan dependencies/london-theme --v5", 11 | "prepare": "husky install" 12 | }, 13 | "dependencies": { 14 | "gscan": "^4.39.4" 15 | }, 16 | "devDependencies": { 17 | "@janejeon/prettier-config": "^2.0.0", 18 | "husky": "^8.0.3", 19 | "lint-staged": "^15.0.0", 20 | "npm-run-all2": "^6.0.0", 21 | "prettier": "^3.0.0" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /poetry.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 2 | 3 | [[package]] 4 | name = "black" 5 | version = "24.3.0" 6 | description = "The uncompromising code formatter." 
7 | optional = false 8 | python-versions = ">=3.8" 9 | files = [ 10 | {file = "black-24.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395"}, 11 | {file = "black-24.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995"}, 12 | {file = "black-24.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7"}, 13 | {file = "black-24.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0"}, 14 | {file = "black-24.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9"}, 15 | {file = "black-24.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597"}, 16 | {file = "black-24.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d"}, 17 | {file = "black-24.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5"}, 18 | {file = "black-24.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f"}, 19 | {file = "black-24.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11"}, 20 | {file = "black-24.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4"}, 21 | {file = "black-24.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5"}, 22 | {file = "black-24.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837"}, 23 | {file = "black-24.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd"}, 24 | {file = "black-24.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213"}, 25 | {file = "black-24.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959"}, 26 | {file = "black-24.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb"}, 27 | {file = "black-24.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7"}, 28 | {file = "black-24.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7"}, 29 | {file = "black-24.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f"}, 30 | {file = "black-24.3.0-py3-none-any.whl", hash = "sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93"}, 31 | {file = "black-24.3.0.tar.gz", hash = "sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f"}, 32 | ] 33 | 34 | [package.dependencies] 35 | click = ">=8.0.0" 36 | mypy-extensions = ">=0.4.3" 37 | packaging = ">=22.0" 38 | pathspec = ">=0.9.0" 39 | platformdirs = ">=2" 40 | 41 | [package.extras] 42 | colorama = ["colorama (>=0.4.3)"] 43 | d 
= ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] 44 | jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] 45 | uvloop = ["uvloop (>=0.15.2)"] 46 | 47 | [[package]] 48 | name = "certifi" 49 | version = "2024.7.4" 50 | description = "Python package for providing Mozilla's CA Bundle." 51 | optional = false 52 | python-versions = ">=3.6" 53 | files = [ 54 | {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, 55 | {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, 56 | ] 57 | 58 | [[package]] 59 | name = "charset-normalizer" 60 | version = "3.3.2" 61 | description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 62 | optional = false 63 | python-versions = ">=3.7.0" 64 | files = [ 65 | {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, 66 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, 67 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, 68 | {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, 69 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, 70 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, 71 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, 72 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, 73 | {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, 74 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, 75 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, 76 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, 77 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, 78 | {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, 79 | {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, 80 | {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, 
81 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, 82 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, 83 | {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, 84 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, 85 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, 86 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, 87 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, 88 | {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, 89 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, 90 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, 91 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, 92 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, 93 | {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, 94 | {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, 95 | {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, 96 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, 97 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, 98 | {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, 99 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, 100 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, 101 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, 102 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, 103 | {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, 104 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, 105 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, 106 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, 107 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, 108 | {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, 109 | {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, 110 | {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, 111 | {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, 112 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, 113 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, 114 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, 115 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, 116 | {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, 117 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, 118 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, 119 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, 120 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, 121 | {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, 122 | {file = 
"charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, 123 | {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, 124 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, 125 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, 126 | {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, 127 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, 128 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, 129 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, 130 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, 131 | {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, 132 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, 133 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, 134 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, 135 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, 136 | {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, 137 | {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, 138 | {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, 139 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, 140 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, 141 | {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, 142 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, 143 | {file = 
"charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, 144 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, 145 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, 146 | {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, 147 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, 148 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, 149 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, 150 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, 151 | {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, 152 | {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, 153 | {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, 154 | {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, 155 | ] 156 | 157 | [[package]] 158 | name = "click" 159 | version = "8.1.7" 160 | description = "Composable command line interface toolkit" 161 | optional = false 162 | python-versions = ">=3.7" 163 | files = [ 164 | {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, 165 | {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, 166 | ] 167 | 168 | [package.dependencies] 169 | colorama = {version = "*", markers = "platform_system == \"Windows\""} 170 | 171 | [[package]] 172 | name = "colorama" 173 | version = "0.4.6" 174 | description = "Cross-platform colored terminal text." 
175 | optional = false 176 | python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" 177 | files = [ 178 | {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 179 | {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, 180 | ] 181 | 182 | [[package]] 183 | name = "fake-useragent" 184 | version = "1.4.0" 185 | description = "Up-to-date simple useragent faker with real world database" 186 | optional = false 187 | python-versions = "*" 188 | files = [ 189 | {file = "fake-useragent-1.4.0.tar.gz", hash = "sha256:5426e4015d8ccc5bb25f64d3dfcfd3915eba30ffebd31b86b60dc7a4c5d65528"}, 190 | {file = "fake_useragent-1.4.0-py3-none-any.whl", hash = "sha256:9acce439ee2c6cf9c3772fa6c200f62dc8d56605063327a4d8c5d0e47f414b85"}, 191 | ] 192 | 193 | [[package]] 194 | name = "idna" 195 | version = "3.7" 196 | description = "Internationalized Domain Names in Applications (IDNA)" 197 | optional = false 198 | python-versions = ">=3.5" 199 | files = [ 200 | {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, 201 | {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, 202 | ] 203 | 204 | [[package]] 205 | name = "iniconfig" 206 | version = "2.0.0" 207 | description = "brain-dead simple config-ini parsing" 208 | optional = false 209 | python-versions = ">=3.7" 210 | files = [ 211 | {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, 212 | {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, 213 | ] 214 | 215 | [[package]] 216 | name = "isort" 217 | version = "5.13.2" 218 | description = "A Python utility / library to sort Python imports." 219 | optional = false 220 | python-versions = ">=3.8.0" 221 | files = [ 222 | {file = "isort-5.13.2-py3-none-any.whl", hash = "sha256:8ca5e72a8d85860d5a3fa69b8745237f2939afe12dbf656afbcb47fe72d947a6"}, 223 | {file = "isort-5.13.2.tar.gz", hash = "sha256:48fdfcb9face5d58a4f6dde2e72a1fb8dcaf8ab26f95ab49fab84c2ddefb0109"}, 224 | ] 225 | 226 | [package.extras] 227 | colors = ["colorama (>=0.4.6)"] 228 | 229 | [[package]] 230 | name = "jinja2" 231 | version = "3.1.5" 232 | description = "A very fast and expressive template engine." 233 | optional = false 234 | python-versions = ">=3.7" 235 | files = [ 236 | {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, 237 | {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, 238 | ] 239 | 240 | [package.dependencies] 241 | MarkupSafe = ">=2.0" 242 | 243 | [package.extras] 244 | i18n = ["Babel (>=2.7)"] 245 | 246 | [[package]] 247 | name = "markupsafe" 248 | version = "2.1.3" 249 | description = "Safely add untrusted strings to HTML/XML markup." 
250 | optional = false 251 | python-versions = ">=3.7" 252 | files = [ 253 | {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, 254 | {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, 255 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, 256 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, 257 | {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, 258 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, 259 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, 260 | {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, 261 | {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, 262 | {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, 263 | {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, 264 | {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, 265 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, 266 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, 267 | {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, 268 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, 269 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, 270 | {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, 271 | {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, 272 | {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, 273 | {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"}, 274 | {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"}, 275 | {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"}, 276 | {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"}, 277 | {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"}, 278 | {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"}, 279 | {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"}, 280 | {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"}, 281 | {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"}, 282 | {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"}, 283 | {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, 284 | {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, 285 | {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, 286 | {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, 287 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, 288 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, 289 | {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, 290 | {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, 291 | {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, 292 | {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, 293 | {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, 294 | {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, 295 | {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, 296 | {file = 
"MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, 297 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, 298 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, 299 | {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, 300 | {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, 301 | {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, 302 | {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, 303 | {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, 304 | {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, 305 | {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, 306 | {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, 307 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, 308 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, 309 | {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, 310 | {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, 311 | {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, 312 | {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, 313 | ] 314 | 315 | [[package]] 316 | name = "mypy-extensions" 317 | version = "1.0.0" 318 | description = "Type system extensions for programs checked with the mypy type checker." 
319 | optional = false 320 | python-versions = ">=3.5" 321 | files = [ 322 | {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, 323 | {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, 324 | ] 325 | 326 | [[package]] 327 | name = "packaging" 328 | version = "23.2" 329 | description = "Core utilities for Python packages" 330 | optional = false 331 | python-versions = ">=3.7" 332 | files = [ 333 | {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, 334 | {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, 335 | ] 336 | 337 | [[package]] 338 | name = "pathspec" 339 | version = "0.12.1" 340 | description = "Utility library for gitignore style pattern matching of file paths." 341 | optional = false 342 | python-versions = ">=3.8" 343 | files = [ 344 | {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, 345 | {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, 346 | ] 347 | 348 | [[package]] 349 | name = "platformdirs" 350 | version = "4.1.0" 351 | description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 352 | optional = false 353 | python-versions = ">=3.8" 354 | files = [ 355 | {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, 356 | {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, 357 | ] 358 | 359 | [package.extras] 360 | docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] 361 | test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] 362 | 363 | [[package]] 364 | name = "pluggy" 365 | version = "1.3.0" 366 | description = "plugin and hook calling mechanisms for python" 367 | optional = false 368 | python-versions = ">=3.8" 369 | files = [ 370 | {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"}, 371 | {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"}, 372 | ] 373 | 374 | [package.extras] 375 | dev = ["pre-commit", "tox"] 376 | testing = ["pytest", "pytest-benchmark"] 377 | 378 | [[package]] 379 | name = "pyfakefs" 380 | version = "5.3.2" 381 | description = "pyfakefs implements a fake file system that mocks the Python file system modules." 
382 | optional = false 383 | python-versions = ">=3.7" 384 | files = [ 385 | {file = "pyfakefs-5.3.2-py3-none-any.whl", hash = "sha256:5a62194cfa24542a3c9080b66ce65d78b2e977957edfd3cd6fe98e8349bcca32"}, 386 | {file = "pyfakefs-5.3.2.tar.gz", hash = "sha256:a83776a3c1046d4d103f2f530029aa6cdff5f0386dffd59c15ee16926135493c"}, 387 | ] 388 | 389 | [[package]] 390 | name = "pytest" 391 | version = "8.0.0" 392 | description = "pytest: simple powerful testing with Python" 393 | optional = false 394 | python-versions = ">=3.8" 395 | files = [ 396 | {file = "pytest-8.0.0-py3-none-any.whl", hash = "sha256:50fb9cbe836c3f20f0dfa99c565201fb75dc54c8d76373cd1bde06b06657bdb6"}, 397 | {file = "pytest-8.0.0.tar.gz", hash = "sha256:249b1b0864530ba251b7438274c4d251c58d868edaaec8762893ad4a0d71c36c"}, 398 | ] 399 | 400 | [package.dependencies] 401 | colorama = {version = "*", markers = "sys_platform == \"win32\""} 402 | iniconfig = "*" 403 | packaging = "*" 404 | pluggy = ">=1.3.0,<2.0" 405 | 406 | [package.extras] 407 | testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] 408 | 409 | [[package]] 410 | name = "python-dotenv" 411 | version = "1.0.0" 412 | description = "Read key-value pairs from a .env file and set them as environment variables" 413 | optional = false 414 | python-versions = ">=3.8" 415 | files = [ 416 | {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"}, 417 | {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"}, 418 | ] 419 | 420 | [package.extras] 421 | cli = ["click (>=5.0)"] 422 | 423 | [[package]] 424 | name = "pyyaml" 425 | version = "6.0.1" 426 | description = "YAML parser and emitter for Python" 427 | optional = false 428 | python-versions = ">=3.6" 429 | files = [ 430 | {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, 431 | {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, 432 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, 433 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, 434 | {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, 435 | {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"}, 436 | {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, 437 | {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, 438 | {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, 439 | {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, 440 | {file = 
"PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, 441 | {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, 442 | {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, 443 | {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"}, 444 | {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, 445 | {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, 446 | {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"}, 447 | {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"}, 448 | {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"}, 449 | {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"}, 450 | {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"}, 451 | {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"}, 452 | {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"}, 453 | {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, 454 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, 455 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, 456 | {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, 457 | {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, 458 | {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, 459 | {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, 460 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, 461 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, 462 | {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, 463 | {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, 464 | {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, 465 | {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, 466 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, 467 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, 468 | {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, 469 | {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"}, 470 | {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, 471 | {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, 472 | {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, 473 | {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, 474 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, 475 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, 476 | {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, 477 | {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"}, 478 | {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, 479 | {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, 480 | {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, 481 | ] 482 | 483 | [[package]] 484 | name = "requests" 485 | version = "2.32.2" 486 | description = "Python HTTP for Humans." 
487 | optional = false 488 | python-versions = ">=3.8" 489 | files = [ 490 | {file = "requests-2.32.2-py3-none-any.whl", hash = "sha256:fc06670dd0ed212426dfeb94fc1b983d917c4f9847c863f313c9dfaaffb7c23c"}, 491 | {file = "requests-2.32.2.tar.gz", hash = "sha256:dd951ff5ecf3e3b3aa26b40703ba77495dab41da839ae72ef3c8e5d8e2433289"}, 492 | ] 493 | 494 | [package.dependencies] 495 | certifi = ">=2017.4.17" 496 | charset-normalizer = ">=2,<4" 497 | idna = ">=2.5,<4" 498 | urllib3 = ">=1.21.1,<3" 499 | 500 | [package.extras] 501 | socks = ["PySocks (>=1.5.6,!=1.5.7)"] 502 | use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] 503 | 504 | [[package]] 505 | name = "responses" 506 | version = "0.25.0" 507 | description = "A utility library for mocking out the `requests` Python library." 508 | optional = false 509 | python-versions = ">=3.8" 510 | files = [ 511 | {file = "responses-0.25.0-py3-none-any.whl", hash = "sha256:2f0b9c2b6437db4b528619a77e5d565e4ec2a9532162ac1a131a83529db7be1a"}, 512 | {file = "responses-0.25.0.tar.gz", hash = "sha256:01ae6a02b4f34e39bffceb0fc6786b67a25eae919c6368d05eabc8d9576c2a66"}, 513 | ] 514 | 515 | [package.dependencies] 516 | pyyaml = "*" 517 | requests = ">=2.30.0,<3.0" 518 | urllib3 = ">=1.25.10,<3.0" 519 | 520 | [package.extras] 521 | tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-PyYAML", "types-requests"] 522 | 523 | [[package]] 524 | name = "urllib3" 525 | version = "2.2.2" 526 | description = "HTTP library with thread-safe connection pooling, file post, and more." 527 | optional = false 528 | python-versions = ">=3.8" 529 | files = [ 530 | {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, 531 | {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, 532 | ] 533 | 534 | [package.extras] 535 | brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] 536 | h2 = ["h2 (>=4,<5)"] 537 | socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] 538 | zstd = ["zstandard (>=0.18.0)"] 539 | 540 | [metadata] 541 | lock-version = "2.0" 542 | python-versions = "^3.11" 543 | content-hash = "49289b1d644701d8c724812b1bbff17c7aad17ad2ca1414c7987ef17f7a7a4e6" 544 | -------------------------------------------------------------------------------- /poetry.toml: -------------------------------------------------------------------------------- 1 | [virtualenvs] 2 | in-project = true 3 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "self-hosted" 3 | version = "0.1.0" 4 | description = "" 5 | authors = ["Jane Jeon "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.11" 10 | jinja2 = "^3.1.3" 11 | requests = "^2.32.2" 12 | python-dotenv = "^1.0.0" 13 | fake-useragent = "^1.4.0" 14 | 15 | [tool.poetry.group.dev.dependencies] 16 | black = "^24.3.0" 17 | isort = "^5.12.0" 18 | pytest = "^8.0.0" 19 | responses = "^0.25.0" 20 | pyfakefs = "^5.1.0" 21 | 22 | [build-system] 23 | requires = ["poetry-core"] 24 | build-backend = "poetry.core.masonry.api" 25 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | # We need to pre-define the markers that we want to use. 
3 | markers = 4 | unit: mark a test as a unit test 5 | integration: mark a test as an integration test 6 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": ["github>JaneJeon/dev//packages/renovate-config/default.json"] 4 | } 5 | -------------------------------------------------------------------------------- /scripts/check-git.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -n "$(git status --porcelain --ignore-submodules=dirty)" ]; then 4 | echo "There are uncommitted changes in working tree after execution of the build" 5 | echo "Please run the build locally and commit changes" 6 | exit 1 7 | else 8 | echo "Git working tree is clean" 9 | fi 10 | -------------------------------------------------------------------------------- /scripts/render_all.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | from src.get_cloudflare_ips import get_cloudflare_ips 5 | from src.set_logging_defaults import set_logging_defaults 6 | from src.templating.get_env_context import get_env_context 7 | from src.templating.get_rendered_name import get_rendered_name 8 | from src.templating.get_template_paths import get_template_paths 9 | from src.templating.render_template import render_template 10 | 11 | set_logging_defaults() 12 | 13 | logger = logging.getLogger(__name__) 14 | 15 | 16 | def render_all(env_file: str = ".env", folder="services"): 17 | """ 18 | Function to actually render all templates within a folder (by default, only transforms services/). 19 | """ 20 | logger.info("Generating context...") 21 | context = {"env": get_env_context(env_file), "cloudflare_ips": get_cloudflare_ips()} 22 | 23 | logger.info("Searching directory %s for templates...", folder) 24 | template_paths = get_template_paths(folder) 25 | 26 | for template_path in template_paths: 27 | with open(template_path, "r") as template_file: 28 | logger.info("Reading template %s", template_path) 29 | template_str = template_file.read() 30 | 31 | logger.info("Rendering template %s", template_path) 32 | rendered_str = render_template(template_str, context) 33 | rendered_path = get_rendered_name(template_path) 34 | 35 | with open(rendered_path, "w") as rendered_file: 36 | logger.info("Writing rendered file %s", rendered_path) 37 | rendered_file.write(rendered_str) 38 | 39 | if not template_paths: 40 | logger.info("No templates found.") 41 | 42 | 43 | if __name__ == "__main__": 44 | # When calling it from CLI, just execute "as-is". 45 | folder = sys.argv[1] if len(sys.argv) > 1 else "services" 46 | 47 | render_all(folder=folder) 48 | -------------------------------------------------------------------------------- /scripts/render_all_test.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import os 3 | 4 | import pytest 5 | import responses 6 | 7 | from scripts.render_all import render_all 8 | from src.get_cloudflare_ips_mock import mock_cloudflare_ips 9 | 10 | 11 | @responses.activate 12 | @pytest.mark.usefixtures("fixtures_directory") 13 | def test_integration_execute(): 14 | """ 15 | An integration test, checking that the script renders all templates within a folder as expected. 
16 | Note that this test is NOT safe to run concurrently with any other test that touches fixtures folder. 17 | """ 18 | 19 | FILE_TO_CHECK = "fixtures/templates/foo.yml" 20 | with open(FILE_TO_CHECK, "r") as file: 21 | EXPECTED_VALUE = file.read() 22 | 23 | # Before running the mock execute, delete the file so that we *know* the file gets generated by calling execute(). 24 | os.remove(FILE_TO_CHECK) 25 | 26 | mock_cloudflare_ips() 27 | 28 | # Run the render_all() like normal with two exceptions: 29 | # 1. Pass in a fixture .env file instead of our local, "production" .env file, 30 | # which has all sorts of secrets that we don't want to expose our tests (and the test artifacts) to! 31 | # 2. Instead of rendering down the contents of the services/ folder (which is generally what we want), 32 | # render the contents of the fixtures/ folder so we can actually test on it in a reproducible manner. 33 | render_all(env_file="fixtures/.env", folder="fixtures") 34 | 35 | # Check the contents of the rendered file to see that: 36 | # 1. the file was actually rendered (checking for presence of file), 37 | # 2. the file was rendered properly (checking for contents of file). 38 | 39 | with open(FILE_TO_CHECK, "r") as fixture_file: 40 | result = fixture_file.read() 41 | expected_result = inspect.cleandoc(EXPECTED_VALUE) 42 | 43 | assert result == expected_result 44 | -------------------------------------------------------------------------------- /scripts/restore-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # This script is to be run from host, with the appropriate secrets. 3 | # It assumes that no containers are running at the moment. 4 | # Usage: ./restore-all.sh [snapshotId] 5 | 6 | set -e 7 | 8 | # Clear out anything in volumes folder; otherwise, we get conflicts 9 | sudo rm -rf /var/lib/docker/volumes/* 10 | 11 | # First, find the snapshot to restore to: 12 | BACKUP_ID="${1:-latest}" 13 | 14 | # Then, pass that snapshot id to restore volumes/ directory 15 | # NOTE: we restore to the "root" directory of the backup container, which *seems* wrong; 16 | # except that since restic backs up the /docker/volumes directory, 17 | # the path at which the files will be restored at would be: 18 | # ${prefix}/docker/volumes/${files}. 19 | # So, we need the prefix to be the root. 20 | echo 'Restoring from backup...' 21 | make run SERVICE=backup COMMAND="restore --target / $BACKUP_ID" 22 | 23 | # Finally, get the databases to ingest the restored WALs 24 | # Note: wait-for apparently doesn't work, 127.0.0.1/0.0.0.0/localhost/etc 25 | echo 'Restoring database states...' 
26 | make up SERVICE=mysql # we need to stand up mysql before we can tell it to read from the dump 27 | make run SERVICE=mysql COMMAND=restore 28 | make run SERVICE=redis COMMAND=restore # we do not need to stand up redis beforehand, as it simply reads from dump on startup 29 | -------------------------------------------------------------------------------- /scripts/wait-for: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # The MIT License (MIT) 4 | # 5 | # Copyright (c) 2017 Eficode Oy 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a copy 8 | # of this software and associated documentation files (the "Software"), to deal 9 | # in the Software without restriction, including without limitation the rights 10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | # copies of the Software, and to permit persons to whom the Software is 12 | # furnished to do so, subject to the following conditions: 13 | # 14 | # The above copyright notice and this permission notice shall be included in all 15 | # copies or substantial portions of the Software. 16 | # 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | # SOFTWARE. 24 | 25 | VERSION="2.2.4" 26 | 27 | set -- "$@" -- "$TIMEOUT" "$QUIET" "$PROTOCOL" "$HOST" "$PORT" "$result" 28 | TIMEOUT=15 29 | QUIET=0 30 | # The protocol to make the request with, either "tcp" or "http" 31 | PROTOCOL="tcp" 32 | 33 | echoerr() { 34 | if [ "$QUIET" -ne 1 ]; then printf "%s\n" "$*" 1>&2; fi 35 | } 36 | 37 | usage() { 38 | exitcode="$1" 39 | cat << USAGE >&2 40 | Usage: 41 | $0 host:port|url [-t timeout] [-- command args] 42 | -q | --quiet Do not output any status messages 43 | -t TIMEOUT | --timeout=timeout Timeout in seconds, zero for no timeout 44 | Defaults to 15 seconds 45 | -v | --version Show the version of this tool 46 | -- COMMAND ARGS Execute command with args after the test finishes 47 | USAGE 48 | exit "$exitcode" 49 | } 50 | 51 | wait_for() { 52 | case "$PROTOCOL" in 53 | tcp) 54 | if ! command -v nc >/dev/null; then 55 | echoerr 'nc command is missing!' 56 | exit 1 57 | fi 58 | ;; 59 | http) 60 | if ! command -v wget >/dev/null; then 61 | echoerr 'wget command is missing!' 62 | exit 1 63 | fi 64 | ;; 65 | esac 66 | 67 | TIMEOUT_END=$(($(date +%s) + TIMEOUT)) 68 | 69 | while :; do 70 | case "$PROTOCOL" in 71 | tcp) 72 | nc -w 1 -z "$HOST" "$PORT" > /dev/null 2>&1 73 | ;; 74 | http) 75 | wget --timeout=1 --tries=1 -q "$HOST" -O /dev/null > /dev/null 2>&1 76 | ;; 77 | *) 78 | echoerr "Unknown protocol '$PROTOCOL'" 79 | exit 1 80 | ;; 81 | esac 82 | 83 | result=$? 
84 | 85 | if [ $result -eq 0 ] ; then 86 | if [ $# -gt 7 ] ; then 87 | for result in $(seq $(($# - 7))); do 88 | result=$1 89 | shift 90 | set -- "$@" "$result" 91 | done 92 | 93 | TIMEOUT=$2 QUIET=$3 PROTOCOL=$4 HOST=$5 PORT=$6 result=$7 94 | shift 7 95 | exec "$@" 96 | fi 97 | exit 0 98 | fi 99 | 100 | if [ $TIMEOUT -ne 0 -a $(date +%s) -ge $TIMEOUT_END ]; then 101 | echo "Operation timed out" >&2 102 | exit 1 103 | fi 104 | 105 | sleep 1 106 | done 107 | } 108 | 109 | while :; do 110 | case "$1" in 111 | http://*|https://*) 112 | HOST="$1" 113 | PROTOCOL="http" 114 | shift 1 115 | ;; 116 | *:* ) 117 | HOST=$(printf "%s\n" "$1"| cut -d : -f 1) 118 | PORT=$(printf "%s\n" "$1"| cut -d : -f 2) 119 | shift 1 120 | ;; 121 | -v | --version) 122 | echo $VERSION 123 | exit 124 | ;; 125 | -q | --quiet) 126 | QUIET=1 127 | shift 1 128 | ;; 129 | -q-*) 130 | QUIET=0 131 | echoerr "Unknown option: $1" 132 | usage 1 133 | ;; 134 | -q*) 135 | QUIET=1 136 | result=$1 137 | shift 1 138 | set -- -"${result#-q}" "$@" 139 | ;; 140 | -t | --timeout) 141 | TIMEOUT="$2" 142 | shift 2 143 | ;; 144 | -t*) 145 | TIMEOUT="${1#-t}" 146 | shift 1 147 | ;; 148 | --timeout=*) 149 | TIMEOUT="${1#*=}" 150 | shift 1 151 | ;; 152 | --) 153 | shift 154 | break 155 | ;; 156 | --help) 157 | usage 0 158 | ;; 159 | -*) 160 | QUIET=0 161 | echoerr "Unknown option: $1" 162 | usage 1 163 | ;; 164 | *) 165 | QUIET=0 166 | echoerr "Unknown argument: $1" 167 | usage 1 168 | ;; 169 | esac 170 | done 171 | 172 | if ! [ "$TIMEOUT" -ge 0 ] 2>/dev/null; then 173 | echoerr "Error: invalid timeout '$TIMEOUT'" 174 | usage 3 175 | fi 176 | 177 | case "$PROTOCOL" in 178 | tcp) 179 | if [ "$HOST" = "" ] || [ "$PORT" = "" ]; then 180 | echoerr "Error: you need to provide a host and port to test." 181 | usage 2 182 | fi 183 | ;; 184 | http) 185 | if [ "$HOST" = "" ]; then 186 | echoerr "Error: you need to provide a host to test." 187 | usage 2 188 | fi 189 | ;; 190 | esac 191 | 192 | wait_for "$@" 193 | -------------------------------------------------------------------------------- /services/adblock/README.md: -------------------------------------------------------------------------------- 1 | I know "adblock DNS"s exist, but set up my own to have greater control over the traffic, what gets blocked, clients, etc. 2 | 3 | The current setup uses DNS-over-HTTPS, and apparently that means: 4 | 5 | - AGH won't actually listen to the HTTPS port (443) 6 | - and instead it only listens to 3000 (web) and 53 (default DNS) 7 | - and the DoH is actually served on the same "site" as the web dashboard, except with the `/dns-query` at the end! 8 | 9 | Thus, both the web dashboard and the DoH is being served at the adguard subdomain... 
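A quick way to sanity-check the DoH path end-to-end is to resolve a name through it with curl (a minimal sketch — the hostname below is illustrative; substitute whichever subdomain the Traefik router exposes for `/dns-query`, and note that `--doh-url` needs curl 7.62+):

```sh
# Resolve (and then fetch) example.com via AdGuard Home's DoH endpoint instead of the system resolver
curl --doh-url https://dns.example.com/dns-query -sSI https://example.com
```

If that succeeds, both the TLS termination at Traefik and the upstream AdGuard Home resolution are working.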
10 | -------------------------------------------------------------------------------- /services/adblock/docker-compose.adguard.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | adguardhome: 5 | image: adguard/adguardhome:v0.107.55 6 | container_name: adguardhome 7 | restart: unless-stopped 8 | healthcheck: 9 | test: wget --no-verbose --tries=1 --spider http://localhost:3000 || exit 1 10 | interval: 15s 11 | timeout: 5s 12 | retries: 3 13 | start_period: 30s 14 | networks: 15 | - public 16 | volumes: 17 | - adguard-work:/opt/adguardhome/work 18 | - adguard-conf:/opt/adguardhome/conf 19 | labels: 20 | traefik.enable: 'true' 21 | traefik.http.routers.adguard.tls.certresolver: letsencrypt 22 | traefik.http.routers.adguard.rule: Host(`adguard.${BASE_HOST}`) 23 | traefik.http.routers.adguard.middlewares: sso-proxy@file 24 | traefik.http.services.adguard.loadbalancer.server.port: 3000 25 | # A separate endpoint for DNS-over-HTTPS that doesn't have to go through Authelia (which is prone to rejecting requests) 26 | traefik.http.routers.adguard-doh.tls.certresolver: letsencrypt 27 | traefik.http.routers.adguard-doh.rule: Host(`dns.${BASE_HOST}`) && PathPrefix(`/dns-query`) 28 | traefik.http.routers.adguard-doh.service: adguard 29 | 30 | networks: 31 | public: {} 32 | 33 | volumes: 34 | adguard-work: {} 35 | adguard-conf: {} 36 | -------------------------------------------------------------------------------- /services/backup/README.md: -------------------------------------------------------------------------------- 1 | Restic to back up stateful docker volumes. 2 | 3 | Backup strategies: 4 | 5 | - for stateful, non-database containers, just back up the docker volume 6 | - for the DBs (MySQL/Redis/Prometheus/Elasticsearch), where writes aren't necessarily guaranteed to be flushed to disk, take a snapshot/dump first, _then_ back up that snapshot with restic. 7 | 8 | Restoring process: 9 | 10 | - for stateful, non-database containers, restore directly from restic/bivac (see the sketch below) 11 | - for the DBs, restore the snapshot into a local docker volume, _then_ restore from the local volume into their respective DBs using their built-in restore solution.
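A minimal sketch of the first case, reusing the `make run` wrapper from `scripts/restore-all.sh` (the project prefix and volume name below are illustrative, and `latest` can be replaced with a specific snapshot ID):

```sh
# Restore a single non-database volume from the restic repository.
# Backups are taken from /docker/volumes, so restoring with --target /
# puts the files back under the mounted /docker/volumes path.
make run SERVICE=backup \
  COMMAND="restore --target / --include /docker/volumes/self-hosted_ghost-devblog-content latest"
```

For the databases, `scripts/restore-all.sh` shows the full sequence: restore everything with restic first, then have each database ingest its own dump via its `restore` command.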
12 | -------------------------------------------------------------------------------- /services/backup/docker-compose.restic.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | backup: 5 | image: mazzolino/restic:1.7.2 6 | container_name: restic-backup 7 | restart: unless-stopped 8 | networks: 9 | - private 10 | hostname: docker 11 | volumes: 12 | - ./services/backup/excludes.txt:/etc/restic/excludes.txt 13 | - ./scripts/wait-for:/usr/local/bin/wait-for:ro 14 | - /var/lib/docker/volumes:/docker/volumes 15 | - /var/run/docker.sock:/var/run/docker.sock 16 | environment: 17 | RUN_ON_STARTUP: 'true' 18 | BACKUP_CRON: '0 30 3 * * *' 19 | RESTIC_REPOSITORY: b2:${B2_BUCKET} 20 | RESTIC_PASSWORD: ${RESTIC_PASSWORD} 21 | RESTIC_BACKUP_SOURCES: /docker/volumes 22 | RESTIC_BACKUP_ARGS: >- 23 | --tag docker-volumes 24 | --exclude-file=/etc/restic/excludes.txt 25 | --verbose 26 | RESTIC_FORGET_ARGS: >- 27 | --keep-last 10 28 | --keep-daily 7 29 | --keep-weekly 5 30 | --keep-monthly 12 31 | B2_ACCOUNT_ID: ${B2_APPLICATION_KEY_ID} 32 | B2_ACCOUNT_KEY: ${B2_APPLICATION_KEY} 33 | TZ: ${TIMEZONE} 34 | # global context, can run anywhere within docker network 35 | # TODO: instead of using "healthchecks at home", just bake in the healthchecks as commands 36 | # within the respective docker containers, and call them. 37 | # e.g. timeout 1 bash -c 'while :; do echo check; sleep 0.1; done ' && echo "Do stuff" || echo 'Boo' 38 | PRE_COMMANDS: |- 39 | wait-for redis:6379 --timeout=60 -- docker exec redis dump 40 | wait-for mysql:3306 --timeout=60 -- docker exec mysql dump 41 | docker exec uptime-kuma sh -c 'rm -f data/*.db.bak*' 42 | POST_COMMANDS_SUCCESS: |- 43 | curl -sS 'http://uptime-kuma:3001/api/push/vdP9McS4Pm?status=up&msg=OK&ping=' 44 | POST_COMMANDS_FAILURE: 'curl -X POST -d "text=backup failed" http://apprise:8000/notify/alerts' 45 | 46 | prune: 47 | image: mazzolino/restic:1.7.2 48 | container_name: restic-prune 49 | restart: unless-stopped 50 | hostname: docker 51 | environment: 52 | RUN_ON_STARTUP: 'false' 53 | PRUNE_CRON: '0 0 4 * * *' 54 | RESTIC_REPOSITORY: b2:${B2_BUCKET} 55 | RESTIC_PASSWORD: ${RESTIC_PASSWORD} 56 | B2_ACCOUNT_ID: ${B2_APPLICATION_KEY_ID} 57 | B2_ACCOUNT_KEY: ${B2_APPLICATION_KEY} 58 | TZ: ${TIMEZONE} 59 | 60 | check: 61 | image: mazzolino/restic:1.7.2 62 | container_name: restic-check 63 | restart: unless-stopped 64 | hostname: docker 65 | environment: 66 | RUN_ON_STARTUP: 'false' 67 | CHECK_CRON: '0 30 4 * * *' 68 | RESTIC_CHECK_ARGS: >- 69 | --read-data-subset=10% 70 | RESTIC_REPOSITORY: b2:${B2_BUCKET} 71 | RESTIC_PASSWORD: ${RESTIC_PASSWORD} 72 | B2_ACCOUNT_ID: ${B2_APPLICATION_KEY_ID} 73 | B2_ACCOUNT_KEY: ${B2_APPLICATION_KEY} 74 | TZ: ${TIMEZONE} 75 | 76 | networks: 77 | private: {} 78 | -------------------------------------------------------------------------------- /services/backup/excludes.txt.j2: -------------------------------------------------------------------------------- 1 | # List of files that restic should ignore 2 | *.tmp 3 | 4 | # Ignore mysql data container; we want to restore from dump. 5 | /docker/volumes/{{ env.COMPOSE_PROJECT_NAME }}_mysql-data 6 | -------------------------------------------------------------------------------- /services/blog/README.md: -------------------------------------------------------------------------------- 1 | Ghost CMS for teh blog 2 | 3 | Use https://github.com/zaxbux/ghost-storage-b2 for storage? 
(fronted by CloudFlare), but be sure to rewrite the public URL! Or just keep using cloudinary? 4 | 5 | https://ghost.org/docs/search Need to add search 6 | 7 | Note that we need special caching - cloudflare doesn't "cache everything" by default, that's something you need to turn on, not to mention the fact that you need to disable /ghost endpoint from being included!! 8 | 9 | Can't protect it using Authelia because of domain... 10 | -------------------------------------------------------------------------------- /services/blog/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "url": "http://localhost:2368", 3 | "server": { 4 | "port": 2368, 5 | "host": "0.0.0.0" 6 | }, 7 | "database": { 8 | "client": "mysql", 9 | "connection": { 10 | "host": "mysql" 11 | }, 12 | "pool": { 13 | "min": 0, 14 | "max": 2 15 | } 16 | }, 17 | "mail": { 18 | "transport": "SMTP", 19 | "options": { 20 | "service": "FastMail" 21 | } 22 | }, 23 | "logging": { 24 | "transports": ["stdout"] 25 | }, 26 | "paths": { 27 | "contentPath": "/var/lib/ghost/content" 28 | }, 29 | "compress": false, 30 | "caching": { 31 | "frontend": { 32 | "maxAge": 10800 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /services/blog/docker-compose.ghost.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | devblog: 5 | image: ghost:5.105.0-alpine 6 | container_name: ghost-devblog 7 | restart: unless-stopped 8 | depends_on: 9 | mysql: 10 | condition: service_healthy 11 | healthcheck: 12 | # https://github.com/TryGhost/Ghost/issues/11181 13 | test: "wget --no-verbose --tries=1 --spider --header='X-Forwarded-Proto: https' http://localhost:2368/ghost/api/admin/site/ || exit 1" 14 | interval: 15s 15 | timeout: 5s 16 | retries: 3 17 | start_period: 30s 18 | networks: 19 | - public 20 | - private 21 | volumes: 22 | - ghost-devblog-content:/var/lib/ghost/content 23 | - ./services/blog/config.json:/var/lib/ghost/config.production.json:ro 24 | environment: 25 | url: https://${BLOG_HOST} 26 | database__connection__user: ghost-devblog 27 | database__connection__password: ghost-devblog-password 28 | database__connection__database: devblog 29 | mail__from: ${SMTP_SENDER} 30 | mail__options__auth__user: ${SMTP_USER} 31 | mail__options__auth__pass: ${SMTP_PASS} 32 | labels: 33 | traefik.enable: 'true' 34 | traefik.http.routers.devblog.tls.certresolver: letsencrypt 35 | traefik.http.routers.devblog.rule: Host(`${BLOG_HOST}`) 36 | traefik.http.routers.devblog.middlewares: ratelimit-burst@file,ratelimit-window@file 37 | traefik.http.services.devblog.loadbalancer.server.port: 2368 38 | 39 | networks: 40 | public: {} 41 | private: {} 42 | 43 | volumes: 44 | ghost-devblog-content: {} 45 | -------------------------------------------------------------------------------- /services/cache/README.md: -------------------------------------------------------------------------------- 1 | Session store, cache, in-memory queue, etc. 
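Other containers on the shared `private` network reach it by its container name. A quick connectivity check (assuming the client container has `redis-cli` available):

```sh
# From any container attached to the `private` network
redis-cli -h redis -p 6379 ping  # expect: PONG
```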
2 | -------------------------------------------------------------------------------- /services/cache/docker-compose.redis.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | redis: 5 | image: redis:7.4.1-alpine 6 | container_name: redis 7 | restart: unless-stopped 8 | healthcheck: 9 | # https://stackoverflow.com/a/71504657 10 | test: redis-cli ping | grep PONG || exit 1 11 | interval: 15s 12 | timeout: 5s 13 | retries: 3 14 | start_period: 30s 15 | networks: 16 | - private 17 | volumes: 18 | - redis-dump:/data # default redis container dump location - check using "config get dir" 19 | - ./services/cache/dump.sh:/usr/local/bin/dump:ro 20 | - ./services/cache/restore.sh:/usr/local/bin/restore:ro 21 | 22 | networks: 23 | private: {} 24 | 25 | volumes: 26 | redis-dump: {} 27 | -------------------------------------------------------------------------------- /services/cache/dump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo 'Dumping MySQL database...' 4 | redis-cli save 5 | -------------------------------------------------------------------------------- /services/cache/restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo 'Restoring Redis database...' 4 | # no-op: redis will automatically pick up the dump file when restarting 5 | -------------------------------------------------------------------------------- /services/change-detection/README.md: -------------------------------------------------------------------------------- 1 | changedetection notifies me of changes in websites 2 | -------------------------------------------------------------------------------- /services/change-detection/docker-compose.change-detection.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | change-detection: 5 | image: ghcr.io/dgtlmoon/changedetection.io:0.48.05 6 | container_name: change-detection 7 | restart: unless-stopped 8 | networks: 9 | - public 10 | volumes: 11 | - change-detection-datastore:/datastore 12 | environment: 13 | BASE_URL: https://change-detection.${BASE_HOST} 14 | labels: 15 | traefik.enable: 'true' 16 | traefik.http.routers.change-detection.tls.certresolver: letsencrypt 17 | traefik.http.routers.change-detection.rule: Host(`change-detection.${BASE_HOST}`) 18 | traefik.http.routers.change-detection.middlewares: sso-proxy@file 19 | traefik.http.services.change-detection.loadbalancer.server.port: 5000 20 | 21 | networks: 22 | public: {} 23 | 24 | volumes: 25 | change-detection-datastore: {} 26 | -------------------------------------------------------------------------------- /services/checkin/README.md: -------------------------------------------------------------------------------- 1 | # Hoyolab auto-checkin 2 | 3 | Original: https://github.com/torikushiii/hoyolab-auto/tree/main 4 | 5 | Container repo: https://github.com/JaneJeon/hoyolab-auto 6 | 7 | Uses telegram notifications, checkin and stamina only (for HSR and ZZZ). 
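
Note that the container does not read the Jinja2 template directly: `config.json5.j2` (below) has to be rendered to `config.json5` first, which is the file the compose definition mounts. A hedged sketch of bringing the service up — the exact render invocation is an assumption (the repo's Makefile is the source of truth for templating), assuming a Poetry-managed environment:

```sh
# Hypothetical direct invocation of the template renderer; check the Makefile for the real target.
poetry run python scripts/render_all.py   # render config.json5.j2 -> config.json5

# From the repo root; --project-directory keeps the ./services/... volume paths valid.
docker compose --project-directory . -f services/checkin/docker-compose.hoyolab-auto.yml up -d
docker logs -f hoyolab-auto               # watch the scheduled check-in/stamina crons fire
```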
8 | -------------------------------------------------------------------------------- /services/checkin/config.json5.j2: -------------------------------------------------------------------------------- 1 | { 2 | loglevel: 'info', 3 | userAgent: 'Custom fork of HoyoLabAuto: github.com/torikushiii/hoyolab-auto', 4 | platforms: [ 5 | { 6 | id: 1, 7 | active: false, 8 | type: 'discord', // This platform is optional, only use this if you have want to access commands via Discord bot 9 | botId: '123', // Your Discord bot ID 10 | token: '(YOUR_DISCORD_BOT_TOKEN)', 11 | }, 12 | { 13 | id: 2, 14 | active: true, // Set to true if you want to enable Telegram bot 15 | type: 'telegram', 16 | chatId: {{ env.TELEGRAM_CHAT_ID }}, // You can follow this guide to create a bot: https://github.com/torikushiii/hoyolab-auto/blob/main/setup/TELEGRAM.md 17 | token: '{{ env.TELEGRAM_NOTIFICATIONS_BOT_TOKEN }}', 18 | disableNotification: false, // Set to true if you want to disable notification for Telegram bot (sounds, vibration, etc.) 19 | }, 20 | { 21 | id: 3, 22 | active: false, // Set to true if you want to send notification to Discord webhook 23 | type: 'webhook', 24 | url: '(YOUR_WEBHOOK_URL)', // You can follow this guide to create a webhook: https://github.com/torikushiii/hoyolab-auto/blob/main/setup/DISCORD_WEBHOOK.md 25 | }, 26 | ], 27 | crons: { 28 | whitelist: [], // You can whitelist some crons if you only want to run specific crons 29 | blacklist: [], // You can blacklist some crons if you don't want to run them 30 | // You can use this site to generate cron expression: https://crontab.guru/ 31 | checkIn: '0 20 * * *', 32 | codeRedeem: '*/15 * * * *', 33 | expedition: '0 */30 * * * *', 34 | missedCheckIn: '0 0 23 * * *', 35 | realmCurrency: '0 */1 * * *', 36 | shopStatus: '0 */1 * * *', 37 | stamina: '0 */30 * * * *' 38 | }, 39 | accounts: [ 40 | { 41 | id: 1, 42 | active: false, // Set to true if you want to enable Honkai Impact 3rd 43 | type: 'honkai', 44 | data: [ 45 | { 46 | cookie: '', 47 | }, 48 | ], 49 | }, 50 | { 51 | id: 2, 52 | active: false, // Set to true if you want to enable Tears of Themis 53 | type: 'termis', 54 | data: [ 55 | { 56 | cookie: '' 57 | } 58 | ] 59 | }, 60 | { 61 | id: 3, 62 | active: false, // Set to true if you want to enable Genshin Impact 63 | type: 'genshin', 64 | data: [ 65 | { 66 | cookie: '', 67 | // Enable this if you want to automatically redeem codes 68 | // Please note that if you have one account with characters in multiple servers 69 | // the code will be redeemed but you won't get the reward 70 | // so please be careful when enabling this, and only enable it on the account that you want to redeem the code 71 | redeemCode: false, 72 | dailiesCheck: true, // Enable this if you want to get reminded to do your daily commissions 73 | weekliesCheck: true, // Enable this if you want to get reminded to do your weeklies 74 | realm: { 75 | check: false, 76 | persistent: false, 77 | }, 78 | stamina: { 79 | check: false, // Enable this if you want to get notified when your stamina is above the threshold 80 | threshold: 150, // Your stamina threshold, only fires notification when your stamina is above this value 81 | persistent: false, // Set to true if you want to get notified every time your stamina is above the threshold 82 | }, 83 | expedition: { 84 | check: false, // Enable this if you want to get notified when your expedition is done 85 | persistent: false, // Set to true if you want to get notified every time your expedition is done 86 | }, 87 | discord: { 88 | // 
Your Discord user ID to ping you when to do your dailies/weeklies 89 | // or when your stamina is above the threshold and your expedition is done 90 | // this is optional, you can leave it as null if you don't want to get pinged 91 | // e.g userId: '123456789' 92 | userId: null, 93 | } 94 | }, 95 | ], 96 | }, 97 | { 98 | id: 4, 99 | active: true, // Set to true if you want to enable Honkai: Star Rail 100 | type: 'starrail', 101 | data: [ 102 | // If you have same account for both genshin and starrail 103 | // You can copy the cookie values from the genshin account 104 | // Same goes for any other game that will be supported in the future 105 | // Support multi-account for every type of game 106 | // Just add another object inside the data array 107 | // Account #1 108 | { 109 | cookie: '{{ env.HOYOLAB_COOKIE }}', 110 | redeemCode: false, 111 | dailiesCheck: false, 112 | weekliesCheck: false, 113 | stamina: { 114 | check: true, 115 | threshold: 230, 116 | persistent: true, 117 | }, 118 | expedition: { 119 | check: true, 120 | persistent: false, 121 | }, 122 | discord: { 123 | // Your Discord user ID to ping you when to do your dailies/weeklies 124 | // or when your stamina is above the threshold and your expedition is done 125 | // this is optional, you can leave it as null if you don't want to get pinged 126 | // e.g userId: '123456789' 127 | userId: null, 128 | } 129 | }, 130 | ], 131 | }, 132 | { 133 | id: 5, 134 | active: true, 135 | type: 'zenless', 136 | data: [ 137 | { 138 | cookie: '{{ env.HOYOLAB_COOKIE }}', 139 | redeemCode: false, 140 | shopStatus: true, // This will check if your shop has finished selling videos 141 | dailiesCheck: false, 142 | stamina: { 143 | check: true, 144 | threshold: 200, 145 | persistent: true, 146 | }, 147 | discord: { 148 | // Your Discord user ID to ping you when to do your dailies/weeklies 149 | // or when your stamina is above the threshold and your expedition is done 150 | // this is optional, you can leave it as null if you don't want to get pinged 151 | // e.g userId: '123456789' 152 | userId: null, 153 | } 154 | }, 155 | ], 156 | }, 157 | ], 158 | } -------------------------------------------------------------------------------- /services/checkin/docker-compose.hoyolab-auto.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | hoyolab-auto: 5 | image: ghcr.io/janejeon/hoyolab-auto:20250121 6 | container_name: hoyolab-auto 7 | restart: unless-stopped 8 | volumes: 9 | - ./services/checkin/config.json5:/app/config.json5:ro 10 | environment: 11 | TZ: Europe/London 12 | -------------------------------------------------------------------------------- /services/database/README.md: -------------------------------------------------------------------------------- 1 | MySQL instead of Postgres for easier operating experience (e.g. no need to "migrate" databases for updates) 2 | 3 | Used by: 4 | 5 | - analytics (matomo) 6 | - bitwarden 7 | - blink (TODO mysql support) 8 | - blog (ghost): mysql/mariadb/sqlite3 only 9 | - grafana 10 | - nextcloud 11 | - photoprism: mysql/mariadb/sqlite3 only 12 | - sso (keycloak) 13 | - wiki (documize) 14 | 15 | Ensure the database character set is set to utf8mb4 and collation is set to utf8mb4_bin!! 
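
A quick way to verify (or enforce) this, sketched with the root credentials from the compose file below — `example_db` is a placeholder, and the init scripts under `services/database/init/` remain the canonical place to create databases:

```sh
# Inspect every database's defaults (both columns should read utf8mb4 / utf8mb4_bin).
docker exec mysql mysql -u root --password=mysql-root-password \
  -e "SELECT SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME, DEFAULT_COLLATION_NAME FROM information_schema.SCHEMATA;"

# Pattern for creating a new database with the character set and collation made explicit.
docker exec mysql mysql -u root --password=mysql-root-password \
  -e "CREATE DATABASE IF NOT EXISTS example_db CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;"
```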
16 | -------------------------------------------------------------------------------- /services/database/docker-compose.mysql.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | mysql: 5 | image: mysql:8.4.3 6 | container_name: mysql 7 | restart: unless-stopped 8 | healthcheck: 9 | # https://stackoverflow.com/a/51641089 10 | test: mysql --user=root --password=mysql-root-password --execute 'SHOW DATABASES;' 11 | interval: 15s 12 | timeout: 5s 13 | retries: 3 14 | start_period: 30s 15 | networks: 16 | - private 17 | volumes: 18 | - mysql-data:/var/lib/mysql 19 | - mysql-dump:/mnt/dump 20 | - ./services/database/init:/docker-entrypoint-initdb.d:ro 21 | - ./services/database/my.cnf:/etc/mysql/conf.d/my.cnf:ro 22 | - ./services/database/dump.sh:/usr/local/bin/dump:ro 23 | - ./services/database/restore.sh:/usr/local/bin/restore:ro 24 | environment: 25 | MYSQL_ROOT_PASSWORD: mysql-root-password 26 | cap_add: 27 | - SYS_NICE # https://stackoverflow.com/questions/55559386/how-to-fix-mbind-operation-not-permitted-in-mysql-error-log 28 | 29 | networks: 30 | private: {} 31 | 32 | volumes: 33 | mysql-dump: {} 34 | mysql-data: {} 35 | -------------------------------------------------------------------------------- /services/database/dump.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo 'Dumping MySQL database...' 4 | mysqldump -h mysql -u root --password=mysql-root-password --opt --verbose --all-databases > /mnt/dump/all.sql 5 | -------------------------------------------------------------------------------- /services/database/init/01-janejeon.dev.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS devblog; 2 | 3 | CREATE USER IF NOT EXISTS 'ghost-devblog'@'%' IDENTIFIED WITH caching_sha2_password BY 'ghost-devblog-password'; 4 | GRANT ALL PRIVILEGES ON devblog.* TO 'ghost-devblog'@'%'; 5 | -------------------------------------------------------------------------------- /services/database/init/02-authelia.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS auth_db; 2 | 3 | CREATE USER IF NOT EXISTS 'authelia'@'%' IDENTIFIED WITH caching_sha2_password BY 'authelia-password'; 4 | GRANT ALL PRIVILEGES ON auth_db.* TO 'authelia'@'%'; 5 | -------------------------------------------------------------------------------- /services/database/init/03-uptime-kuma.sql: -------------------------------------------------------------------------------- 1 | -- this user only exists to check that mysql is running 2 | CREATE USER IF NOT EXISTS 'uptime-kuma'@'%' IDENTIFIED WITH caching_sha2_password BY 'uptime-kuma-password'; 3 | -------------------------------------------------------------------------------- /services/database/my.cnf: -------------------------------------------------------------------------------- 1 | # https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html 2 | # https://dev.mysql.com/doc/refman/8.0/en/innodb-parameters.html 3 | 4 | [mysqld] 5 | performance_schema = off # maybe I can turn this back on once I have a clearer breakdown of the memory usage across the stack; 6 | # however, as is, turning off performance schema reduced my memory from 450M to 180M, so... 
7 | innodb_buffer_pool_size = 20M 8 | max_connections = 20 9 | 10 | innodb_redo_log_capacity = 10485760 # limit size to 10MB 11 | -------------------------------------------------------------------------------- /services/database/restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo 'Restoring MySQL database...' 4 | mysql -h mysql -u root --password=mysql-root-password < /mnt/dump/all.sql 5 | -------------------------------------------------------------------------------- /services/debug/README.md: -------------------------------------------------------------------------------- 1 | Services used for debugging (live). 2 | -------------------------------------------------------------------------------- /services/debug/docker-compose.whoami.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | whoami: 5 | image: traefik/whoami:v1.10.3 6 | container_name: whoami 7 | restart: unless-stopped 8 | networks: 9 | - public 10 | labels: 11 | traefik.enable: 'true' 12 | traefik.http.routers.whoami.tls.certresolver: letsencrypt 13 | traefik.http.routers.whoami.rule: Host(`whoami.${BASE_HOST}`) 14 | # traefik.http.routers.whoami.middlewares: sso-proxy@file 15 | traefik.http.services.whoami.loadbalancer.server.port: 80 16 | 17 | networks: 18 | public: {} 19 | -------------------------------------------------------------------------------- /services/docker-gc/README.md: -------------------------------------------------------------------------------- 1 | Run routine Docker cleanup jobs w/ https://github.com/Yelp/docker-custodian 2 | -------------------------------------------------------------------------------- /services/docker-gc/docker-compose.gc.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | gc: 5 | image: yelp/docker-custodian:0.7.4 6 | container_name: gc 7 | command: 8 | - --max-container-age 9 | - 7days 10 | - --max-image-age 11 | - 30days 12 | - --dangling-volumes 13 | - --exclude-container-label 14 | - com.docker.keep-container=true 15 | networks: 16 | - private 17 | volumes: 18 | - /var/run/docker.sock:/var/run/docker.sock 19 | labels: 20 | com.docker.keep-container: 'true' 21 | 22 | networks: 23 | private: {} 24 | -------------------------------------------------------------------------------- /services/docker-management/README.md: -------------------------------------------------------------------------------- 1 | Portainer for managing containers on docker-compose 2 | 3 | ## LDAP Authentication 4 | 5 | You can set up authentication with LDAP, so that users can authenticate with their LDAP credentials instead. You can follow the instructions here: https://github.com/nitnelave/lldap/blob/98acd68f060562f41a829e0e659a25029823069c/example_configs/portainer.md 6 | 7 | A couple of notes: 8 | 9 | - since the LLDAP instance is running on a different docker container, refer to it by its docker name (i.e. lldap:3890) 10 | - the `dc=example,dc=com` should be replaced by the `ldap_base_dn` configuration in LLDAP (see config.toml) 11 | - for the strangest reason, you're supposed to authenticate with the "base" LLDAP password (i.e. 
`LLDAP_LDAP_USER_PASS`) and not with any particular user's password 12 | 13 | For best practices, limit the users that can authenticate into Portainer into a group (in LLDAP), and then let Portainer auto-create users of that group when they authenticate with their LDAP credentials (note that the group must already exist in Portainer for this to happen). 14 | 15 | However, note that even with this, LDAP authentication with Portainer can be a bit of a pain in the ass, since in order to manage a pre-existing docker compose stack (as we are here), you need admin access, and the auto-generated user (at least in community edition) only has "base" permissions, meaning someone else must elevate that user (manually) to admin access to get access to the docker compose stack. 16 | -------------------------------------------------------------------------------- /services/docker-management/docker-compose.portainer.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | portainer: 5 | image: portainer/portainer-ce:2.25.1-alpine 6 | container_name: portainer 7 | restart: unless-stopped 8 | healthcheck: 9 | # https://github.com/portainer/portainer/issues/3572 10 | test: wget --no-verbose --tries=1 --spider http://localhost:9000 || exit 1 11 | interval: 15s 12 | timeout: 5s 13 | retries: 3 14 | start_period: 30s 15 | networks: 16 | - public 17 | volumes: 18 | - portainer-data:/data 19 | - /etc/localtime:/etc/localtime:ro 20 | - /var/run/docker.sock:/var/run/docker.sock 21 | labels: 22 | traefik.enable: 'true' 23 | traefik.http.routers.portainer.tls.certresolver: letsencrypt 24 | traefik.http.routers.portainer.rule: Host(`portainer.${BASE_HOST}`) 25 | traefik.http.routers.portainer.middlewares: sso-proxy@file 26 | traefik.http.services.portainer.loadbalancer.server.port: 9000 27 | 28 | networks: 29 | public: {} 30 | 31 | volumes: 32 | portainer-data: {} 33 | -------------------------------------------------------------------------------- /services/monitoring/README.md: -------------------------------------------------------------------------------- 1 | - logging: dozzle (for now) 2 | - monitoring: promstack 3 | - tracing: jaeger 4 | -------------------------------------------------------------------------------- /services/monitoring/docker-compose.dozzle.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | # Portainer fucking SUCKS, and is broken in so many ways. 5 | # While Dozzle isn't made for docker *administration*, per se, 6 | # it still allows me to view the various docker containers and logs 7 | # (in a far more ergonomic manner, mind you); 8 | # so for now, it will live alongside portainer. 
9 | dozzle: 10 | container_name: dozzle 11 | image: amir20/dozzle:v8.9.1 12 | restart: unless-stopped 13 | healthcheck: 14 | test: ['CMD', '/dozzle', 'healthcheck'] 15 | interval: 15s 16 | timeout: 5s 17 | retries: 3 18 | start_period: 30s 19 | networks: 20 | - public 21 | environment: 22 | DOZZLE_NO_ANALYTICS: 'true' 23 | volumes: 24 | - /var/run/docker.sock:/var/run/docker.sock:ro 25 | labels: 26 | traefik.enable: 'true' 27 | traefik.http.routers.dozzle.tls.certresolver: letsencrypt 28 | traefik.http.routers.dozzle.rule: Host(`dozzle.${BASE_HOST}`) 29 | traefik.http.routers.dozzle.middlewares: sso-proxy@file 30 | traefik.http.services.dozzle.loadbalancer.server.port: 8080 31 | 32 | networks: 33 | public: {} 34 | -------------------------------------------------------------------------------- /services/notification/README.md: -------------------------------------------------------------------------------- 1 | Apprise notification provider 2 | -------------------------------------------------------------------------------- /services/notification/alerts.cfg.j2: -------------------------------------------------------------------------------- 1 | tgram://${{ env.TELEGRAM_ALERTS_BOT_TOKEN }}/{{ env.TELEGRAM_CHAT_ID }} -------------------------------------------------------------------------------- /services/notification/docker-compose.apprise.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | apprise: 5 | image: caronc/apprise:1.1 6 | container_name: apprise 7 | restart: unless-stopped 8 | healthcheck: 9 | test: curl -f http://localhost:8000 || exit 1 10 | interval: 15s 11 | timeout: 5s 12 | retries: 5 13 | start_period: 30s 14 | networks: 15 | - public 16 | - private 17 | volumes: 18 | - ./services/notification/alerts.cfg:/config/alerts.cfg 19 | - ./services/notification/notifications.cfg:/config/notifications.cfg 20 | labels: 21 | traefik.enable: 'true' 22 | traefik.http.routers.apprise.tls.certresolver: letsencrypt 23 | traefik.http.routers.apprise.rule: Host(`apprise.${BASE_HOST}`) 24 | traefik.http.routers.apprise.middlewares: sso-proxy@file 25 | traefik.http.services.apprise.loadbalancer.server.port: 8000 26 | environment: 27 | APPRISE_WORKER_COUNT: 1 28 | APPRISE_CONFIG_LOCK: 1 29 | APPRISE_STATEFUL_MODE: simple 30 | 31 | networks: 32 | public: {} 33 | private: {} 34 | -------------------------------------------------------------------------------- /services/notification/notifications.cfg.j2: -------------------------------------------------------------------------------- 1 | tgram://${{ env.TELEGRAM_NOTIFICATIONS_BOT_TOKEN }}/{{ env.TELEGRAM_CHAT_ID }} -------------------------------------------------------------------------------- /services/reverse-proxy/README.md: -------------------------------------------------------------------------------- 1 | Might have to move to nginx given the number of PHP applications... 
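
Before any such migration, it helps to have a repeatable smoke test for the current Traefik setup. A minimal sketch, assuming the stack is running, `BASE_HOST` is set in `.env`, and requests go through Cloudflare (direct connections are rejected by the IP allow-list); the `whoami` service from `services/debug` is the natural probe target:

```sh
docker exec traefik traefik healthcheck --ping   # same command the compose healthcheck uses
docker logs --tail 50 traefik                    # look for router or certificate errors
curl -sSI "https://whoami.${BASE_HOST}"          # expect a 200 once routing and TLS work
```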
2 | -------------------------------------------------------------------------------- /services/reverse-proxy/docker-compose.traefik.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | traefik: 5 | image: traefik:v3.2.3 6 | container_name: traefik 7 | restart: unless-stopped 8 | healthcheck: 9 | # Not sure why I need to enable --ping here again 10 | test: traefik healthcheck --ping 11 | interval: 15s 12 | timeout: 5s 13 | retries: 3 14 | start_period: 30s 15 | command: 16 | - --configFile=/etc/traefik/static.yml 17 | networks: 18 | - public 19 | ports: 20 | - 80:80 21 | - 443:443 22 | volumes: 23 | - traefik-data:/mnt/traefik 24 | - /var/run/docker.sock:/var/run/docker.sock 25 | - ./services/reverse-proxy/traefik.static.yml:/etc/traefik/static.yml:ro 26 | - ./services/reverse-proxy/traefik.dynamic.yml:/etc/traefik/dynamic.yml:ro 27 | environment: 28 | CF_DNS_API_TOKEN: ${CF_DNS_API_TOKEN} 29 | CF_ZONE_API_TOKEN: ${CF_ZONE_API_TOKEN} 30 | labels: 31 | traefik.enable: 'true' 32 | traefik.http.routers.traefik.tls.certresolver: letsencrypt 33 | traefik.http.routers.traefik.rule: Host(`traefik.${BASE_HOST}`) 34 | traefik.http.routers.traefik.middlewares: sso-proxy@file 35 | traefik.http.services.api@internal.loadbalancer.server.port: 8080 36 | 37 | networks: 38 | public: {} 39 | 40 | volumes: 41 | traefik-data: {} 42 | -------------------------------------------------------------------------------- /services/reverse-proxy/traefik.dynamic.yml.j2: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | gzip: 4 | compress: {} 5 | # Allow connections only from Cloudflare, so people can't just bypass it. 6 | # https://doc.traefik.io/traefik/middlewares/http/ipwhitelist/#ipwhitelist 7 | whitelist-cloudflare: 8 | ipAllowList: 9 | sourceRange: {{ cloudflare_ips | tojson }} 10 | # A rate limiter with a shorter window to prevent extreme bursts (e.g. spamming) 11 | ratelimit-burst: 12 | rateLimit: 13 | average: 5 14 | burst: 5 15 | period: 1 16 | sourceCriterion: &ipStrategy 17 | ipStrategy: 18 | depth: 1 # since we can always trust XFF (non-CF traffic is blocked), we can trust the rightmost entry in the XFF 19 | # A rate limiter with a longer window to protect against resource abuse 20 | ratelimit-window: 21 | rateLimit: 22 | average: 60 23 | burst: 60 24 | period: 1m 25 | sourceCriterion: *ipStrategy 26 | sso-proxy: 27 | forwardAuth: 28 | address: http://authelia:9091/api/verify?rd=https://auth.{{ env.BASE_HOST }} 29 | trustForwardHeader: true 30 | authResponseHeaders: 31 | - Remote-User 32 | - Remote-Groups 33 | - Remote-Name 34 | - Remote-Email 35 | -------------------------------------------------------------------------------- /services/reverse-proxy/traefik.static.yml.j2: -------------------------------------------------------------------------------- 1 | # static configuration 2 | # see: https://doc.traefik.io/traefik/reference/static-configuration/file/ 3 | global: 4 | checkNewVersion: false 5 | sendAnonymousUsage: false 6 | 7 | log: 8 | level: INFO 9 | format: common 10 | 11 | accessLog: 12 | format: common 13 | 14 | api: 15 | dashboard: true 16 | insecure: true 17 | 18 | # Note that this will *always* be internal, due to port 8080 not being exposed. 
19 | ping: {} 20 | 21 | entryPoints: 22 | http: 23 | address: :80 24 | http: 25 | redirections: 26 | entryPoint: 27 | to: https 28 | scheme: https 29 | permanent: true 30 | 31 | https: 32 | address: :443 33 | asDefault: true 34 | http: 35 | middlewares: 36 | - gzip@file 37 | - whitelist-cloudflare@file 38 | # Trust headers forwarded from Cloudflare to get accurate X-Forwarded-* information: 39 | # https://doc.traefik.io/traefik/routing/entrypoints/#forwarded-headers 40 | forwardedHeaders: 41 | trustedIPs: {{ cloudflare_ips | tojson }} 42 | 43 | certificatesResolvers: 44 | letsencrypt: 45 | acme: 46 | email: {{ env.SMTP_SENDER }} 47 | storage: /mnt/traefik/acme.json 48 | # for testing, try https://acme-staging-v02.api.letsencrypt.org/directory 49 | caServer: {{ env.ACME_CASERVER | default('https://acme-v02.api.letsencrypt.org/directory') }} 50 | dnsChallenge: 51 | provider: cloudflare 52 | 53 | providers: 54 | docker: 55 | watch: true 56 | exposedByDefault: false 57 | # Note: COMPOSE_PROJECT_NAME is set by .env 58 | network: {{ env.COMPOSE_PROJECT_NAME }}_public 59 | file: 60 | watch: true 61 | filename: /etc/traefik/dynamic.yml 62 | -------------------------------------------------------------------------------- /services/sso-proxy/README.md: -------------------------------------------------------------------------------- 1 | Use Authelia to guard all web applications that shouldn't be open to the public, rather than a VPN. 2 | 3 | File/LDAP -> Authelia -> Traefik/OIDC SSO 4 | -------------------------------------------------------------------------------- /services/sso-proxy/config.yml.j2: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Authelia Configuration # 3 | ############################################################################### 4 | 5 | ## Certificates directory specifies where Authelia will load trusted certificates (public portion) from in addition to 6 | ## the system certificates store. 7 | ## They should be in base64 format, and have one of the following extensions: *.cer, *.crt, *.pem. 8 | # certificates_directory: /config/certificates/ 9 | 10 | ## The theme to display: light, dark, grey, auto. 11 | theme: auto 12 | 13 | identity_validation: 14 | reset_password: 15 | jwt_secret: {{ env.AUTHELIA_JWT_SECRET }} 16 | 17 | ## 18 | ## Log Configuration 19 | ## 20 | log: 21 | ## Level of verbosity for logs: info, debug, trace. 22 | level: info 23 | 24 | ## Format the logs are written as: json, text. 25 | format: text 26 | 27 | ## File path where the logs will be written. If not set logs are written to stdout. 28 | # file_path: /config/authelia.log 29 | 30 | ## Whether to also log to stdout when a log_file_path is defined. 31 | # keep_stdout: false 32 | 33 | ## 34 | ## TOTP Configuration 35 | ## 36 | ## Parameters used for TOTP generation. 37 | totp: 38 | ## The issuer name displayed in the Authenticator application of your choice 39 | ## See: https://github.com/google/google-authenticator/wiki/Key-Uri-Format for more info on issuer names 40 | issuer: auth.{{ env.BASE_HOST }} 41 | ## The period in seconds a one-time password is current for. Changing this will require all users to register 42 | ## their TOTP applications again. Warning: before changing period read the docs link below. 43 | period: 30 44 | ## The skew controls number of one-time passwords either side of the current one that are valid. 
45 | ## Warning: before changing skew read the docs link below. 46 | skew: 1 47 | ## See: https://www.authelia.com/docs/configuration/one-time-password.html#period-and-skew to read the documentation. 48 | 49 | ## 50 | ## Authentication Backend Provider Configuration 51 | ## 52 | ## Used for verifying user passwords and retrieve information such as email address and groups users belong to. 53 | ## 54 | ## The available providers are: `file`, `ldap`. You must use only one of these providers. 55 | authentication_backend: 56 | password_reset: 57 | disable: false 58 | 59 | ## The amount of time to wait before we refresh data from the authentication backend. Uses duration notation. 60 | ## To disable this feature set it to 'disable', this will slightly reduce security because for Authelia, users will 61 | ## always belong to groups they belonged to at the time of login even if they have been removed from them in LDAP. 62 | ## To force update on every request you can set this to '0' or 'always', this will increase processor demand. 63 | ## See the below documentation for more information. 64 | ## Duration Notation docs: https://www.authelia.com/docs/configuration/index.html#duration-notation-format 65 | ## Refresh Interval docs: https://www.authelia.com/docs/configuration/authentication/ldap.html#refresh-interval 66 | refresh_interval: 5m 67 | 68 | ## 69 | ## LDAP (Authentication Provider) 70 | ## 71 | ## This is the recommended Authentication Provider in production 72 | ## because it allows Authelia to offload the stateful operations 73 | ## onto the LDAP service. 74 | ldap: 75 | ## The LDAP implementation, this affects elements like the attribute utilised for resetting a password. 76 | ## Acceptable options are as follows: 77 | ## - 'activedirectory' - For Microsoft Active Directory. 78 | ## - 'custom' - For custom specifications of attributes and filters. 79 | ## This currently defaults to 'custom' to maintain existing behaviour. 80 | ## 81 | ## Depending on the option here certain other values in this section have a default value, notably all of the 82 | ## attribute mappings have a default value that this config overrides, you can read more about these default values 83 | ## at https://www.authelia.com/docs/configuration/authentication/ldap.html#defaults 84 | implementation: custom 85 | 86 | ## The url to the ldap server. Format: ://
[:]. 87 | ## Scheme can be ldap or ldaps in the format (port optional). 88 | address: ldap://lldap:3890 89 | 90 | ## The dial timeout for LDAP. 91 | timeout: 5s 92 | 93 | ## Use StartTLS with the LDAP connection. 94 | start_tls: false 95 | 96 | # tls: 97 | # ## Server Name for certificate validation (in case it's not set correctly in the URL). 98 | # # server_name: ldap.example.com 99 | 100 | # ## Skip verifying the server certificate (to allow a self-signed certificate). 101 | # ## In preference to setting this we strongly recommend you add the public portion of the certificate to the 102 | # ## certificates directory which is defined by the `certificates_directory` option at the top of the config. 103 | # skip_verify: false 104 | 105 | # ## Minimum TLS version for either Secure LDAP or LDAP StartTLS. 106 | # minimum_version: TLS1.2 107 | 108 | ## The distinguished name of the container searched for objects in the directory information tree. 109 | ## See also: additional_users_dn, additional_groups_dn. 110 | base_dn: dc=janejeon,dc=com 111 | 112 | attributes: 113 | ## The attribute holding the username of the user. This attribute is used to populate the username in the session 114 | ## information. It was introduced due to #561 to handle case insensitive search queries. For you information, 115 | ## Microsoft Active Directory usually uses 'sAMAccountName' and OpenLDAP usually uses 'uid'. Beware that this 116 | ## attribute holds the unique identifiers for the users binding the user and the configuration stored in database. 117 | ## Therefore only single value attributes are allowed and the value must never be changed once attributed to a user 118 | ## otherwise it would break the configuration for that user. Technically, non-unique attributes like 'mail' can also 119 | ## be used but we don't recommend using them, we instead advise to use the attributes mentioned above 120 | ## (sAMAccountName and uid) to follow https://www.ietf.org/rfc/rfc2307.txt. 121 | username: uid 122 | 123 | ## The attribute holding the mail address of the user. If multiple email addresses are defined for a user, only the 124 | ## first one returned by the LDAP server is used. 125 | mail: mail 126 | 127 | ## The attribute holding the name of the group. 128 | group_name: cn 129 | 130 | ## The attribute holding the display name of the user. This will be used to greet an authenticated user. 131 | display_name: displayName 132 | 133 | ## The additional_users_dn is prefixed to base_dn and delimited by a comma when searching for users. 134 | ## i.e. with this set to OU=Users and base_dn set to DC=a,DC=com; OU=Users,DC=a,DC=com is searched for users. 135 | additional_users_dn: ou=people 136 | 137 | ## The users filter used in search queries to find the user profile based on input filled in login form. 
138 | ## Various placeholders are available in the user filter which you can read about in the documentation which can 139 | ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#users-filter-replacements 140 | ## 141 | ## Recommended settings are as follows: 142 | ## - Microsoft Active Directory: (&({username_attribute}={input})(objectCategory=person)(objectClass=user)) 143 | ## - OpenLDAP: 144 | ## - (&({username_attribute}={input})(objectClass=person)) 145 | ## - (&({username_attribute}={input})(objectClass=inetOrgPerson)) 146 | ## 147 | ## To allow sign in both with username and email, one can use a filter like 148 | ## (&(|({username_attribute}={input})({mail_attribute}={input}))(objectClass=person)) 149 | users_filter: (&({username_attribute}={input})(objectClass=person)) 150 | 151 | ## The additional_groups_dn is prefixed to base_dn and delimited by a comma when searching for groups. 152 | ## i.e. with this set to OU=Groups and base_dn set to DC=a,DC=com; OU=Groups,DC=a,DC=com is searched for groups. 153 | additional_groups_dn: ou=groups 154 | 155 | ## The groups filter used in search queries to find the groups based on relevant authenticated user. 156 | ## Various placeholders are available in the groups filter which you can read about in the documentation which can 157 | ## be found at: https://www.authelia.com/docs/configuration/authentication/ldap.html#groups-filter-replacements 158 | ## 159 | ## If your groups use the `groupOfUniqueNames` structure use this instead: 160 | ## (&(uniqueMember={dn})(objectClass=groupOfUniqueNames)) 161 | groups_filter: (member={dn}) 162 | 163 | ## The username and password of the admin user. 164 | user: cn=admin,ou=people,dc=janejeon,dc=com 165 | 166 | ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html 167 | password: {{ env.LLDAP_LDAP_USER_PASS }} 168 | 169 | ## 170 | ## File (Authentication Provider) 171 | ## 172 | ## With this backend, the users database is stored in a file which is updated when users reset their passwords. 173 | ## Therefore, this backend is meant to be used in a dev environment and not in production since it prevents Authelia 174 | ## to be scaled to more than one instance. The options under 'password' have sane defaults, and as it has security 175 | ## implications it is highly recommended you leave the default values. Before considering changing these settings 176 | ## please read the docs page below: 177 | ## https://www.authelia.com/docs/configuration/authentication/file.html#password-hash-algorithm-tuning 178 | ## 179 | ## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html 180 | ## 181 | # file: 182 | # path: /mnt/authelia.userfile 183 | 184 | ## 185 | ## Access Control Configuration 186 | ## 187 | ## Access control is a list of rules defining the authorizations applied for one resource to users or group of users. 188 | ## 189 | ## If 'access_control' is not defined, ACL rules are disabled and the 'bypass' rule is applied, i.e., access is allowed 190 | ## to anyone. Otherwise restrictions follow the rules defined. 191 | ## 192 | ## Note: One can use the wildcard * to match any subdomain. 193 | ## It must stand at the beginning of the pattern. (example: *.mydomain.com) 194 | ## 195 | ## Note: You must put patterns containing wildcards between simple quotes for the YAML to be syntactically correct. 
196 | ## 197 | ## Definition: A 'rule' is an object with the following keys: 'domain', 'subject', 'policy' and 'resources'. 198 | ## 199 | ## - 'domain' defines which domain or set of domains the rule applies to. 200 | ## 201 | ## - 'subject' defines the subject to apply authorizations to. This parameter is optional and matching any user if not 202 | ## provided. If provided, the parameter represents either a user or a group. It should be of the form 203 | ## 'user:' or 'group:'. 204 | ## 205 | ## - 'policy' is the policy to apply to resources. It must be either 'bypass', 'one_factor', 'two_factor' or 'deny'. 206 | ## 207 | ## - 'resources' is a list of regular expressions that matches a set of resources to apply the policy to. This parameter 208 | ## is optional and matches any resource if not provided. 209 | ## 210 | ## Note: the order of the rules is important. The first policy matching (domain, resource, subject) applies. 211 | access_control: 212 | ## Default policy can either be 'bypass', 'one_factor', 'two_factor' or 'deny'. It is the policy applied to any 213 | ## resource if there is no policy to be applied to the user. 214 | default_policy: deny 215 | 216 | # https://www.authelia.com/docs/configuration/access-control.html 217 | rules: 218 | # Infra Access 219 | - domain: 220 | - portainer.{{ env.BASE_HOST }} 221 | - ldap.{{ env.BASE_HOST }} 222 | - dozzle.{{ env.BASE_HOST }} 223 | policy: two_factor 224 | subject: group:infra_admin 225 | 226 | - domain: auth.{{ env.BASE_HOST }} 227 | policy: bypass 228 | 229 | # Applications/Default Policy 230 | - domain: '*.{{ env.BASE_HOST }}' 231 | policy: two_factor 232 | 233 | ## 234 | ## Session Provider Configuration 235 | ## 236 | ## The session cookies identify the user once logged in. 237 | ## The available providers are: `memory`, `redis`. Memory is the provider unless redis is defined. 238 | session: 239 | ## The name of the session cookie. 240 | name: authelia_session 241 | 242 | secret: {{ env.AUTHELIA_SESSION_SECRET }} 243 | 244 | ## Sets the Cookie SameSite value. Possible options are none, lax, or strict. 245 | ## Please read https://www.authelia.com/docs/configuration/session/#same_site 246 | same_site: lax 247 | 248 | ## The value for expiration, inactivity, and remember_me_duration are in seconds or the duration notation format. 249 | ## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format 250 | ## All three of these values affect the cookie/session validity period. Longer periods are considered less secure 251 | ## because a stolen cookie will last longer giving attackers more time to spy or attack. 252 | 253 | ## The time before the cookie expires and the session is destroyed if remember me IS NOT selected. 254 | expiration: 1d 255 | 256 | ## The inactivity time before the session is reset. If expiration is set to 1h, and this is set to 5m, if the user 257 | ## does not select the remember me option their session will get destroyed after 1h, or after 5m since the last time 258 | ## Authelia detected user activity. 259 | inactivity: 15m 260 | 261 | ## The time before the cookie expires and the session is destroyed if remember me IS selected. 262 | ## Value of 0 disables remember me. 
263 | remember_me: 1M 264 | 265 | cookies: 266 | - domain: {{ env.BASE_HOST }} 267 | authelia_url: https://auth.{{ env.BASE_HOST }} 268 | default_redirection_url: https://home.{{ env.BASE_HOST }} 269 | 270 | ## 271 | ## Redis Provider 272 | ## 273 | ## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html 274 | ## 275 | redis: 276 | host: redis 277 | port: 6379 278 | ## Use a unix socket instead 279 | # host: /var/run/redis/redis.sock 280 | 281 | ## Username used for redis authentication. This is optional and a new feature in redis 6.0. 282 | # username: authelia 283 | 284 | ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html 285 | # password: authelia 286 | 287 | ## This is the Redis DB Index https://redis.io/commands/select (sometimes referred to as database number, DB, etc). 288 | database_index: 0 289 | 290 | ## The maximum number of concurrent active connections to Redis. 291 | maximum_active_connections: 2 292 | 293 | ## The target number of idle connections to have open ready for work. Useful when opening connections is slow. 294 | minimum_idle_connections: 0 295 | 296 | ## The Redis TLS configuration. If defined will require a TLS connection to the Redis instance(s). 297 | # tls: 298 | ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option). 299 | # server_name: myredis.example.com 300 | 301 | ## Skip verifying the server certificate (to allow a self-signed certificate). 302 | ## In preference to setting this we strongly recommend you add the public portion of the certificate to the 303 | ## certificates directory which is defined by the `certificates_directory` option at the top of the config. 304 | # skip_verify: false 305 | 306 | ## Minimum TLS version for the connection. 307 | # minimum_version: TLS1.2 308 | 309 | ## The Redis HA configuration options. 310 | ## This provides specific options to Redis Sentinel, sentinel_name must be defined (Master Name). 311 | # high_availability: 312 | ## Sentinel Name / Master Name. 313 | # sentinel_name: mysentinel 314 | 315 | ## Specific username for Redis Sentinel. The node username and password is configured above. 316 | # sentinel_username: sentinel_specific_user 317 | 318 | ## Specific password for Redis Sentinel. The node username and password is configured above. 319 | # sentinel_password: sentinel_specific_pass 320 | 321 | ## The additional nodes to pre-seed the redis provider with (for sentinel). 322 | ## If the host in the above section is defined, it will be combined with this list to connect to sentinel. 323 | ## For high availability to be used you must have either defined; the host above or at least one node below. 324 | # nodes: 325 | # - host: sentinel-node1 326 | # port: 6379 327 | # - host: sentinel-node2 328 | # port: 6379 329 | 330 | ## Choose the host with the lowest latency. 331 | # route_by_latency: false 332 | 333 | ## Choose the host randomly. 334 | # route_randomly: false 335 | 336 | ## 337 | ## Regulation Configuration 338 | ## 339 | ## This mechanism prevents attackers from brute forcing the first factor. It bans the user if too many attempts are made 340 | ## in a short period of time. 341 | regulation: 342 | ## The number of failed login attempts before user is banned. Set it to 0 to disable regulation. 343 | max_retries: 3 344 | 345 | ## The time range during which the user can attempt login before being banned. 
The user is banned if the 346 | ## authentication failed 'max_retries' times in a 'find_time' seconds window. Find Time accepts duration notation. 347 | ## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format 348 | find_time: 5m 349 | 350 | ## The length of time before a banned user can login again. Ban Time accepts duration notation. 351 | ## See: https://www.authelia.com/docs/configuration/index.html#duration-notation-format 352 | ban_time: 1h 353 | 354 | ## 355 | ## Storage Provider Configuration 356 | ## 357 | ## The available providers are: `local`, `mysql`, `postgres`. You must use one and only one of these providers. 358 | storage: 359 | encryption_key: {{ env.AUTHELIA_STORAGE_ENCRYPTION_KEY }} 360 | 361 | ## 362 | ## Local (Storage Provider) 363 | ## 364 | ## This stores the data in a SQLite3 Database. 365 | ## This is only recommended for lightweight non-stateful installations. 366 | ## 367 | ## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html 368 | ## 369 | # local: 370 | # path: /config/db.sqlite3 371 | 372 | ## 373 | ## MySQL / MariaDB (Storage Provider) 374 | ## 375 | mysql: 376 | address: tcp://mysql:3306 377 | database: auth_db 378 | username: authelia 379 | ## Password can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html 380 | password: authelia-password 381 | timeout: 5s 382 | 383 | ## 384 | ## Notification Provider 385 | ## 386 | ## Notifications are sent to users when they require a password reset, a U2F registration or a TOTP registration. 387 | ## The available providers are: filesystem, smtp. You must use only one of these providers. 388 | notifier: 389 | ## You can disable the notifier startup check by setting this to true. 390 | disable_startup_check: true 391 | 392 | ## 393 | ## File System (Notification Provider) 394 | ## 395 | ## Important: Kubernetes (or HA) users must read https://www.authelia.com/docs/features/statelessness.html 396 | ## 397 | # filesystem: 398 | # filename: /config/notification.txt 399 | 400 | ## 401 | ## SMTP (Notification Provider) 402 | ## 403 | ## Use a SMTP server for sending notifications. Authelia uses the PLAIN or LOGIN methods to authenticate. 404 | ## [Security] By default Authelia will: 405 | ## - force all SMTP connections over TLS including unauthenticated connections 406 | ## - use the disable_require_tls boolean value to disable this requirement 407 | ## (only works for unauthenticated connections) 408 | ## - validate the SMTP server x509 certificate during the TLS handshake against the hosts trusted certificates 409 | ## (configure in tls section) 410 | smtp: 411 | address: smtp://smtp.fastmail.com:465 412 | 413 | ## The connection timeout. 414 | timeout: 5s 415 | 416 | ## The username used for SMTP authentication. 417 | username: {{ env.SMTP_USER }} 418 | 419 | ## The password used for SMTP authentication. 420 | ## Can also be set using a secret: https://www.authelia.com/docs/configuration/secrets.html 421 | password: {{ env.SMTP_PASS }} 422 | 423 | ## The address to send the email FROM. 424 | sender: {{ env.SMTP_SENDER }} 425 | 426 | ## HELO/EHLO Identifier. Some SMTP Servers may reject the default of localhost. 427 | # identifier: localhost 428 | 429 | ## Subject configuration of the emails sent. {title} is replaced by the text from the notifier. 430 | subject: '[Authelia] {title}' 431 | 432 | ## This address is used during the startup check to verify the email configuration is correct. 
433 | ## It's not important what it is except if your email server only allows local delivery. 434 | startup_check_address: {{ env.SMTP_SENDER }} 435 | 436 | ## By default we require some form of TLS. This disables this check though is not advised. 437 | disable_require_tls: false 438 | 439 | ## Disables sending HTML formatted emails. 440 | disable_html_emails: false 441 | 442 | # tls: 443 | # ## Server Name for certificate validation (in case you are using the IP or non-FQDN in the host option). 444 | # # server_name: smtp.example.com 445 | 446 | # ## Skip verifying the server certificate (to allow a self-signed certificate). 447 | # ## In preference to setting this we strongly recommend you add the public portion of the certificate to the 448 | # ## certificates directory which is defined by the `certificates_directory` option at the top of the config. 449 | # skip_verify: false 450 | 451 | # ## Minimum TLS version for either StartTLS or SMTPS. 452 | # minimum_version: TLS1.2 453 | ## 454 | ## Identity Providers 455 | ## 456 | -------------------------------------------------------------------------------- /services/sso-proxy/docker-compose.authelia.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | authelia: 5 | image: authelia/authelia:4.38.18 6 | container_name: authelia 7 | restart: unless-stopped 8 | depends_on: 9 | mysql: 10 | condition: service_healthy 11 | redis: 12 | condition: service_healthy 13 | lldap: 14 | condition: service_healthy 15 | networks: 16 | - public 17 | - private 18 | volumes: 19 | - ./services/sso-proxy/config.yml:/config/configuration.yml:ro 20 | environment: 21 | TZ: ${TIMEZONE} 22 | labels: 23 | traefik.enable: 'true' 24 | traefik.http.routers.authelia.tls.certresolver: letsencrypt 25 | traefik.http.routers.authelia.rule: Host(`auth.${BASE_HOST}`) 26 | traefik.http.routers.authelia.middlewares: sso-proxy@file 27 | traefik.http.services.authelia.loadbalancer.server.port: 9091 28 | 29 | networks: 30 | public: {} 31 | private: {} 32 | -------------------------------------------------------------------------------- /services/sso/README.md: -------------------------------------------------------------------------------- 1 | LLDAP to serve as a very basic, self-contained, lightweight LDAP implementation to back NextCloud and Authelia with 2 | -------------------------------------------------------------------------------- /services/sso/config.toml: -------------------------------------------------------------------------------- 1 | ## Default configuration for Docker. 2 | ## All the values can be overridden through environment variables, prefixed 3 | ## with "LLDAP_". For instance, "ldap_port" can be overridden with the 4 | ## "LLDAP_LDAP_PORT" variable. 5 | 6 | ## The port on which to have the LDAP server. 7 | #ldap_port = 3890 8 | 9 | ## The port on which to have the HTTP server, for user login and 10 | ## administration. 11 | #http_port = 17170 12 | 13 | ## The public URL of the server, for password reset links. 14 | #http_url = "http://localhost" 15 | 16 | ## Random secret for JWT signature. 17 | ## This secret should be random, and should be shared with application 18 | ## servers that need to consume the JWTs. 19 | ## Changing this secret will invalidate all user sessions and require 20 | ## them to re-login. 21 | ## You should probably set it through the LLDAP_JWT_SECRET environment 22 | ## variable from a secret ".env" file. 
23 | ## You can generate it with (on linux): 24 | ## LC_ALL=C tr -dc 'A-Za-z0-9!"#%&'\''()*+,-./:;<=>?@[\]^_{|}~' . 91 | #from="LLDAP Admin " 92 | ## Same for reply-to, optional. 93 | #reply_to="Do not reply " 94 | -------------------------------------------------------------------------------- /services/sso/docker-compose.lldap.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | lldap: 5 | image: nitnelave/lldap:v0.5.0 6 | container_name: lldap 7 | restart: unless-stopped 8 | healthcheck: 9 | test: wget --no-verbose --tries=1 --spider http://localhost:17170/health || exit 1 10 | interval: 15s 11 | timeout: 5s 12 | retries: 3 13 | start_period: 30s 14 | networks: 15 | - public 16 | volumes: 17 | - lldap-data:/data 18 | - ./services/sso/config.toml:/data/lldap_config.toml:ro 19 | environment: 20 | LLDAP_JWT_SECRET: ${LLDAP_JWT_SECRET} 21 | LLDAP_LDAP_USER_PASS: ${LLDAP_LDAP_USER_PASS} 22 | labels: 23 | traefik.enable: 'true' 24 | traefik.http.routers.lldap.tls.certresolver: letsencrypt 25 | traefik.http.routers.lldap.rule: Host(`ldap.${BASE_HOST}`) 26 | traefik.http.routers.lldap.middlewares: sso-proxy@file # if anything, for the forced 2FA 27 | traefik.http.services.lldap.loadbalancer.server.port: 17170 28 | 29 | networks: 30 | public: {} 31 | 32 | volumes: 33 | lldap-data: {} 34 | -------------------------------------------------------------------------------- /services/status-page/README.md: -------------------------------------------------------------------------------- 1 | Use uptime kuma to display uptimes for my services 2 | -------------------------------------------------------------------------------- /services/status-page/docker-compose.uptime-kuma.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | uptime-kuma: 5 | image: louislam/uptime-kuma:1.23.16-alpine 6 | container_name: uptime-kuma 7 | restart: unless-stopped 8 | healthcheck: 9 | test: wget --no-verbose --tries=1 --spider http://localhost:3001/status || exit 1 10 | interval: 15s 11 | timeout: 5s 12 | retries: 3 13 | start_period: 60s 14 | networks: 15 | - public 16 | - private 17 | volumes: 18 | - uptime-kuma-data:/app/data 19 | - /var/run/docker.sock:/var/run/docker.sock # allow uptime kuma to monitor docker containers 20 | labels: 21 | traefik.enable: 'true' 22 | traefik.http.routers.uptime-kuma.tls.certresolver: letsencrypt 23 | traefik.http.routers.uptime-kuma.rule: Host(`up.${BASE_HOST}`) 24 | traefik.http.routers.uptime-kuma.middlewares: sso-proxy@file 25 | traefik.http.services.uptime-kuma.loadbalancer.server.port: 3001 26 | 27 | networks: 28 | public: {} 29 | private: {} 30 | 31 | volumes: 32 | uptime-kuma-data: {} 33 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | This folder contains all of the "logic" (and its tests) that are required to run the repository. 
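
Everything here is plain Python, exercised by the `*_test.py` files that sit alongside each module. A minimal sketch of running it directly, assuming the Python dependencies are installed (e.g. in the repo's Poetry environment):

```sh
poetry run pytest src/   # *_test.py files are picked up by pytest's default collection patterns

# Ad-hoc check of one helper (note: this performs real HTTP requests to Cloudflare):
poetry run python -c "from src.get_cloudflare_ips import get_cloudflare_ips; print(get_cloudflare_ips())"
```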
2 | -------------------------------------------------------------------------------- /src/get_cloudflare_ips.py: -------------------------------------------------------------------------------- 1 | from src.log_invocation import log_invocation 2 | from src.requests_session import get_session 3 | 4 | CLOUDFLARE_IPV4_LIST_URL = "https://www.cloudflare.com/ips-v4" 5 | CLOUDFLARE_IPV6_LIST_URL = "https://www.cloudflare.com/ips-v6" 6 | 7 | 8 | @log_invocation 9 | def get_cloudflare_ips() -> str: 10 | """ 11 | Returns a concatenated list of Cloudflare's IPs (both IPv4 and IPv6). 12 | Useful for whitelisting IPs, as we want to make sure all connections are proxied by Cloudflare, 13 | and block off anyone trying to bypass Cloudflare's protections by making direct connections to the server. 14 | """ 15 | session = get_session() 16 | 17 | # with the requests upgrade, urllib3 broke and I need to specify super short timeout for requests to actually work? 18 | ipv4s = session.get(CLOUDFLARE_IPV4_LIST_URL, timeout=1).text.split("\n") 19 | ipv6s = session.get(CLOUDFLARE_IPV6_LIST_URL, timeout=1).text.split("\n") 20 | 21 | return sorted(ipv4s + ipv6s) 22 | -------------------------------------------------------------------------------- /src/get_cloudflare_ips_mock.py: -------------------------------------------------------------------------------- 1 | import responses 2 | 3 | from src.get_cloudflare_ips import CLOUDFLARE_IPV4_LIST_URL, CLOUDFLARE_IPV6_LIST_URL 4 | 5 | IPV4_LIST_MOCK = "1.1.1.1/20\n2.2.2.2/18\n3.3.3.3/16" 6 | IPV6_LIST_MOCK = "4444:5555::/32\n6666:7777::/26" 7 | 8 | 9 | def mock_cloudflare_ips(): 10 | """ 11 | Returns a mocked cloudflare IPs list so that we don't end up hitting the endpoint for testing. 12 | It's to be a good citizen, and to keep the tests consistent. 13 | """ 14 | responses.get(CLOUDFLARE_IPV4_LIST_URL, body=IPV4_LIST_MOCK) 15 | responses.get(CLOUDFLARE_IPV6_LIST_URL, body=IPV6_LIST_MOCK) 16 | -------------------------------------------------------------------------------- /src/get_cloudflare_ips_test.py: -------------------------------------------------------------------------------- 1 | import responses 2 | 3 | from src.get_cloudflare_ips import get_cloudflare_ips 4 | from src.get_cloudflare_ips_mock import mock_cloudflare_ips 5 | 6 | 7 | @responses.activate 8 | def test_get_cloudflare_ips(): 9 | """ 10 | Test that the get_cloudflare_ips() function returns the list of IP addresses correctly. 11 | """ 12 | EXPECTED_RESULT = [ 13 | "1.1.1.1/20", 14 | "2.2.2.2/18", 15 | "3.3.3.3/16", 16 | "4444:5555::/32", 17 | "6666:7777::/26", 18 | ] 19 | 20 | mock_cloudflare_ips() 21 | 22 | result = get_cloudflare_ips() 23 | 24 | assert result == EXPECTED_RESULT 25 | 26 | 27 | # NOTE: not testing any "failure cases" here because we expect any HTTP errors to immediately throw anyway, 28 | # halting execution. 29 | -------------------------------------------------------------------------------- /src/log_invocation.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import logging 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | 7 | def log_invocation(func): 8 | """ 9 | A decorator to log when the function is called (more specifically, when it returns). 10 | Mainly for debugging. 
11 | """ 12 | function_name = func.__name__ 13 | 14 | @functools.wraps(func) 15 | def wrapper(*args, **kwargs): 16 | logger.debug("Calling %s", function_name) 17 | return_value = func(*args, **kwargs) 18 | logger.debug("%s returned %s", function_name, return_value) 19 | 20 | return return_value 21 | 22 | return wrapper 23 | 24 | 25 | # NOTE: can't test this 26 | -------------------------------------------------------------------------------- /src/requests_session.py: -------------------------------------------------------------------------------- 1 | import requests 2 | from fake_useragent import UserAgent 3 | 4 | ua = UserAgent() 5 | 6 | 7 | def _raise_on_status(r: requests.Response, *args, **kwargs) -> None: 8 | """ 9 | A middleware to throw on HTTP status errors. 10 | """ 11 | r.raise_for_status() 12 | 13 | 14 | def get_session() -> requests.Session: 15 | """ 16 | Returns a requests.Session object with the following defaults: 17 | - throwing on HTTP status 4xx and 5xx 18 | """ 19 | session = requests.Session() 20 | 21 | # Note: no need to log requests, as urllib3 already does it if we set the logging level to DEBUG 22 | session.hooks = {"response": [_raise_on_status]} 23 | session.timeout = 10 24 | session.headers = {"User-Agent": ua.chrome} 25 | 26 | return session 27 | -------------------------------------------------------------------------------- /src/requests_session_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import requests 3 | import responses 4 | 5 | from src.requests_session import get_session 6 | 7 | 8 | @responses.activate 9 | def test_request_session_throws_by_default(): 10 | """ 11 | Test the default behaviour of the session that it throws on 4xx and 5xx HTTP status codes. 12 | """ 13 | TEST_URL = "https://www.example.com" 14 | 15 | responses.get(TEST_URL, status=404) 16 | 17 | session = get_session() 18 | 19 | # Assert that HTTP errors are thrown, and not silently returned. 
20 | with pytest.raises(requests.exceptions.HTTPError): 21 | session.get(TEST_URL) 22 | -------------------------------------------------------------------------------- /src/set_logging_defaults.py: -------------------------------------------------------------------------------- 1 | """ 2 | Module that, when imported, sets some logging defaults: 3 | - logging level of default INFO, but can be overridden by the LOG_LEVEL environment variable 4 | - logging format that makes it easier to separate the log caller from the log message 5 | """ 6 | 7 | import logging 8 | import os 9 | 10 | logger = logging.getLogger(__name__) 11 | 12 | 13 | def set_logging_defaults() -> None: 14 | log_level = os.environ.get("LOG_LEVEL", "INFO") 15 | 16 | # https://docs.python.org/3/library/logging.html#logrecord-attributes 17 | logging_format = "[%(levelname)s] %(name)s: %(message)s" 18 | 19 | logging.basicConfig(level=log_level, format=logging_format) 20 | 21 | logger.debug("Finished setting logging defaults") 22 | 23 | 24 | # NOTE: can't test this 25 | -------------------------------------------------------------------------------- /src/templating/get_env_context.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from typing import Dict 4 | 5 | from dotenv import load_dotenv 6 | 7 | from src.log_invocation import log_invocation 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | @log_invocation 13 | def get_env_context(dotenv_path: str = ".env") -> Dict[str, str]: 14 | """ 15 | Returns all environment variables (also loaded from .env file) as a context object. 16 | """ 17 | load_dotenv(dotenv_path) # this will load .env from the workspace root 18 | logger.debug("Loaded environment variables from .env") 19 | 20 | return dict(os.environ) 21 | -------------------------------------------------------------------------------- /src/templating/get_env_context_test.py: -------------------------------------------------------------------------------- 1 | from pyfakefs.fake_filesystem import FakeFilesystem 2 | 3 | from src.templating.get_env_context import get_env_context 4 | 5 | 6 | def test_get_env_context(fs: FakeFilesystem): 7 | """ 8 | Test that the correct environment variables are loaded from the .env we specify. 9 | """ 10 | # Create a fake .env so that when the dotenv is called, it reads from that instead. 11 | # For one, we don't want to read the production .env file, but that's possible by 12 | # passing in fixtures/.env as the argument into get_env_context(). 13 | # The real reason is that I want to avoid *actually* hitting the filesystem 14 | # and otherwise making real I/O in a unit test. 15 | fs.create_file(".env", contents="FOO=BAR") 16 | 17 | result = get_env_context() 18 | 19 | # We know that the dotenv module loaded the .env file correctly, 20 | # because I locally don't have FOO=BAR set, and so it's reading the value of that 21 | # "fake" .env file (which is the only place where I have FOO=BAR set) and parsing it. 22 | assert result["FOO"] == "BAR" 23 | -------------------------------------------------------------------------------- /src/templating/get_rendered_name.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | 3 | from src.log_invocation import log_invocation 4 | 5 | 6 | @log_invocation 7 | def get_rendered_name(template_path: str) -> str: 8 | """ 9 | Given a template path, returns what the name should be for the generated file as an absolute path. 
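    (More precisely, it just strips the trailing .j2 extension, so the result is only
    absolute if the template path passed in already was.)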
10 | """ 11 | # We're assuming that the path ends with a .j2 12 | return path.splitext(template_path)[0] 13 | -------------------------------------------------------------------------------- /src/templating/get_rendered_name_test.py: -------------------------------------------------------------------------------- 1 | from src.templating.get_rendered_name import get_rendered_name 2 | 3 | 4 | def test_get_rendered_name(): 5 | """ 6 | Test that the .j2 file extension is stripped away 7 | """ 8 | expected_result = "fixtures/templates/foo.yml" 9 | result = get_rendered_name("fixtures/templates/foo.yml.j2") 10 | 11 | assert result == expected_result 12 | -------------------------------------------------------------------------------- /src/templating/get_template_paths.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from glob import glob 3 | from os import path 4 | from typing import List 5 | 6 | from src.log_invocation import log_invocation 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | 11 | @log_invocation 12 | def get_template_paths(folder_to_check: str = ".") -> List[str]: 13 | """ 14 | Given a folder, searches for .j2 template files and returns them as a list of absolute paths. 15 | """ 16 | pattern = path.join(folder_to_check, "**/*.j2") 17 | 18 | logger.debug("Searching templates that match %s", pattern) 19 | 20 | return sorted(glob(pattern, recursive=True)) 21 | -------------------------------------------------------------------------------- /src/templating/get_template_paths_test.py: -------------------------------------------------------------------------------- 1 | from pyfakefs.fake_filesystem import FakeFilesystem 2 | 3 | from src.templating.get_template_paths import get_template_paths 4 | 5 | 6 | def test_get_template_paths(fs: FakeFilesystem): 7 | """ 8 | Test that the globbing works as expected. 9 | """ 10 | BASE_DIR = "folder" 11 | 12 | for fake_test_file in [ 13 | "foo.yml", 14 | "foo.yml.j2", 15 | "nested/bar.yml", 16 | "nested/bar.yml.j2", 17 | ]: 18 | fs.create_file(f"{BASE_DIR}/{fake_test_file}") 19 | 20 | expected_result = [ 21 | f"{BASE_DIR}/foo.yml.j2", 22 | f"{BASE_DIR}/nested/bar.yml.j2", 23 | ] 24 | result = get_template_paths(BASE_DIR) 25 | 26 | assert result == expected_result 27 | -------------------------------------------------------------------------------- /src/templating/render_template.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | from jinja2 import Template 4 | 5 | from src.log_invocation import log_invocation 6 | 7 | 8 | @log_invocation 9 | def render_template(template_str: str, context: Dict[str, Any]) -> str: 10 | """ 11 | Renders a template with the given context and returns the raw string value of the generated file. 12 | """ 13 | template = Template(template_str, trim_blocks=True, lstrip_blocks=True) 14 | 15 | return template.render(context) 16 | -------------------------------------------------------------------------------- /src/templating/render_template_test.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | 3 | from src.templating.render_template import render_template 4 | 5 | 6 | def test_render_template(): 7 | """ 8 | Test that a Jinja2 template is rendered correctly with the given context. 9 | """ 10 | template_str = """ 11 | var 1's value is {{ var1 }}, 12 | and var 2's value is {{ var2 }}. 
13 | """ 14 | context = { 15 | "var1": "val1", 16 | "var2": "val2", 17 | } 18 | expected_result = """ 19 | var 1's value is val1, 20 | and var 2's value is val2. 21 | """ 22 | result = render_template(inspect.cleandoc(template_str), context) 23 | 24 | assert result == inspect.cleandoc(expected_result) 25 | --------------------------------------------------------------------------------