├── .devcontainer └── rke2 │ ├── Dockerfile │ └── devcontainer.json ├── .pre-commit-config.yaml ├── Ansible ├── Installation │ └── readme.md ├── Playbooks │ ├── Docker-Portainer │ │ ├── inventory.yaml │ │ ├── playbook.yaml │ │ └── roles │ │ │ ├── docker_install │ │ │ ├── handlers │ │ │ │ └── main.yaml │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ ├── templates │ │ │ │ └── docker_daemon.json.j2 │ │ │ └── vars │ │ │ │ └── main.yaml │ │ │ └── portainer_deploy │ │ │ ├── handlers │ │ │ └── main.yaml │ │ │ ├── tasks │ │ │ └── main.yaml │ │ │ ├── templates │ │ │ └── docker_compose.yaml.j2 │ │ │ └── vars │ │ │ └── main.yaml │ ├── Docker-Swarm │ │ ├── ansible.cfg │ │ ├── collections │ │ │ └── requirements.yaml │ │ ├── inventory │ │ │ ├── group_vars │ │ │ │ └── all.yaml │ │ │ └── inventory.yaml │ │ ├── roles │ │ │ ├── docker_install │ │ │ │ ├── handlers │ │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ ├── templates │ │ │ │ │ └── docker_daemon.json.j2 │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ ├── init_docker_swarm │ │ │ │ ├── handlers │ │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ ├── manager_join_docker_swarm │ │ │ │ ├── handlers │ │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ ├── mount_nfs │ │ │ │ ├── handlers │ │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ ├── portainer_deploy │ │ │ │ ├── handlers │ │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ ├── templates │ │ │ │ │ └── docker_compose.yaml.j2 │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ └── worker_join_docker_swarm │ │ │ │ ├── handlers │ │ │ │ └── main.yaml │ │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ │ └── vars │ │ │ │ └── main.yaml │ │ └── site.yaml │ ├── File-Copy │ │ ├── File-Copy-Playbook.yaml │ │ ├── File-Copy-Undo-Playbook.yaml │ │ ├── inventory.yaml │ │ └── nginx │ │ │ ├── docker-compose.yaml │ │ │ └── website │ │ │ ├── Jims-Garage-1.png │ │ │ └── index.html │ ├── Multi-OS-Update │ │ ├── Update-Playbook.yaml │ │ └── inventory.yaml │ ├── RKE2 │ │ ├── ansible.cfg │ │ ├── collections │ │ │ └── requirements.yaml │ │ ├── inventory │ │ │ ├── group_vars │ │ │ │ └── all.yaml │ │ │ └── hosts.ini │ │ ├── roles │ │ │ ├── add-agent │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── templates │ │ │ │ │ └── rke2-agent-config.j2 │ │ │ ├── add-server │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── templates │ │ │ │ │ └── rke2-server-config.j2 │ │ │ ├── apply-manifests │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── templates │ │ │ │ │ └── metallb-ippool.j2 │ │ │ ├── kube-vip │ │ │ │ ├── defaults │ │ │ │ │ └── main.yaml │ │ │ │ ├── meta │ │ │ │ │ └── argument_specs.yml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── templates │ │ │ │ │ └── kube-vip-config.j2 │ │ │ ├── prepare-nodes │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ ├── rke2-download │ │ │ │ ├── defaults │ │ │ │ │ └── main.yml │ │ │ │ ├── tasks │ │ │ │ │ └── main.yaml │ │ │ │ └── vars │ │ │ │ │ └── main.yaml │ │ │ └── rke2-prepare │ │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ │ ├── templates │ │ │ │ ├── rke2-agent.service.j2 │ │ │ │ ├── rke2-server-config.j2 │ │ │ │ └── rke2-server.service.j2 │ │ │ │ └── vars │ │ │ │ └── main.yaml │ │ └── site.yaml │ ├── Secrets-Variables │ │ ├── File-Copy-Playbook.yaml │ │ ├── password │ │ └── secrets_file.enc │ ├── Talos │ │ ├── collections │ │ │ └── requirements.yaml │ │ ├── inventory │ 
│ │ ├── group_vars │ │ │ │ └── all.yaml │ │ │ └── hosts.ini │ │ ├── roles │ │ │ ├── add-workers │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ ├── apply-config │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ ├── configure-cluster │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ ├── configure-talosctl │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ └── install-talosctl │ │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ └── site.yaml │ └── Update │ │ ├── readme.md │ │ ├── update-builtin.yml │ │ └── update.yml └── SSH │ └── readme.md ├── Authelia ├── Authelia │ ├── configuration.yml │ ├── docker-compose.yaml │ └── users_database.yml ├── Nginx │ └── docker-compose.yaml └── Traefik │ └── docker-compose.yaml ├── Authentik ├── .env ├── Web-Proxies │ ├── .env │ ├── authentik-docker-compose.yaml │ ├── example-nginx-docker-compose.yaml │ └── traefik-conf.yaml ├── docker-compose-traefik.yaml └── docker-compose.yaml ├── BunkerWeb └── docker-compose.yaml ├── Caddy ├── .env ├── Caddyfile ├── Dockerfile ├── docker-compose.yaml └── index.html ├── Checkmate └── docker-compose.yaml ├── Cloudflare-HTTPS ├── cloudflared │ └── docker-compose.yaml └── nginx │ └── docker-compose.yaml ├── Cloudflare-Tunnel ├── docker-compose.yaml └── macvlan ├── Code-Server └── docker-compose.yaml ├── Crowdsec ├── Traefik │ ├── config.yaml │ └── traefik.yaml ├── acquis.yaml └── docker-compose.yml ├── DIUN └── docker-compose.yaml ├── Deconz └── docker-compose.yaml ├── Deepseek └── docker-compose.yaml ├── Docker-Swarm ├── portainer-agent-stack.yml ├── swarm-3-nodes.sh └── swarm.sh ├── DynamicDNS ├── config ├── docker-compose.yaml └── script.sh ├── Enclosed └── docker-compose.yaml ├── Ente ├── config │ ├── museum.yaml │ └── scripts │ │ └── compose │ │ ├── credentials.yaml │ │ └── minio-provision.sh ├── docker-compose-traefik.yaml └── docker-compose.yaml ├── Frigate ├── config.yml └── docker-compose.yaml ├── GPU_passthrough └── readme.md ├── Gitea └── docker-compose.yaml ├── Gotify └── docker-compose.yaml ├── Grafana-Monitoring ├── Part-2 │ ├── mibs.txt │ ├── prometheus.yml │ └── telegraf.conf ├── docker-compose.yaml ├── prometheus.yml └── telegraf.conf ├── Headscale ├── Tailscale-Client │ └── docker-compose,yaml ├── config.yaml ├── docker-compose.yaml └── with-Traefik │ └── docker-compose.yaml ├── Headscale2 ├── conf │ └── config.yaml └── docker-compose.yaml ├── Hoarder ├── .env └── docker-compose.yaml ├── Home-Assistant ├── Kubernetes │ ├── default-headers.yaml │ ├── homeassistant-deployment.yaml │ ├── ingress.yaml │ ├── sealed-secret.yaml │ └── secret.yaml ├── configuration.yaml └── docker-compose.yaml ├── Homelab-Buyer's-Guide └── Q3-2023.md ├── Homepage └── Homepage │ ├── docker-compose.yaml │ └── services.yaml ├── IT-Tools └── docker-compose.yaml ├── Immich ├── .env ├── docker-compose.yaml └── hwaccel.yml ├── Jellyfin ├── Kubernetes │ ├── default-headers.yaml │ ├── ingress.yaml │ ├── jellyfin-deployment.yaml │ ├── networkpolicy.yaml │ ├── pv-smb.yaml │ └── pvc-smb.yaml └── docker-compose.yml ├── Jitsi ├── .env ├── docker-compose.yml └── gen-passwords.sh ├── Keycloak └── docker-compose.yaml ├── Komodo ├── .env └── docker-compose.yaml ├── Kubernetes ├── Cloud-Init │ └── readme.md ├── Create-VMS │ ├── create-vms.sh │ └── readme.md ├── Create-manifest-helm │ ├── Portainer │ │ ├── default-headers.yaml │ │ ├── ingress.yaml │ │ └── values.yaml │ ├── WireGuard-Easy │ │ ├── default-headers.yaml │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ └── ingressRouteUDP.yaml │ └── readme.md ├── CrowdSec │ ├── Bouncer │ │ └── bouncer-middleware.yaml │ 
├── CrowdSec │ │ └── values.yaml │ ├── Reflector │ │ └── values.yaml │ ├── Traefik │ │ └── values.yaml │ └── readme.md ├── Docker-Kubernetes-Data-Migration │ └── readme.md ├── GPU-Passthrough │ ├── jellyfin.yaml │ └── readme.md ├── GitOps │ ├── Gotify │ │ ├── default-headers.yaml │ │ ├── deployment.yaml │ │ └── ingress.yaml │ ├── Grafana │ │ ├── fleet.yaml │ │ └── values.yaml │ └── readme.md ├── K3S-Deploy │ ├── ipAddressPool │ ├── k3s.sh │ ├── kube-vip │ ├── l2Advertisement.yaml │ └── readme.md ├── Kubernetes-Lite │ └── k3s.sh ├── Longhorn │ ├── longhorn-K3S.sh │ ├── longhorn-RKE2.sh │ ├── longhorn.yaml │ └── readme.md ├── NetworkPolicies │ ├── allow-all-ingress.yaml │ ├── default-deny-all-ingress.yaml │ ├── example.yaml │ ├── namespace-example.yaml │ ├── networkpolicy-egress.yaml │ ├── networkpolicy-ingress.yaml │ └── port-example.yaml ├── RKE2-Cilium │ ├── rke2-cilium-config.yaml │ └── rke2.sh ├── RKE2 │ ├── ipAddressPool │ ├── kube-vip │ ├── l2Advertisement.yaml │ └── rke2.sh ├── Rancher-Deployment │ └── readme.md ├── SMB │ ├── deployment.yaml │ ├── pv-smb.yaml │ ├── pvc-smb.yaml │ └── readme.md ├── Traefik-External-Service │ ├── default-headers.yaml │ ├── ingress.yaml │ └── service.yaml ├── Traefik-PiHole │ ├── Helm │ │ └── Traefik │ │ │ ├── Cert-Manager │ │ │ ├── Certificates │ │ │ │ └── Production │ │ │ │ │ └── jimsgarage-production.yaml │ │ │ ├── Issuers │ │ │ │ ├── letsencrypt-production.yaml │ │ │ │ └── secret-cf-token.yaml │ │ │ └── values.yaml │ │ │ ├── Dashboard │ │ │ ├── ingress.yaml │ │ │ ├── middleware.yaml │ │ │ └── secret-dashboard.yaml │ │ │ ├── default-headers.yaml │ │ │ └── values.yaml │ ├── Manifest │ │ └── PiHole │ │ │ ├── PiHole-Deployment.yaml │ │ │ ├── default-headers.yaml │ │ │ └── ingress.yaml │ ├── deploy.sh │ └── readme.md └── Upgrade │ └── readme.md ├── LXC ├── Jellyfin │ ├── docker-compose.yaml │ └── readme.md └── NAS │ └── readme.md ├── Linkwarden ├── .env └── docker-compose.yaml ├── Logo ├── Jim's Garage-1 (1).mp4 ├── Jim's Garage-1 (1).png ├── Jim's Garage-1 (2).png ├── Jim's Garage-1 (3).png ├── Jim's Garage-1 (4).png ├── Jim's Garage-1 (5).png ├── Jim's Garage-1.mp4 ├── Jim's Garage-1.png └── Jim'sGarage-1(2).png ├── Minecraft ├── Kubernetes │ ├── deployment.yaml │ └── networkpolicy.yaml ├── docker-compose.yaml └── macvlan ├── MiroTalk ├── .env └── docker-compose.yaml ├── Mosquitto ├── Kubernetes │ ├── deployment.yaml │ ├── namespace.yaml │ ├── pv.yaml │ ├── pvc.yaml │ └── service.yaml ├── conf ├── docker-compose.yaml └── mosquitto.conf ├── Netbird ├── docker-compose.yml ├── management.json ├── openid-configuration.json ├── setup.env └── turnserver.conf ├── Nextcloud └── docker-compose.yaml ├── Nginx ├── cloudflare.ini ├── docker-compose.yaml ├── it-tools │ └── docker-compose.yaml └── nginx.conf ├── NordVPN-Wireguard └── wireguard.ps1 ├── Ollama ├── docker-compose.yml └── readme.md ├── Omni-Tools └── docker-compose.yaml ├── OpenHands └── docker-compose.yaml ├── Paperless-ngx ├── .env └── docker-compose.yaml ├── Pihole ├── Kubernetes │ ├── default-headers.yaml │ ├── ingress.yaml │ ├── middleware.yaml │ ├── networkpolicy.yaml │ ├── pihole-deployment.yaml │ └── sealed-secret.yaml ├── docker-compose.yml └── ubuntu port 53 fix ├── Piholev6 └── docker-compose.yaml ├── Plex ├── Kubernetes │ ├── default-headers.yaml │ ├── ingress.yaml │ ├── networkpolicy.yaml │ ├── plex-deployment-2.yaml │ ├── pms-docker-service.yaml │ ├── pv-smb.yaml │ └── pvc-smb.yaml └── docker-compose.yaml ├── Pocket-ID ├── .env └── docker-compose.yaml ├── Popup-Homelab ├── .env ├── 
acquis.yaml ├── cf-token ├── custom.list ├── docker-compose.yaml └── docker │ └── traefik │ ├── acme.json │ ├── config.yaml │ └── traefik.yaml ├── Portainer └── docker-compose.yaml ├── Postiz ├── .env └── docker-compose.yaml ├── PrivateBin └── docker-compose.yaml ├── Proxmox-Backup-Server └── readme.md ├── Proxmox-NAS ├── config.yml └── docker-compose.yaml ├── Pterodactyl ├── config.yml ├── docker-compose.yaml └── readme.md ├── README.md ├── SafeLine ├── .env └── docker-compose.yaml ├── SearXNG ├── .env ├── docker-compose-gluetun.yaml ├── docker-compose.yaml └── settings.yaml ├── Synapse ├── docker-compose.yaml ├── homeserver.yaml ├── mautrix-discord-bridge │ ├── docker-compose.yaml │ ├── example-config.yaml │ └── example-registration.yaml └── readme.md ├── Terraform ├── providers.tf └── test-cloudinit.tf ├── Tinyauth ├── .env ├── docker-compose.yaml └── users ├── Torrent-VPN └── docker-compose.yml ├── Traefik-Secure ├── config.yaml ├── docker-compose.yaml └── traefik.yaml ├── Traefik ├── docker-compose.yml └── traefik-config │ ├── acme.json │ ├── config.yml │ └── traefik.yml ├── Traefikv3 ├── .env ├── cf-token ├── config │ ├── acme.json │ ├── config.yaml │ └── traefik.yaml └── docker-compose.yaml ├── Trilium └── docker-compose.yaml ├── UltimateVPS ├── .env ├── docker-compose-VPS.yaml ├── docker-compose.yaml └── traefik │ └── readme.md ├── Unbound ├── a-records.conf ├── docker-compose-vpn.yaml ├── docker-compose.yaml ├── forward-records.conf ├── srv-records.conf └── unbound.conf ├── Unifi-Controller ├── docker-compose.yaml ├── init-mongo.js └── kubernetes │ ├── README.md │ ├── deployment.yaml │ ├── ingress.yaml │ ├── init-mongo.js │ └── namespaceAndSecret.yaml ├── UptimeKuma ├── Kubernetes │ ├── default-headers.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ └── longhorn-pv.yaml └── docker-compose.yaml ├── Vaultwarden ├── Kubernetes │ ├── default-headers.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ ├── ipwhitelist.yaml │ └── networkpolicy.yaml └── docker-compose.yaml ├── Vikunja └── docker-compose.yaml ├── Watchtower ├── access_token └── docker-compose.yaml ├── Web-Servers ├── Hugo │ ├── docker-compose.yaml │ └── site-build-command ├── Nginx │ └── docker-compose.yaml └── WordPress │ ├── .env │ └── docker-compose.yaml ├── Wireguard ├── Kubernetes │ ├── default-headers.yaml │ ├── deployment.yaml │ ├── ingress.yaml │ └── ingressRouteUDP.yaml └── docker-compose.yml ├── Zitadel ├── docker-compose.yaml ├── example-zitadel-config.yaml ├── example-zitadel-init-steps.yaml └── example-zitadel-secrets.yaml ├── rClone ├── docker-compose.yml ├── mount │ ├── docker-compose.yml │ └── windows_mount.bat ├── remote-upload └── sync_script └── restic └── docker-compose.yml /.devcontainer/rke2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mcr.microsoft.com/devcontainers/python:1-3.12-bookworm 2 | 3 | # enable git bash completion 4 | RUN ln -s /usr/share/bash-completion/completions/git /usr/share/bash-completion/bash_completion 5 | 6 | # install sshpass for ansible SSH password auth and vim just in case 7 | RUN apt-get update && apt-get upgrade -y 8 | RUN apt-get install -y sshpass vim 9 | RUN apt-get clean 10 | 11 | # install ansible and ansible-lint 12 | RUN python3 -m pip install ansible-core~=2.16.6 ansible-lint~=24.2.3 13 | 14 | # create /workspace directory 15 | WORKDIR /workspace 16 | RUN chown -R vscode:vscode /workspace 17 | 18 | # run commands as non-root user 19 | USER vscode 20 | 21 | # make prompt multiline cause it's too long by default 22 
| RUN sed -i -E -e '/PS1="\$/c\ PS1="${userpart} ${lightblue}\\w ${gitbranch}${removecolor}\\n\\$ "' ~/.bashrc 23 | 24 | # install collection requirements 25 | COPY collections/requirements.yaml . 26 | RUN ansible-galaxy collection install -r requirements.yaml 27 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v4.5.0 5 | hooks: 6 | - id: check-symlinks 7 | - id: destroyed-symlinks 8 | - id: detect-aws-credentials 9 | args: [--allow-missing-credentials] 10 | - repo: https://github.com/IamTheFij/docker-pre-commit 11 | rev: v3.0.1 12 | hooks: 13 | - id: docker-compose-check 14 | -------------------------------------------------------------------------------- /Ansible/Installation/readme.md: -------------------------------------------------------------------------------- 1 | # Update Repos 2 | ``` 3 | sudo apt update 4 | ``` 5 | # Install Dependencies 6 | ``` 7 | sudo apt install software-properties-common 8 | ``` 9 | 10 | # Add Ansible Repo 11 | ``` 12 | sudo add-apt-repository --yes --update ppa:ansible/ansible 13 | ``` 14 | 15 | # Install Ansible 16 | ``` 17 | sudo apt install ansible 18 | ``` -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/inventory.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | docker: 3 | hosts: 4 | docker01: 5 | ansible_host: 192.168.200.222 6 | ansible_user: 'ubuntu' 7 | ansible_become: true 8 | ansible_become_method: sudo 9 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/playbook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Docker on Ubuntu 3 | hosts: all 4 | become: true 5 | roles: 6 | - docker_install 7 | - portainer_deploy 8 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/docker_install/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart Docker 3 | ansible.builtin.systemd: 4 | name: docker 5 | state: restarted 6 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/docker_install/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure apt is using HTTPS 3 | ansible.builtin.apt: 4 | name: "{{ item }}" 5 | state: present 6 | loop: 7 | - apt-transport-https 8 | - ca-certificates 9 | - curl 10 | - software-properties-common 11 | 12 | - name: Add Docker GPG key 13 | ansible.builtin.apt_key: 14 | url: "https://download.docker.com/linux/ubuntu/gpg" 15 | state: present 16 | 17 | - name: Add Docker repository 18 | ansible.builtin.apt_repository: 19 | repo: "{{ docker_apt_repository }}" 20 | state: present 21 | 22 | - name: Install Docker CE 23 | ansible.builtin.apt: 24 | name: docker-ce 25 | state: present 26 | update_cache: true 27 | 28 | - name: Configure Docker daemon options 29 | ansible.builtin.template: 30 | src: "templates/docker_daemon.json.j2" 31 | dest: "/etc/docker/daemon.json" 32 | owner: 'root' 33 | group: 'root' 34 | mode: '0755' # Optional file permissions 35 | notify: Restart Docker 36 | 
37 | - name: Ensure Docker service is enabled and running 38 | ansible.builtin.systemd: 39 | name: docker 40 | enabled: true 41 | state: started 42 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/docker_install/templates/docker_daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "storage-driver": "{{ docker_daemon_options['storage-driver'] }}" 3 | } 4 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/docker_install/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_apt_release_channel: "stable" 3 | docker_apt_repository: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" 4 | docker_daemon_options: 5 | storage-driver: "overlay2" 6 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/portainer_deploy/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Start Portainer 3 | community.docker.docker_compose: 4 | project_src: /home/ubuntu/docker-compose/portainer 5 | state: present 6 | restarted: true 7 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/portainer_deploy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure docker-compose is installed 3 | ansible.builtin.package: 4 | name: docker-compose 5 | state: present 6 | 7 | - name: Ensure Docker service is running 8 | ansible.builtin.service: 9 | name: docker 10 | state: started 11 | enabled: true 12 | 13 | - name: Setup Portainer directory 14 | ansible.builtin.file: 15 | path: /home/ubuntu/docker-compose/portainer 16 | state: directory 17 | mode: '0755' # Optional file permissions 18 | owner: ubuntu # Optional ownership 19 | group: ubuntu # Optional group ownership 20 | 21 | - name: Deploy Portainer using Docker Compose 22 | ansible.builtin.template: 23 | src: "templates/docker_compose.yaml.j2" 24 | dest: "/home/ubuntu/docker-compose/portainer/docker-compose.yaml" 25 | mode: '0755' # Optional file permissions 26 | owner: ubuntu # Optional ownership 27 | group: ubuntu # Optional group ownership 28 | notify: 29 | - Start Portainer 30 | 31 | - name: Run Portainer docker-compose up 32 | community.docker.docker_compose: 33 | project_src: /home/ubuntu/docker-compose/portainer 34 | state: present 35 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/portainer_deploy/templates/docker_compose.yaml.j2: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | services: 3 | portainer: 4 | image: portainer/portainer-ce:{{ portainer_version }} 5 | volumes: 6 | - /var/run/docker.sock:/var/run/docker.sock 7 | - portainer_data:/data 8 | ports: 9 | - "9000:9000" 10 | restart: always 11 | 12 | volumes: 13 | portainer_data: 14 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Portainer/roles/portainer_deploy/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | portainer_version: "latest" 3 | 
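For reference, a typical way to run the Docker-Portainer playbook above from its directory is shown below. This is only a sketch: it assumes the host in inventory.yaml is reachable over SSH as the ubuntu user and that the community.docker collection (used by the Start Portainer handler) is installed on the control node.
```
# install the collection used by the portainer_deploy handler, if not already present
ansible-galaxy collection install community.docker

# run the playbook against the bundled inventory
ansible-playbook -i inventory.yaml playbook.yaml

# append --ask-become-pass if the remote user requires a sudo password
```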
-------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory/inventory.yaml 3 | host_key_checking = false -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/collections/requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.utils 4 | - name: community.general 5 | - name: ansible.posix 6 | - name: community.docker.docker_stack -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/inventory/group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | # ansible vars 2 | ansible_user: ubuntu 3 | ansible_become: true 4 | ansible_become_method: sudo 5 | 6 | # nfs vars 7 | nfs_server: 192.168.6.2 8 | nfs_share: /mnt/share/swarm 9 | mount_point: /share 10 | 11 | # portainer vars 12 | portainer_data: /share/portainer_data -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/inventory/inventory.yaml: -------------------------------------------------------------------------------- 1 | all: 2 | children: 3 | managers: 4 | hosts: 5 | manager1: 6 | ansible_host: 192.168.200.71 7 | manager2: 8 | ansible_host: 192.168.200.72 9 | manager3: 10 | ansible_host: 192.168.200.73 11 | workers: 12 | hosts: 13 | worker1: 14 | ansible_host: 192.168.200.74 15 | worker2: 16 | ansible_host: 192.168.200.75 17 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/docker_install/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart Docker 3 | ansible.builtin.systemd: 4 | name: docker 5 | state: restarted 6 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/docker_install/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure apt is using HTTPS 3 | ansible.builtin.apt: 4 | name: "{{ item }}" 5 | state: present 6 | loop: 7 | - apt-transport-https 8 | - ca-certificates 9 | - curl 10 | - software-properties-common 11 | 12 | - name: Add Docker GPG key 13 | ansible.builtin.apt_key: 14 | url: "https://download.docker.com/linux/ubuntu/gpg" 15 | state: present 16 | 17 | - name: Add Docker repository 18 | ansible.builtin.apt_repository: 19 | repo: "{{ docker_apt_repository }}" 20 | state: present 21 | 22 | - name: Install Docker CE 23 | ansible.builtin.apt: 24 | name: docker-ce 25 | state: present 26 | update_cache: true 27 | 28 | - name: Configure Docker daemon options 29 | ansible.builtin.template: 30 | src: "templates/docker_daemon.json.j2" 31 | dest: "/etc/docker/daemon.json" 32 | owner: 'root' 33 | group: 'root' 34 | mode: '0755' # Optional file permissions 35 | notify: Restart Docker 36 | 37 | - name: Ensure Docker service is enabled and running 38 | ansible.builtin.systemd: 39 | name: docker 40 | enabled: true 41 | state: started 42 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/docker_install/templates/docker_daemon.json.j2: 
-------------------------------------------------------------------------------- 1 | { 2 | "storage-driver": "{{ docker_daemon_options['storage-driver'] }}" 3 | } 4 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/docker_install/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_apt_release_channel: "stable" 3 | docker_apt_repository: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable" 4 | docker_daemon_options: 5 | storage-driver: "overlay2" 6 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/init_docker_swarm/handlers/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/init_docker_swarm/handlers/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/init_docker_swarm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Swarm is already initialized 3 | ansible.builtin.command: 4 | cmd: docker info 5 | register: swarm_check 6 | changed_when: false 7 | 8 | - name: Initialize Docker Swarm 9 | ansible.builtin.command: 10 | cmd: docker swarm init --advertise-addr {{ ansible_host }} 11 | when: "'Swarm: inactive' in swarm_check.stdout" 12 | register: swarm_init 13 | changed_when: swarm_init.rc == 0 14 | 15 | - name: Get Swarm join token for managers 16 | ansible.builtin.command: 17 | cmd: docker swarm join-token -q manager 18 | register: manager_token 19 | changed_when: false 20 | 21 | - name: Get Swarm join token for workers 22 | ansible.builtin.command: 23 | cmd: docker swarm join-token -q worker 24 | register: worker_token 25 | changed_when: false 26 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/init_docker_swarm/vars/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/init_docker_swarm/vars/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/manager_join_docker_swarm/handlers/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/manager_join_docker_swarm/handlers/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/manager_join_docker_swarm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Swarm is already initialized 3 | ansible.builtin.command: 4 | cmd: docker info 5 | register: swarm_check 6 | changed_when: false 7 | 8 | - name: Retrieve manager join token from manager 9 | ansible.builtin.set_fact: 10 | manager_token: "{{ hostvars[groups['managers'][0]]['manager_token'].stdout }}" 11 | when: "'Swarm: inactive' in swarm_check.stdout" 12 | 13 | - name: Join manager to Swarm 14 
| ansible.builtin.command: 15 | cmd: docker swarm join --token {{ manager_token }} {{ hostvars[groups['managers'][0]].ansible_host }}:2377 16 | when: 17 | - manager_token is defined 18 | - "'Swarm: inactive' in swarm_check.stdout" 19 | register: swarm_join 20 | changed_when: "'This node joined a swarm as a manager' in swarm_join.stdout" 21 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/manager_join_docker_swarm/vars/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/manager_join_docker_swarm/vars/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/mount_nfs/handlers/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/mount_nfs/handlers/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/mount_nfs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure NFS utilities are installed 3 | ansible.builtin.apt: 4 | name: nfs-common 5 | state: present 6 | update_cache: true 7 | 8 | - name: Reload systemd to recognize NFS changes 9 | ansible.builtin.systemd: 10 | daemon_reload: true 11 | 12 | - name: Check if NFS mount point exists 13 | ansible.builtin.stat: 14 | path: "{{ mount_point }}" 15 | register: mount_point_stat 16 | 17 | - name: Create mount point for NFS if it doesn't exist 18 | ansible.builtin.file: 19 | path: "{{ mount_point }}" 20 | state: directory 21 | mode: '0777' 22 | when: not mount_point_stat.stat.exists 23 | 24 | - name: Mount NFS share 25 | ansible.posix.mount: 26 | path: "{{ mount_point }}" 27 | src: "{{ nfs_server }}:{{ nfs_share }}" 28 | fstype: "nfs" 29 | opts: "vers=4,proto=tcp,nolock" 30 | state: mounted 31 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/mount_nfs/vars/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/mount_nfs/vars/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/portainer_deploy/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Start Portainer 3 | community.docker.docker_compose: 4 | project_src: /home/ubuntu/docker-compose/portainer 5 | state: present 6 | restarted: true 7 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/portainer_deploy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure docker-compose is installed 3 | ansible.builtin.package: 4 | name: docker-compose 5 | state: present 6 | 7 | - name: Ensure Docker service is running 8 | ansible.builtin.service: 9 | name: docker 10 | state: started 11 | enabled: true 12 | 13 | - name: Setup Portainer directory 14 | 
ansible.builtin.file: 15 | path: /home/ubuntu/docker-compose/portainer 16 | state: directory 17 | mode: '0755' # Optional file permissions 18 | owner: ubuntu # Optional ownership 19 | group: ubuntu # Optional group ownership 20 | 21 | - name: Copy compose from template to host 22 | ansible.builtin.template: 23 | src: "templates/docker_compose.yaml.j2" 24 | dest: "/home/ubuntu/docker-compose/portainer/docker-compose.yaml" 25 | mode: '0755' # Optional file permissions 26 | owner: ubuntu # Optional ownership 27 | group: ubuntu # Optional group ownership 28 | notify: 29 | - Start Portainer 30 | 31 | - name: Create Portainer storage on NFS if it doesn't exist 32 | ansible.builtin.file: 33 | path: "{{ portainer_data }}" 34 | state: directory 35 | mode: '0755' 36 | 37 | - name: Deploy Portainer stack 38 | ansible.builtin.command: 39 | cmd: docker stack deploy -c /home/ubuntu/docker-compose/portainer/docker-compose.yaml portainer 40 | register: swarm_check 41 | changed_when: false 42 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/portainer_deploy/templates/docker_compose.yaml.j2: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | agent: 5 | image: portainer/agent:2.21.5 6 | volumes: 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | - /var/lib/docker/volumes:/var/lib/docker/volumes 9 | networks: 10 | - agent_network 11 | deploy: 12 | mode: global 13 | placement: 14 | constraints: [node.platform.os == linux] 15 | 16 | portainer: 17 | image: portainer/portainer-ce:2.21.5 18 | command: -H tcp://tasks.agent:9001 --tlsskipverify 19 | ports: 20 | - "9443:9443" 21 | - "9000:9000" 22 | - "8000:8000" 23 | volumes: 24 | - /share/portainer_data:/data 25 | networks: 26 | - agent_network 27 | deploy: 28 | mode: replicated 29 | replicas: 1 30 | placement: 31 | constraints: [node.role == manager] 32 | 33 | networks: 34 | agent_network: 35 | driver: overlay 36 | attachable: true -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/portainer_deploy/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | portainer_version: "latest" 3 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/worker_join_docker_swarm/handlers/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/worker_join_docker_swarm/handlers/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/worker_join_docker_swarm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Swarm is already initialized 3 | ansible.builtin.command: 4 | cmd: docker info 5 | register: swarm_check 6 | changed_when: false 7 | 8 | - name: Retrieve worker join token from manager 9 | ansible.builtin.set_fact: 10 | worker_token: "{{ hostvars[groups['managers'][0]]['worker_token'].stdout }}" 11 | when: "'Swarm: inactive' in swarm_check.stdout" 12 | 13 | - name: Join worker to Swarm 14 | ansible.builtin.command: 15 | argv: 16 | - docker 17 | - swarm 18 | - join 19 | - --token 20 | - "{{ worker_token }}" 21 | - "{{ 
hostvars[groups['managers'][0]].ansible_host }}:2377" 22 | when: 23 | - worker_token is defined 24 | - "'Swarm: inactive' in swarm_check.stdout" 25 | changed_when: true 26 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/roles/worker_join_docker_swarm/vars/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/Docker-Swarm/roles/worker_join_docker_swarm/vars/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/Docker-Swarm/site.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Docker & NFS on Ubuntu 3 | hosts: all 4 | become: true 5 | roles: 6 | - docker_install 7 | - mount_nfs 8 | 9 | - name: Init Docker Swarm 10 | hosts: managers[0] 11 | become: true 12 | roles: 13 | - init_docker_swarm 14 | 15 | - name: Join Managers 16 | hosts: managers 17 | become: true 18 | roles: 19 | - manager_join_docker_swarm 20 | 21 | - name: Join Workers 22 | hosts: workers 23 | become: true 24 | roles: 25 | - worker_join_docker_swarm 26 | 27 | - name: Deploy Portainer 28 | hosts: managers[0] 29 | become: true 30 | roles: 31 | - portainer_deploy 32 | -------------------------------------------------------------------------------- /Ansible/Playbooks/File-Copy/File-Copy-Undo-Playbook.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Undo Docker Compose Deployment 3 | hosts: all 4 | become: true 5 | tasks: 6 | - name: Stop Docker Container 7 | community.docker.docker_compose: 8 | project_src: /home/ubuntu/ansible-docker/docker-compose 9 | state: absent 10 | 11 | - name: Remove Docker Compose file 12 | ansible.builtin.file: 13 | path: /home/ubuntu/ansible-docker/docker-compose/docker-compose.yml 14 | state: absent 15 | 16 | - name: Remove Docker Compose directory 17 | ansible.builtin.file: 18 | path: /home/ubuntu/ansible-docker 19 | state: absent 20 | 21 | - name: Remove Website directory 22 | ansible.builtin.file: 23 | path: /home/ubuntu/docker/nginx/web 24 | state: absent 25 | -------------------------------------------------------------------------------- /Ansible/Playbooks/File-Copy/inventory.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | docker: 3 | hosts: 4 | docker01: 5 | ansible_host: 192.168.200.50 6 | ansible_user: 'ubuntu' 7 | ansible_become: true 8 | ansible_become_method: sudo 9 | -------------------------------------------------------------------------------- /Ansible/Playbooks/File-Copy/nginx/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | web: 4 | image: nginx 5 | container_name: jimsgarage 6 | volumes: 7 | - /home/ubuntu/docker/nginx/templates:/etc/nginx/templates 8 | - /home/ubuntu/docker/nginx/web/website:/usr/share/nginx/html 9 | environment: 10 | - NGINX_HOST=nginx.jimsgarage.co.uk 11 | - NGINX_PORT=80 12 | labels: 13 | - "traefik.enable=true" 14 | - "traefik.http.routers.nginx.entrypoints=http" 15 | - "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage.co.uk`)" 16 | - "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.nginx.middlewares=nginx-https-redirect" 18 | - 
"traefik.http.routers.nginx-secure.entrypoints=https" 19 | - "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage.co.uk`)" 20 | - "traefik.http.routers.nginx-secure.tls=true" 21 | - "traefik.http.routers.nginx-secure.service=nginx" 22 | - "traefik.http.services.nginx.loadbalancer.server.port=80" 23 | - "traefik.docker.network=proxy" 24 | networks: 25 | proxy: 26 | security_opt: 27 | - no-new-privileges:true 28 | 29 | networks: 30 | proxy: 31 | external: true -------------------------------------------------------------------------------- /Ansible/Playbooks/File-Copy/nginx/website/Jims-Garage-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/File-Copy/nginx/website/Jims-Garage-1.png -------------------------------------------------------------------------------- /Ansible/Playbooks/Multi-OS-Update/inventory.yaml: -------------------------------------------------------------------------------- 1 | arch: 2 | hosts: 3 | arch01: 4 | ansible_host: 192.168.200.214 5 | ansible_user: 'root' 6 | ansible_python_interpreter: /usr/bin/python3 7 | 8 | docker: 9 | hosts: 10 | docker01: 11 | ansible_host: 192.168.200.50 12 | ansible_user: 'ubuntu' 13 | ansible_become: true 14 | ansible_become_method: sudo -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory/hosts.ini 3 | host_key_checking = false 4 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/collections/requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.utils 4 | - name: community.general 5 | - name: ansible.posix 6 | - name: kubernetes.core -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/inventory/group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | os: "linux" 3 | arch: "amd64" 4 | 5 | vip: 192.168.3.50 6 | 7 | metallb_version: v0.13.12 8 | lb_range: 192.168.3.80-192.168.3.90 9 | lb_pool_name: first-pool 10 | 11 | ansible_become: true 12 | ansible_become_method: sudo 13 | ################################################################################ 14 | # options to change default values 15 | # kube_vip_version: "v0.8.0" 16 | # vip_interface: "eth0" 17 | # rke2_version: "v1.29.4+rke2r1" 18 | # rke2_install_dir: "/usr/local/bin" 19 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/inventory/hosts.ini: -------------------------------------------------------------------------------- 1 | # Make sure Ansible host has access to these devices 2 | # Good idea to snapshot all machines and deploy uing cloud-init 3 | 4 | [servers] 5 | server1 ansible_host=192.168.3.21 6 | server2 ansible_host=192.168.3.22 7 | server3 ansible_host=192.168.3.23 8 | 9 | [agents] 10 | agent1 ansible_host=192.168.3.24 11 | agent2 ansible_host=192.168.3.25 12 | 13 | [rke2] 14 | 15 | [rke2:children] 16 | servers 17 | agents 18 | 19 | [rke2:vars] 20 | ansible_user=ansible 21 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/add-agent/tasks/main.yaml: 
-------------------------------------------------------------------------------- 1 | # Copy agent config to all agents - we need to change agent2 & 3 later with the token 2 | - name: Deploy RKE2 Agent Configuration 3 | ansible.builtin.template: 4 | src: templates/rke2-agent-config.j2 5 | dest: /etc/rancher/rke2/config.yaml 6 | owner: root 7 | group: root 8 | mode: '0644' 9 | when: inventory_hostname in groups['agents'] 10 | 11 | # Check agents have restarted to pick up config 12 | - name: Ensure RKE2 agents are enabled and running 13 | ansible.builtin.systemd: 14 | name: rke2-agent 15 | enabled: true 16 | state: restarted 17 | daemon_reload: true 18 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/add-agent/templates/rke2-agent-config.j2: -------------------------------------------------------------------------------- 1 | write-kubeconfig-mode: "0644" 2 | token: {{ hostvars['server1']['token'] }} 3 | server: https://{{ hostvars['server1']['ansible_host'] }}:9345 4 | node-label: 5 | - "agent=true" 6 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/add-server/templates/rke2-server-config.j2: -------------------------------------------------------------------------------- 1 | write-kubeconfig-mode: "0644" 2 | token: {{ hostvars['server1']['token'] }} 3 | server: https://{{ hostvars['server1']['ansible_host'] }}:9345 4 | tls-san: 5 | - {{ vip }} 6 | - {{ hostvars['server1']['ansible_host'] }} 7 | - {{ hostvars['server2']['ansible_host'] }} 8 | - {{ hostvars['server3']['ansible_host'] }} 9 | node-label: 10 | - server=true -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/apply-manifests/templates/metallb-ippool.j2: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: {{ lb_pool_name }} 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - {{ lb_range }} -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/kube-vip/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kube_vip_version: v0.8.0 3 | vip_interface: eth0 4 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/kube-vip/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Install kube-vip manifest 5 | description: Install kube-vip manifest 6 | author: 7 | - Jim's Garage 8 | options: 9 | kube_vip_version: 10 | type: str 11 | required: false 12 | default: v0.8.0 13 | description: Version of kube-vip to install 14 | vip_interface: 15 | type: str 16 | required: false 17 | default: eth0 18 | description: Interface to bind kube-vip 19 | vip: 20 | type: str 21 | required: true 22 | description: The virtual IP to use with kube-vip 23 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/kube-vip/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | # Create directory to deploy kube-vip manifest 2 | - name: Create directory for Kube VIP Manifest 3 | ansible.builtin.file: 4 | path: "/var/lib/rancher/rke2/server/manifests" 5 | state: directory 6 | mode: 
"0755" 7 | when: inventory_hostname in groups['servers'] 8 | 9 | # Copy kube-vip to server 1 manifest folder for auto deployment at bootstrap 10 | - name: Deploy Kube VIP Configuration 11 | ansible.builtin.template: 12 | src: templates/kube-vip-config.j2 13 | dest: /var/lib/rancher/rke2/server/manifests/kube-vip.yaml 14 | owner: root 15 | group: root 16 | mode: "0644" 17 | when: inventory_hostname == groups['servers'][0] 18 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/prepare-nodes/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Enable IPv4 forwarding 2 | ansible.posix.sysctl: 3 | name: net.ipv4.ip_forward 4 | value: "1" 5 | state: present 6 | reload: true 7 | tags: sysctl 8 | 9 | - name: Enable IPv6 forwarding 10 | ansible.posix.sysctl: 11 | name: net.ipv6.conf.all.forwarding 12 | value: "1" 13 | state: present 14 | reload: true 15 | tags: sysctl -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-download/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rke2_install_dir: "/usr/local/bin" 3 | rke2_version: "v1.29.4+rke2r1" 4 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-download/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | # Create a directory to download RKE2 binary to 2 | - name: Create directory for RKE2 binary 3 | ansible.builtin.file: 4 | path: "{{ rke2_install_dir }}" 5 | state: directory 6 | mode: '0755' 7 | 8 | # Download the RKE2 binary 9 | - name: Download RKE2 binary 10 | ansible.builtin.get_url: 11 | url: "{{ rke2_binary_url }}" 12 | dest: "{{ rke2_install_dir }}/rke2" 13 | mode: '0755' 14 | 15 | # Set permissions on the RKE2 binary 16 | - name: Set executable permissions on the RKE2 binary 17 | ansible.builtin.file: 18 | path: "{{ rke2_install_dir }}/rke2" 19 | mode: '0755' 20 | state: file 21 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-download/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | rke2_binary_url: "https://github.com/rancher/rke2/releases/download/{{ rke2_version }}/rke2.{{ os }}-{{ arch }}" 3 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-prepare/templates/rke2-agent.service.j2: -------------------------------------------------------------------------------- 1 | # rke2-agent.service.j2 2 | [Unit] 3 | Description=RKE2 Agent 4 | After=network.target 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/rke2 agent 8 | KillMode=process 9 | Restart=on-failure 10 | RestartSec=5s 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-prepare/templates/rke2-server-config.j2: -------------------------------------------------------------------------------- 1 | write-kubeconfig-mode: "0644" 2 | tls-san: 3 | - {{ vip }} 4 | - {{ hostvars['server1']['ansible_host'] }} 5 | - {{ hostvars['server2']['ansible_host'] }} 6 | - {{ hostvars['server3']['ansible_host'] }} 7 | node-label: 8 | - server=true 9 | disable: 10 | - rke2-ingress-nginx 
-------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-prepare/templates/rke2-server.service.j2: -------------------------------------------------------------------------------- 1 | # rke2-server.service.j2 2 | [Unit] 3 | Description=RKE2 server 4 | After=network.target 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/rke2 server 8 | KillMode=process 9 | Restart=on-failure 10 | RestartSec=5s 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/roles/rke2-prepare/vars/main.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Ansible/Playbooks/RKE2/roles/rke2-prepare/vars/main.yaml -------------------------------------------------------------------------------- /Ansible/Playbooks/RKE2/site.yaml: -------------------------------------------------------------------------------- 1 | # Hello, thanks for using my playbook, hopefully you can help to improve it. 2 | # Things that need adding: (there are many more) 3 | # 1) Support different OS & architectures 4 | # 2) Support multiple CNIs 5 | # 3) Improve the wait logic 6 | # 4) Use kubernetes Ansible plugins more sensibly 7 | # 5) Optimise flow logic 8 | # 6) Clean up 9 | 10 | ############################################################### 11 | # MAKE SURE YOU CHANGE group_vars/all.yaml VARIABLES!!!!!!!!!!! 12 | ############################################################### 13 | 14 | # bootstraps first server and copies configs for others/agents 15 | - name: Prepare all nodes 16 | hosts: rke2 17 | gather_facts: false # fact gathering is slow and not needed for any of our tasks 18 | become: true 19 | roles: 20 | - prepare-nodes 21 | - rke2-download 22 | 23 | # Creates RKE2 bootstrap manifests folder and copies kube-vip template over (configured with variables) 24 | - name: Deploy Kube VIP 25 | hosts: servers 26 | gather_facts: true 27 | roles: 28 | - kube-vip 29 | 30 | # bootstraps the first server, copies configs to nodes, saves token to use later 31 | - name: Prepare RKE2 on Servers and Agents 32 | hosts: servers,agents 33 | gather_facts: true 34 | roles: 35 | - rke2-prepare 36 | 37 | # Adds additional servers using the token from the previous task 38 | - name: Add additional RKE2 Servers 39 | hosts: servers 40 | gather_facts: true 41 | roles: 42 | - add-server 43 | 44 | # Adds agents to the cluster 45 | - name: Add additional RKE2 Agents 46 | hosts: agents 47 | gather_facts: true 48 | roles: 49 | - add-agent 50 | 51 | # Finish kube-vip, add metallb 52 | - name: Apply manifests after cluster is created 53 | hosts: servers 54 | gather_facts: true 55 | roles: 56 | - apply-manifests 57 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Secrets-Variables/password: -------------------------------------------------------------------------------- 1 | password -------------------------------------------------------------------------------- /Ansible/Playbooks/Secrets-Variables/secrets_file.enc: -------------------------------------------------------------------------------- 1 | api_key: SuperSecretPassword -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/collections/requirements.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.utils 4 | - name: community.general 5 | - name: ansible.posix 6 | - name: kubernetes.core -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/inventory/group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | os: "linux" 2 | arch: "amd64" 3 | 4 | talos_version: v1.7.0 5 | talosctl_version: v1.7.5 6 | control_plane_ip: 192.168.200.61 7 | 8 | control_plane_2: 192.168.200.62 9 | control_plane_3: 192.168.200.63 10 | 11 | worker_1: 192.168.200.64 12 | worker_2: 192.168.200.65 13 | 14 | config_directory: "/home/{{ ansible_user }}/.talos" 15 | config_file: "/home/{{ ansible_user }}/.talos/talosconfig" 16 | 17 | kube_vip_version: "v0.8.0" 18 | vip_interface: eth0 19 | vip: 192.168.3.50 20 | 21 | metallb_version: v0.13.12 22 | lb_range: 192.168.3.80-192.168.3.90 23 | lb_pool_name: first-pool 24 | 25 | ansible_user: ubuntu 26 | ansible_become: true 27 | ansible_become_method: sudo 28 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/inventory/hosts.ini: -------------------------------------------------------------------------------- 1 | # Make sure Ansible host has access to these devices 2 | # Good idea to snapshot all machines and deploy uing cloud-template 3 | [ansible] 4 | 127.0.0.1 ansible_connection=local 5 | 6 | [servers] 7 | server1 ansible_host=192.168.3.61 8 | server2 ansible_host=192.168.3.62 9 | server3 ansible_host=192.168.3.63 10 | 11 | [agents] 12 | agent1 ansible_host=192.168.3.64 13 | agent2 ansible_host=192.168.3.65 14 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/roles/add-workers/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Generate Machine Configurations. This is using the qemu agent as per: https://www.talos.dev/v1.7/talos-guides/install/virtualized-platforms/proxmox/ 3 | - name: Apply config to first worker 4 | ansible.builtin.command: 5 | cmd: talosctl apply-config --insecure --nodes {{ worker_1 }} --file {{ config_directory }}/worker.yaml 6 | changed_when: true 7 | 8 | - name: Apply config to second worker 9 | ansible.builtin.command: 10 | cmd: talosctl apply-config --insecure --nodes {{ worker_2 }} --file {{ config_directory }}/worker.yaml 11 | changed_when: true 12 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/roles/apply-config/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Generate Machine Configurations. 
This is using the qemu agent as per: https://www.talos.dev/v1.7/talos-guides/install/virtualized-platforms/proxmox/ 3 | - name: Apply config to first node 4 | ansible.builtin.command: 5 | cmd: talosctl apply-config --insecure --nodes {{ control_plane_ip }} --file {{ config_directory }}/controlplane.yaml 6 | changed_when: true 7 | 8 | - name: Apply config to second node 9 | ansible.builtin.command: 10 | cmd: talosctl apply-config --insecure --nodes {{ control_plane_2 }} --file {{ config_directory }}/controlplane.yaml 11 | changed_when: true 12 | 13 | - name: Apply config to third node 14 | ansible.builtin.command: 15 | cmd: talosctl apply-config --insecure --nodes {{ control_plane_3 }} --file {{ config_directory }}/controlplane.yaml 16 | changed_when: true 17 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/roles/configure-cluster/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check that the config file doesn't already exist 3 | ansible.builtin.stat: 4 | path: "{{ config_file }}" 5 | register: stat_result 6 | 7 | # Generate Machine Configurations. This is using the qemu agent as per: https://www.talos.dev/v1.7/talos-guides/install/virtualized-platforms/proxmox/ 8 | - name: Generate config for cluster 9 | when: "not stat_result.stat.exists" 10 | ansible.builtin.command: talosctl gen config talos-proxmox-cluster https://{{ control_plane_ip }}:6443 --output-dir {{ config_directory }} --install-image factory.talos.dev/installer/ce4c980550dd2ab1b17bbf2b08801c7eb59418eafe8f279833297925d67c7515:{{ talos_version }} 11 | changed_when: true 12 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/roles/configure-talosctl/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Update TalosCTL 3 | - name: Update TalosCTL configs 4 | ansible.builtin.command: talosctl config endpoint {{ control_plane_ip }} --talosconfig {{ config_file }} 5 | changed_when: true 6 | 7 | - name: Update TalosCTL configs 8 | ansible.builtin.command: talosctl config node {{ control_plane_ip }} --talosconfig {{ config_file }} 9 | changed_when: true 10 | 11 | ################################# 12 | # WAIT FOR REBOOT & BOOTSTRAP # 13 | ################################# 14 | - name: Keep trying to bootstrap 15 | ansible.builtin.command: 16 | cmd: "talosctl bootstrap --talosconfig {{ config_file }}" 17 | register: bootstrap_result 18 | retries: 10 19 | delay: 30 20 | until: bootstrap_result.rc == 0 21 | changed_when: bootstrap_result.rc == 0 22 | 23 | # Grab Kubeconfig 24 | - name: Get Kubeconfig 25 | ansible.builtin.command: talosctl kubeconfig . 
--talosconfig {{ config_file }} 26 | changed_when: true 27 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/roles/install-talosctl/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ansible Playbook to install Talos 3 | - name: Download talosctl for Linux (amd64) 4 | ansible.builtin.get_url: 5 | url: https://github.com/siderolabs/talos/releases/download/{{ talosctl_version }}/talosctl-linux-amd64 6 | dest: /usr/local/bin/talosctl 7 | mode: '0755' # Make the binary executable 8 | register: download_result # Register the result for debugging or verification 9 | 10 | - name: Display download result 11 | ansible.builtin.debug: 12 | var: download_result # Display the result of the download task 13 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Talos/site.yaml: -------------------------------------------------------------------------------- 1 | # Hello, thanks for using my playbook, hopefully you can help to improve it. 2 | 3 | # Install TalosCTL on Ansible node 4 | - name: Install TalosCTL 5 | hosts: ansible 6 | gather_facts: true # enables us to gather lots of useful variables: https://docs.ansible.com/ansible/latest/collections/ansible/builtin/setup_module.html 7 | become: true 8 | roles: 9 | - install-talosctl 10 | 11 | # Configure Cluster Configuration 12 | - name: Configure Cluster 13 | hosts: ansible 14 | gather_facts: true 15 | roles: 16 | - configure-cluster 17 | 18 | # Apply Cluster Configuration 19 | - name: Configure Cluster 20 | hosts: ansible 21 | gather_facts: true 22 | roles: 23 | - apply-config 24 | 25 | # Configure TalosCTL 26 | - name: Configure TalosCTL 27 | hosts: ansible 28 | gather_facts: true 29 | roles: 30 | - configure-talosctl 31 | 32 | # Add Workers 33 | - name: Add Workers 34 | hosts: ansible 35 | gather_facts: true 36 | roles: 37 | - add-workers 38 | -------------------------------------------------------------------------------- /Ansible/Playbooks/Update/readme.md: -------------------------------------------------------------------------------- 1 | # Add to Hosts File (change ansible_user if required) 2 | ``` 3 | [all:vars] 4 | ansible_user='ubuntu' 5 | ansible_become=yes 6 | ansible_become_method=sudo 7 | ``` -------------------------------------------------------------------------------- /Ansible/Playbooks/Update/update-builtin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: yes 4 | become: yes 5 | 6 | tasks: 7 | - name: Perform a distro upgrade 8 | ansible.builtin.apt: 9 | upgrade: dist 10 | update_cache: yes 11 | 12 | - name: Check if a reboot is required 13 | ansible.builtin.stat: 14 | path: /var/run/reboot-required 15 | get_checksum: no 16 | register: reboot_required_file 17 | 18 | - name: Reboot the server (if necessary) 19 | ansible.builtin.reboot: 20 | when: reboot_required_file.stat.exists == true 21 | 22 | - name: Remove dependencies that are no longer needed 23 | ansible.builtin.apt: 24 | autoremove: yes -------------------------------------------------------------------------------- /Ansible/Playbooks/Update/update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | tasks: 5 | - name: Update apt repo and cache on all Debian/Ubuntu boxes 6 | apt: update_cache=yes force_apt_get=yes cache_valid_time=3600 7 | 8 | - name: 
Upgrade all packages on servers 9 | apt: upgrade=dist force_apt_get=yes 10 | 11 | - name: Check if a reboot is needed on all servers 12 | register: reboot_required_file 13 | stat: path=/var/run/reboot-required get_checksum=false 14 | 15 | - name: Reboot the box if kernel updated 16 | reboot: 17 | msg: "Reboot initiated by Ansible for kernel updates" 18 | connect_timeout: 5 19 | reboot_timeout: 300 20 | pre_reboot_delay: 0 21 | post_reboot_delay: 30 22 | test_command: uptime 23 | when: reboot_required_file.stat.exists 24 | -------------------------------------------------------------------------------- /Ansible/SSH/readme.md: -------------------------------------------------------------------------------- 1 | # Edit Hosts File 2 | ``` 3 | sudo nano /etc/ansible/hosts 4 | ``` 5 | 6 | # Fix SSH Key Permissions 7 | ``` 8 | chmod 600 ~/.ssh/ansible 9 | ``` 10 | # Ansible Ping Command 11 | ``` 12 | ansible all -m ping 13 | ``` 14 | 15 | # Create SSH Key 16 | ``` 17 | ssh-keygen -t ed25519 -C "ansible" 18 | ``` 19 | 20 | # Copy SSH Key 21 | ``` 22 | ssh-copy-id -i ~/.ssh/ansible.pub 192.168.200.50 23 | ``` 24 | 25 | # Ansible Ping Command With New SSH Key 26 | ``` 27 | ansible all -m ping --key-file ~/.ssh/ansible 28 | ``` 29 | -------------------------------------------------------------------------------- /Authelia/Authelia/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | authelia: 5 | image: authelia/authelia 6 | container_name: authelia 7 | volumes: 8 | - /home/ubuntu/docker/authelia/config:/config 9 | networks: 10 | - proxy 11 | security_opt: 12 | - no-new-privileges:true 13 | labels: 14 | - 'traefik.enable=true' 15 | - 'traefik.http.routers.authelia.rule=Host(`auth.jimsgarage.co.uk`)' 16 | - 'traefik.http.routers.authelia.entrypoints=https' 17 | - 'traefik.http.routers.authelia.tls=true' 18 | - 'traefik.http.middlewares.authelia.forwardAuth.address=http://authelia:9091/api/verify?rd=https://auth.jimsgarage.co.uk' 19 | - 'traefik.http.middlewares.authelia.forwardAuth.trustForwardHeader=true' 20 | - 'traefik.http.middlewares.authelia.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email' 21 | - 'traefik.http.middlewares.authelia-basic.forwardAuth.address=http://authelia:9091/api/verify?auth=basic' 22 | - 'traefik.http.middlewares.authelia-basic.forwardAuth.trustForwardHeader=true' 23 | - 'traefik.http.middlewares.authelia-basic.forwardAuth.authResponseHeaders=Remote-User,Remote-Groups,Remote-Name,Remote-Email' 24 | - 'traefik.http.services.authelia.loadbalancer.server.port=9091' 25 | ports: 26 | - 9091:9091 27 | restart: unless-stopped 28 | environment: 29 | - TZ=Europe/London 30 | healthcheck: 31 | disable: true 32 | 33 | redis: 34 | image: redis:alpine 35 | container_name: redis 36 | volumes: 37 | - /home/ubuntu/docker/redis:/data 38 | networks: 39 | - proxy 40 | expose: 41 | - 6379 42 | restart: unless-stopped 43 | environment: 44 | - TZ=Europe/London 45 | 46 | 47 | networks: 48 | proxy: 49 | external: true 50 | -------------------------------------------------------------------------------- /Authelia/Authelia/users_database.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################################################### 3 | # Users Database # 4 | ############################################################### 5 | 6 | # This file can be used if you do not have an LDAP set up. 
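# A hedged example of generating a password digest for a new user with the Authelia
# container (the exact subcommand and flags are assumptions based on recent Authelia
# releases — check `authelia crypto hash generate --help` for your version):
#   docker run --rm authelia/authelia:latest authelia crypto hash generate argon2 --password 'yourpassword'
# Paste the resulting digest into the `password:` field of the user entry below.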
7 | 8 | # List of users 9 | users: 10 | authelia: 11 | disabled: false 12 | displayname: "Authelia User" 13 | # Password is authelia 14 | password: "$6$rounds=50000$BpLnfgDsc2WD8F2q$Zis.ixdg9s/UOJYrs56b5QEZFiZECu0qZVNsIYxBaNJ7ucIL.nlxVCT5tqh8KHG8X4tlwCFm5r6NTOZZ5qRFN/" # yamllint disable-line rule:line-length 15 | email: authelia@authelia.com 16 | groups: 17 | - admins 18 | - dev 19 | ... 20 | -------------------------------------------------------------------------------- /Authelia/Nginx/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | web: 4 | image: nginx 5 | container_name: nginx 6 | volumes: 7 | - /home/ubuntu/docker/nginx:/etc/nginx/templates 8 | environment: 9 | - NGINX_HOST=nginx.jimsgarage.co.uk 10 | - NGINX_PORT=80 11 | labels: 12 | - "traefik.enable=true" 13 | - "traefik.http.routers.nginx.entrypoints=http" 14 | - "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage.co.uk`)" 15 | - "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https" 16 | - "traefik.http.routers.nginx.middlewares=nginx-https-redirect" 17 | - "traefik.http.routers.nginx-secure.entrypoints=https" 18 | - "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage.co.uk`)" 19 | - "traefik.http.routers.nginx-secure.tls=true" 20 | - "traefik.http.routers.nginx-secure.service=nginx" 21 | - "traefik.http.services.nginx.loadbalancer.server.port=80" 22 | - "traefik.http.routers.nginx-secure.middlewares=authelia@docker" 23 | - "traefik.docker.network=proxy" 24 | networks: 25 | proxy: 26 | security_opt: 27 | - no-new-privileges:true 28 | 29 | networks: 30 | proxy: 31 | external: true 32 | -------------------------------------------------------------------------------- /Authentik/.env: -------------------------------------------------------------------------------- 1 | PG_PASS=password 2 | AUTHENTIK_SECRET_KEY=password 3 | COMPOSE_PORT_HTTPS=1443 4 | COMPOSE_PORT_HTTP=7000 5 | # SMTP Host Emails are sent to 6 | AUTHENTIK_EMAIL__HOST=localhost 7 | AUTHENTIK_EMAIL__PORT=25 8 | # Optionally authenticate (don't add quotation marks to your password) 9 | AUTHENTIK_EMAIL__USERNAME=email@your-domain.com 10 | AUTHENTIK_EMAIL__PASSWORD=password 11 | # Use StartTLS 12 | AUTHENTIK_EMAIL__USE_TLS=false 13 | # Use SSL 14 | AUTHENTIK_EMAIL__USE_SSL=false 15 | AUTHENTIK_EMAIL__TIMEOUT=10 16 | # Email address authentik will send from, should have a correct @domain 17 | AUTHENTIK_EMAIL__FROM=authentik@localhost 18 | -------------------------------------------------------------------------------- /Authentik/Web-Proxies/.env: -------------------------------------------------------------------------------- 1 | ../.env -------------------------------------------------------------------------------- /Authentik/Web-Proxies/example-nginx-docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | web: 4 | image: nginx 5 | container_name: jimsgarage 6 | volumes: 7 | - /home/ubuntu/docker/nginx/templates:/etc/nginx/templates 8 | - /home/ubuntu/docker/nginx/web:/usr/share/nginx/html 9 | environment: 10 | - NGINX_HOST=nginx.jimsgarage.co.uk 11 | - NGINX_PORT=80 12 | labels: 13 | - "traefik.enable=true" 14 | - "traefik.http.routers.nginx.entrypoints=http" 15 | - "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage.co.uk`)" 16 | - "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https" 17 | - 
"traefik.http.routers.nginx.middlewares=nginx-https-redirect" 18 | - "traefik.http.routers.nginx-secure.entrypoints=https" 19 | - "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage.co.uk`)" 20 | - "traefik.http.routers.nginx-secure.tls=true" 21 | - "traefik.http.routers.nginx-secure.service=nginx" 22 | - "traefik.http.services.nginx.loadbalancer.server.port=80" 23 | - "traefik.http.routers.nginx-secure.middlewares=middlewares-authentik@file" #add this to any container you want to use the Authentik web proxy 24 | - "traefik.docker.network=proxy" 25 | networks: 26 | proxy: 27 | security_opt: 28 | - no-new-privileges:true 29 | 30 | networks: 31 | proxy: 32 | external: true -------------------------------------------------------------------------------- /Authentik/Web-Proxies/traefik-conf.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | crowdsec-bouncer: 4 | forwardauth: 5 | address: http://bouncer-traefik:8080/api/v1/forwardAuth 6 | trustForwardHeader: true 7 | # https://github.com/goauthentik/authentik/issues/2366 8 | middlewares-authentik: 9 | forwardAuth: 10 | address: "http://authentik_server:9000/outpost.goauthentik.io/auth/traefik" 11 | trustForwardHeader: true 12 | authResponseHeaders: 13 | - X-authentik-username 14 | - X-authentik-groups 15 | - X-authentik-email 16 | - X-authentik-name 17 | - X-authentik-uid 18 | - X-authentik-jwt 19 | - X-authentik-meta-jwks 20 | - X-authentik-meta-outpost 21 | - X-authentik-meta-provider 22 | - X-authentik-meta-app 23 | - X-authentik-meta-version 24 | -------------------------------------------------------------------------------- /Caddy/.env: -------------------------------------------------------------------------------- 1 | CF_API_TOKEN=1ufPvdNumd2MJd9jBQmPSPLRweLu_VrNgcW1shxy 2 | CF_EMAIL=your@email.com -------------------------------------------------------------------------------- /Caddy/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | admin 0.0.0.0:2019 3 | } 4 | 5 | *.jimsgarage.co.uk { 6 | tls { 7 | dns cloudflare {env.CF_API_TOKEN} 8 | propagation_delay 2m 9 | resolvers 1.1.1.1 10 | } 11 | 12 | @caddy host caddy.jimsgarage.co.uk 13 | handle @caddy { 14 | root * /usr/share/caddy 15 | php_fastcgi localhost:80 16 | file_server 17 | } 18 | 19 | @trueNAS host truenas.jimsgarage.co.uk 20 | handle @trueNAS { 21 | reverse_proxy 192.168.6.2:80 22 | } 23 | 24 | @portainer host caddy-portainer.jimsgarage.co.uk 25 | handle @portainer { 26 | reverse_proxy https://portainer:9443 { 27 | transport http { 28 | tls 29 | tls_insecure_skip_verify 30 | } 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /Caddy/Dockerfile: -------------------------------------------------------------------------------- 1 | # For prod you'd want to pin the version: e.g., 2.9.1-builder 2 | FROM caddy:builder AS builder 3 | 4 | RUN xcaddy build \ 5 | --with github.com/caddy-dns/cloudflare 6 | FROM caddy:latest 7 | 8 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy -------------------------------------------------------------------------------- /Caddy/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | caddy: 3 | build: 4 | context: . 
5 | dockerfile: Dockerfile 6 | container_name: caddy 7 | restart: unless-stopped 8 | env_file: 9 | - .env 10 | environment: 11 | - CLOUDFLARE_EMAIL=${CF_EMAIL} 12 | - CLOUDFLARE_API_TOKEN=${CF_API_TOKEN} 13 | - ACME_AGREE=true 14 | ports: 15 | - 2019:2019 # remove if you do not want admin API 16 | - 80:80 17 | - 443:443 18 | volumes: 19 | - caddy-config:/config 20 | - caddy-data:/data 21 | - ./Caddyfile:/etc/caddy/Caddyfile 22 | - ./index.html:/usr/share/caddy/index.html 23 | networks: 24 | - caddy # add other containers onto this network to use dns name 25 | 26 | volumes: 27 | caddy-config: 28 | caddy-data: 29 | 30 | # create this first before running the docker-compose - docker network create caddy 31 | networks: 32 | caddy: 33 | external: true -------------------------------------------------------------------------------- /Caddy/index.html: -------------------------------------------------------------------------------- 1 | hello -------------------------------------------------------------------------------- /Cloudflare-HTTPS/cloudflared/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | tunnel: 3 | container_name: cloudflared-tunnel 4 | image: cloudflare/cloudflared 5 | restart: unless-stopped 6 | command: tunnel run 7 | networks: 8 | - proxy 9 | environment: 10 | - TUNNEL_TOKEN=${CLOUDFLARE_TUNNEL_TOKEN} 11 | 12 | networks: 13 | proxy: 14 | external: true -------------------------------------------------------------------------------- /Cloudflare-HTTPS/nginx/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | nginx: 3 | image: lscr.io/linuxserver/nginx:latest 4 | container_name: nginx 5 | environment: 6 | - PUID=1000 7 | - PGID=1000 8 | - TZ=Etc/UTC 9 | networks: 10 | - proxy 11 | labels: 12 | - "traefik.enable=true" 13 | - "traefik.docker.network=proxy" 14 | - "traefik.http.routers.nginx.entrypoints=http" 15 | - "traefik.http.routers.nginx.rule=Host(`nginx.jimsgarage-demos.co.uk`)" 16 | - "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.nginx.middlewares=nginx-https-redirect" 18 | - "traefik.http.routers.nginx-secure.entrypoints=https" 19 | - "traefik.http.routers.nginx-secure.rule=Host(`nginx.jimsgarage-demos.co.uk`)" 20 | - "traefik.http.routers.nginx-secure.tls=true" 21 | - "traefik.http.routers.nginx-secure.tls.certresolver=cloudflare" 22 | - "traefik.http.routers.nginx-secure.service=nginx" 23 | - "traefik.http.services.nginx.loadbalancer.server.port=80" 24 | networks: 25 | proxy: 26 | external: true -------------------------------------------------------------------------------- /Cloudflare-Tunnel/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | tunnel: 4 | container_name: cloudflared-tunnel 5 | image: cloudflare/cloudflared 6 | restart: unless-stopped 7 | command: tunnel run 8 | environment: 9 | - TUNNEL_TOKEN=YOUR_KEY_HERE 10 | networks: 11 | macvlan4: # change name to whatever you like 12 | ipv4_address: 192.168.4.20 # change to your IP in your vLAN subnet 13 | 14 | networks: 15 | macvlan4: 16 | external: true 17 | -------------------------------------------------------------------------------- /Cloudflare-Tunnel/macvlan: -------------------------------------------------------------------------------- 1 | docker network create -d macvlan \ 2 | --subnet=192.168.4.0/24 \ 3 | --gateway=192.168.4.1 
\ 4 | -o parent=eth0.4 \ 5 | macvlan4 6 | -------------------------------------------------------------------------------- /Code-Server/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "2.1" 3 | services: 4 | code-server: 5 | image: lscr.io/linuxserver/code-server:latest 6 | container_name: code-server 7 | environment: 8 | - PUID=1000 9 | - PGID=1000 10 | - TZ=Etc/UTC 11 | - PASSWORD=password #optional 12 | - HASHED_PASSWORD= #optional 13 | - SUDO_PASSWORD=password #optional 14 | - SUDO_PASSWORD_HASH= #optional 15 | - PROXY_DOMAIN=code-server.jimsgarage.co.uk #optional 16 | - DEFAULT_WORKSPACE=/config/workspace #optional 17 | volumes: 18 | - /home/ubuntu/docker/code-server/config:/config 19 | networks: 20 | proxy: 21 | labels: 22 | - "traefik.enable=true" 23 | - "traefik.http.routers.code-server.entrypoints=http" 24 | - "traefik.http.routers.code-server.rule=Host(`code-server.yourdomain.com`)" 25 | - "traefik.http.middlewares.code-server-https-redirect.redirectscheme.scheme=https" 26 | - "traefik.http.routers.code-server.middlewares=code-server-https-redirect" 27 | - "traefik.http.routers.code-server-secure.entrypoints=https" 28 | - "traefik.http.routers.code-server-secure.rule=Host(`code-server.yourdomain.com`)" 29 | - "traefik.http.routers.code-server-secure.tls=true" 30 | - "traefik.http.routers.code-server-secure.service=code-server" 31 | - "traefik.http.services.code-server.loadbalancer.server.port=8443" 32 | - "traefik.docker.network=proxy" 33 | restart: unless-stopped 34 | 35 | networks: 36 | proxy: 37 | external: true 38 | -------------------------------------------------------------------------------- /Crowdsec/Traefik/config.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | crowdsec-bouncer: 4 | forwardauth: 5 | address: http://bouncer-traefik:8080/api/v1/forwardAuth 6 | trustForwardHeader: true 7 | -------------------------------------------------------------------------------- /Crowdsec/Traefik/traefik.yaml: -------------------------------------------------------------------------------- 1 | api: 2 | dashboard: true 3 | debug: true 4 | entryPoints: 5 | http: 6 | address: ":80" 7 | http: 8 | middlewares: 9 | - crowdsec-bouncer@file 10 | redirections: 11 | entryPoint: 12 | to: https 13 | scheme: https 14 | https: 15 | address: ":443" 16 | http: 17 | middlewares: 18 | - crowdsec-bouncer@file 19 | serversTransport: 20 | insecureSkipVerify: true 21 | providers: 22 | docker: 23 | endpoint: "unix:///var/run/docker.sock" 24 | exposedByDefault: false 25 | file: 26 | filename: /config.yml 27 | certificatesResolvers: 28 | cloudflare: 29 | acme: 30 | email: your@email.com #add your email 31 | storage: acme.json 32 | dnsChallenge: 33 | provider: cloudflare 34 | resolvers: 35 | - "1.1.1.1:53" 36 | - "1.0.0.1:53" 37 | log: 38 | level: "INFO" 39 | filePath: "/var/log/traefik/traefik.log" 40 | accessLog: 41 | filePath: "/var/log/traefik/access.log" 42 | -------------------------------------------------------------------------------- /Crowdsec/acquis.yaml: -------------------------------------------------------------------------------- 1 | filenames: 2 | - /var/log/traefik/* 3 | labels: 4 | type: traefik 5 | -------------------------------------------------------------------------------- /Crowdsec/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | crowdsec: 4 | image: 
crowdsecurity/crowdsec:latest 5 | container_name: crowdsec 6 | environment: 7 | GID: "${GID-1000}" 8 | COLLECTIONS: "crowdsecurity/linux crowdsecurity/traefik" 9 | volumes: 10 | - /home/ubuntu/docker/crowdsec/acquis.yaml:/etc/crowdsec/acquis.yaml 11 | - /home/ubuntu/docker/crowdsec/db:/var/lib/crowdsec/data/ 12 | - /home/ubuntu/docker/crowdsec/config:/etc/crowdsec/ 13 | - /home/ubuntu/docker/traefik/logs:/var/log/traefik/:ro 14 | networks: 15 | - proxy 16 | security_opt: 17 | - no-new-privileges:true 18 | restart: unless-stopped 19 | 20 | bouncer-traefik: 21 | image: docker.io/fbonalair/traefik-crowdsec-bouncer:latest 22 | container_name: bouncer-traefik 23 | environment: 24 | CROWDSEC_BOUNCER_API_KEY: create_a_random_api_key 25 | CROWDSEC_AGENT_HOST: crowdsec:8080 26 | networks: 27 | - proxy 28 | depends_on: 29 | - crowdsec 30 | restart: unless-stopped 31 | security_opt: 32 | - no-new-privileges:true 33 | networks: 34 | proxy: 35 | external: true 36 | -------------------------------------------------------------------------------- /DIUN/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | services: 4 | diun: 5 | image: crazymax/diun:latest 6 | container_name: diun 7 | command: serve 8 | volumes: 9 | - "/home/ubuntu/diun/data:/data" 10 | - "/var/run/docker.sock:/var/run/docker.sock" 11 | environment: 12 | - "TZ=Europe/London" 13 | - "LOG_LEVEL=info" 14 | - "DIUN_WATCH_WORKERS=20" 15 | - "DIUN_WATCH_SCHEDULE=0 */6 * * *" 16 | - "DIUN_WATCH_JITTER=30s" 17 | - "DIUN_WATCH_RUNONSTARTUP=true" 18 | - "DIUN_PROVIDERS_DOCKER=true" 19 | - "DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true" 20 | 21 | - "DIUN_NOTIF_GOTIFY_ENDPOINT=https://gotify.jimsgarage.co.uk" 22 | - "DIUN_NOTIF_GOTIFY_TOKEN=AYgfdfQaRk3Pb1x" # get your token from Gotify UI 23 | - "DIUN_NOTIF_GOTIFY_PRIORITY=1" 24 | - "DIUN_NOTIF_GOTIFY_TIMEOUT=10s" 25 | 26 | - "DIUN_NOTIF_DISCORD_WEBHOOKURL=https://discord.com/api/webhooks/1230110122752217159/OWcRAUUbT3QFUSs3z35TCD9dUkM26PH0iNY1RNdgqlzoAMC81SZM_iwQ5wuyY8cyFoqL" # change to your webhook 27 | # - "DIUN_NOTIF_DISCORD_MENTIONS" # (comma separated) 28 | - "DIUN_NOTIF_DISCORD_RENDERFIELDS=true" 29 | - "DIUN_NOTIF_DISCORD_TIMEOUT=10s" 30 | # - "DIUN_NOTIF_DISCORD_TEMPLATEBODY" 31 | 32 | labels: 33 | - "diun.enable=true" 34 | restart: always 35 | -------------------------------------------------------------------------------- /Deconz/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | deconz: 4 | image: deconzcommunity/deconz:latest 5 | container_name: deconz 6 | restart: unless-stopped 7 | ports: 8 | - '8002:8002' 9 | - '5443:5443' 10 | - '5900:5900' 11 | volumes: 12 | - /home/ubuntu/docker/deconz:/opt/deCONZ 13 | - /home/ubuntu/docker/deconz/otau:/root/otau 14 | devices: 15 | - /dev/ttyACM0:/dev/ttyACM0 16 | environment: 17 | - DECONZ_DEVICE=/dev/ttyACM0 18 | - DECONZ_WEB_PORT=8002 19 | - DECONZ_WS_PORT=5443 20 | - DEBUG_INFO=1 21 | - DEBUG_APS=0 22 | - DEBUG_ZCL=0 23 | - DEBUG_ZDP=0 24 | - DEBUG_OTAU=0 25 | - DECONZ_VNC_MODE=1 26 | - DECONZ_VNC_PORT=5900 27 | - DECONZ_VNC_PASSWORD=password 28 | - TZ=Europe/London 29 | security_opt: 30 | - no-new-privileges:true 31 | -------------------------------------------------------------------------------- /Docker-Swarm/portainer-agent-stack.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | agent: 5 | image: 
portainer/agent:2.19.4 6 | volumes: 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | - /var/lib/docker/volumes:/var/lib/docker/volumes 9 | networks: 10 | - agent_network 11 | deploy: 12 | mode: global 13 | placement: 14 | constraints: [node.platform.os == linux] 15 | 16 | portainer: 17 | image: portainer/portainer-ce:2.19.4 18 | command: -H tcp://tasks.agent:9001 --tlsskipverify 19 | ports: 20 | - "9443:9443" 21 | - "9000:9000" 22 | - "8000:8000" 23 | volumes: 24 | - type: bind 25 | source: /mnt/Portainer 26 | target: /data 27 | networks: 28 | - agent_network 29 | deploy: 30 | mode: replicated 31 | replicas: 1 32 | placement: 33 | constraints: [node.role == manager] 34 | 35 | networks: 36 | agent_network: 37 | driver: overlay 38 | attachable: true -------------------------------------------------------------------------------- /DynamicDNS/config: -------------------------------------------------------------------------------- 1 | ## WARNING: set deamon at least to 600 seconds if you use checkip or you could 2 | ## get banned from their service. 3 | daemon=600 # check every 600 seconds (10mins) 4 | ssl=yes # use ssl-support 5 | use=web # acquire current IP address via web URL 6 | 7 | # Override IP address provider since SSL=yes currently breaks 8 | # the default (non-SSL) provider in my version of ddclient. 9 | # GitHub issue: https://github.com/ddclient/ddclient/issues/597 10 | web=checkip.dyndns.org/ 11 | web-skip='Current IP Address:' # Probably not needed but doesn't hurt 12 | 13 | ## 14 | ## Cloudflare 15 | protocol=cloudflare, \ 16 | zone=example.com, \ 17 | ttl=1, \ 18 | login=email, \ 19 | password=YOUR_API_TOKEN \ 20 | example.com,sub1.example.com,sub2.example.com,sub3.example.com -------------------------------------------------------------------------------- /DynamicDNS/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | ddclient: 4 | image: lscr.io/linuxserver/ddclient:latest 5 | container_name: ddclient 6 | environment: 7 | - PUID=1000 8 | - PGID=1000 9 | - TZ=Etc/UTC 10 | volumes: 11 | - /home/ubuntu/docker/ddns/config:/config 12 | restart: unless-stopped 13 | -------------------------------------------------------------------------------- /DynamicDNS/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Cloudflare API details 4 | ZONE_ID="your_zone_id" 5 | RECORD_ID="your_record_id" 6 | API_TOKEN="your_cloudflare_api_token" 7 | RECORD_NAME="your_domain.com" 8 | 9 | # Get the current external IP 10 | CURRENT_IP=$(curl -s http://ipv4.icanhazip.com/) 11 | 12 | # Get the IP stored in Cloudflare 13 | CLOUDFLARE_IP=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \ 14 | -H "Authorization: Bearer $API_TOKEN" \ 15 | -H "Content-Type: application/json" | jq -r '.result.content') 16 | 17 | # Compare the IPs 18 | if [ "$CURRENT_IP" != "$CLOUDFLARE_IP" ]; then 19 | echo "IP has changed from $CLOUDFLARE_IP to $CURRENT_IP. Updating DNS record..." 
20 | 21 | # Update the Cloudflare DNS record 22 | UPDATE_RESPONSE=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \ 23 | -H "Authorization: Bearer $API_TOKEN" \ 24 | -H "Content-Type: application/json" \ 25 | --data '{"type":"A","name":"'"$RECORD_NAME"'","content":"'"$CURRENT_IP"'","ttl":120,"proxied":false}') 26 | 27 | # Check if the update was successful 28 | if echo "$UPDATE_RESPONSE" | jq -r '.success' | grep -q true; then 29 | echo "DNS record updated successfully." 30 | else 31 | echo "Failed to update DNS record. Response from Cloudflare: $UPDATE_RESPONSE" 32 | fi 33 | else 34 | echo "IP has not changed. No update needed." 35 | fi 36 | -------------------------------------------------------------------------------- /Enclosed/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | enclosed: 3 | image: corentinth/enclosed 4 | #ports: 5 | # - 8787:8787 6 | volumes: 7 | - /home/ubuntu/docker/enclosed:/app/.data 8 | restart: unless-stopped 9 | networks: 10 | - proxy 11 | labels: 12 | - "traefik.enable=true" 13 | - "traefik.docker.network=proxy" 14 | - "traefik.http.routers.enclosed.entrypoints=http" 15 | - "traefik.http.routers.enclosed.rule=Host(`enclosed.jimsgarage.co.uk`)" 16 | - "traefik.http.middlewares.enclosed-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.enclosed.middlewares=enclosed-https-redirect" 18 | - "traefik.http.routers.enclosed-secure.entrypoints=https" 19 | - "traefik.http.routers.enclosed-secure.rule=Host(`enclosed.jimsgarage.co.uk`)" 20 | - "traefik.http.routers.enclosed-secure.tls=true" 21 | - "traefik.http.routers.enclosed-secure.tls.certresolver=cloudflare" 22 | - "traefik.http.routers.enclosed-secure.service=enclosed" 23 | - "traefik.http.services.enclosed.loadbalancer.server.port=8787" 24 | 25 | networks: 26 | proxy: 27 | external: true -------------------------------------------------------------------------------- /Ente/config/museum.yaml: -------------------------------------------------------------------------------- 1 | # HTTP connection parameters 2 | http: 3 | # If true, bind to 443 and use TLS. 4 | # By default, this is false, and museum will bind to 8080 without TLS. 5 | # use-tls: true 6 | 7 | # Specify the base endpoints for various apps 8 | apps: 9 | # Default is https://albums.ente.io 10 | # 11 | # If you're running a self hosted instance and wish to serve public links, 12 | # set this to the URL where your albums web app is running. 13 | public-albums: https://ente.jimsgarage.co.uk 14 | 15 | # SMTP configuration (optional) 16 | # 17 | # Configure credentials here for sending mails from museum (e.g. OTP emails). 18 | # 19 | # The smtp credentials will be used if the host is specified. Otherwise it will 20 | # try to use the transmail credentials. Ideally, one of smtp or transmail should 21 | # be configured for a production instance. 22 | # 23 | # username and password are optional (e.g. if you're using a local relay server 24 | # and don't need authentication). 25 | #smtp: 26 | # host: 27 | # port: 28 | # username: 29 | # password: 30 | # # The email address from which to send the email. Set this to an email 31 | # # address whose credentials you're providing. 
32 | # email: 33 | 34 | s3: 35 | are_local_buckets: true 36 | b2-eu-cen: 37 | key: test 38 | secret: testtest 39 | endpoint: https://minio.jimsgarage.co.uk 40 | region: eu-central-2 41 | bucket: b2-eu-cen 42 | 43 | # Add this once you have done the CLI part 44 | #internal: 45 | # admins: 46 | # - 1580559962386438 -------------------------------------------------------------------------------- /Ente/config/scripts/compose/credentials.yaml: -------------------------------------------------------------------------------- 1 | db: 2 | host: postgres 3 | port: 5432 4 | name: ente_db 5 | user: pguser 6 | password: pgpass 7 | 8 | s3: 9 | are_local_buckets: true 10 | b2-eu-cen: 11 | key: test 12 | secret: testtest 13 | endpoint: https://minio.jimsgarage.co.uk 14 | region: eu-central-2 15 | bucket: b2-eu-cen 16 | wasabi-eu-central-2-v3: 17 | key: test 18 | secret: testtest 19 | endpoint: localhost:3200 20 | region: eu-central-2 21 | bucket: wasabi-eu-central-2-v3 22 | compliance: false 23 | scw-eu-fr-v3: 24 | key: test 25 | secret: testtest 26 | endpoint: localhost:3200 27 | region: eu-central-2 28 | bucket: scw-eu-fr-v3 -------------------------------------------------------------------------------- /Ente/config/scripts/compose/minio-provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Script used to prepare the minio instance that runs as part of the development 4 | # Docker compose cluster. 5 | 6 | while ! mc config host add h0 http://minio:3200 test testtest 7 | do 8 | echo "waiting for minio..." 9 | sleep 0.5 10 | done 11 | 12 | cd /data 13 | 14 | mc mb -p b2-eu-cen 15 | mc mb -p wasabi-eu-central-2-v3 16 | mc mb -p scw-eu-fr-v3 -------------------------------------------------------------------------------- /Gitea/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | server: 5 | image: gitea/gitea:1.21.4 6 | container_name: gitea 7 | environment: 8 | - USER_UID=1000 9 | - USER_GID=1000 10 | - GITEA__database__DB_TYPE=postgres 11 | - GITEA__database__HOST=db:5432 12 | - GITEA__database__NAME=gitea 13 | - GITEA__database__USER=gitea 14 | - GITEA__database__PASSWD=gitea 15 | restart: always 16 | volumes: 17 | - ./gitea:/data 18 | - /etc/timezone:/etc/timezone:ro 19 | - /etc/localtime:/etc/localtime:ro 20 | depends_on: 21 | - db 22 | networks: 23 | - proxy 24 | labels: 25 | - "traefik.enable=true" 26 | - "traefik.http.routers.gitea-secure.entrypoints=https" 27 | - "traefik.http.routers.gitea-secure.rule=Host(`gitea.jimsgarage.co.uk`)" 28 | - "traefik.http.routers.gitea-secure.tls=true" 29 | - "traefik.http.routers.gitea-secure.service=gitea@docker" 30 | - "traefik.http.services.gitea.loadbalancer.server.port=3000" 31 | - "traefik.docker.network=proxy" 32 | security_opt: 33 | - no-new-privileges:true 34 | 35 | db: 36 | image: postgres:14 37 | restart: always 38 | environment: 39 | - POSTGRES_USER=gitea 40 | - POSTGRES_PASSWORD=gitea 41 | - POSTGRES_DB=gitea 42 | volumes: 43 | - ./postgres:/var/lib/postgresql/data 44 | networks: 45 | - proxy 46 | 47 | networks: 48 | proxy: 49 | external: true -------------------------------------------------------------------------------- /Gotify/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | services: 3 | gotify: 4 | image: gotify/server 5 | container_name: gotify 6 | volumes: 7 | - /home/ubuntu/docker/gotify:/app/data 8 | restart: 
unless-stopped 9 | security_opt: 10 | - no-new-privileges:true 11 | networks: 12 | proxy: 13 | environment: 14 | - TZ=Europe/London 15 | labels: 16 | - "traefik.enable=true" 17 | - "traefik.http.routers.gotify.entrypoints=http" 18 | - "traefik.http.routers.gotify.rule=Host(`gotify.yourdomain.com`)" 19 | - "traefik.http.middlewares.gotify-https-redirect.redirectscheme.scheme=https" 20 | - "traefik.http.routers.gotify.middlewares=gotify-https-redirect" 21 | - "traefik.http.routers.gotify-secure.entrypoints=https" 22 | - "traefik.http.routers.gotify-secure.rule=Host(`gotify.yourdomain.com`)" 23 | - "traefik.http.routers.gotify-secure.tls=true" 24 | - "traefik.http.routers.gotify-secure.service=gotify" 25 | - "traefik.http.services.gotify.loadbalancer.server.port=80" 26 | - "traefik.docker.network=proxy" 27 | 28 | networks: 29 | proxy: 30 | external: true 31 | -------------------------------------------------------------------------------- /Grafana-Monitoring/Part-2/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s 4 | evaluation_interval: 30s 5 | body_size_limit: 15MB 6 | sample_limit: 1500 7 | target_limit: 30 8 | label_limit: 30 9 | label_name_length_limit: 200 10 | label_value_length_limit: 200 11 | # scrape_timeout is set to the global default (10s). 12 | 13 | scrape_configs: 14 | - job_name: crowdsec_myMachine 15 | static_configs: 16 | - targets: ["192.168.7.114:6060"] # change this to your crowdsec IP. Be sure to enable port 6060 first 17 | -------------------------------------------------------------------------------- /Grafana-Monitoring/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s 4 | evaluation_interval: 30s 5 | body_size_limit: 15MB 6 | sample_limit: 1500 7 | target_limit: 30 8 | label_limit: 30 9 | label_name_length_limit: 200 10 | label_value_length_limit: 200 11 | # scrape_timeout is set to the global default (10s). 
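# A sketch of where scrape targets would go in this base config (the node_exporter
# job name, address and port below are assumptions — point them at exporters you
# actually run, as in the Part-2 config above):
#scrape_configs:
#  - job_name: node_exporter
#    static_configs:
#      - targets: ["192.168.7.114:9100"]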
12 | -------------------------------------------------------------------------------- /Headscale/Tailscale-Client/docker-compose,yaml: -------------------------------------------------------------------------------- 1 | services: 2 | tailscale: 3 | container_name: tailscale 4 | image: tailscale/tailscale:stable 5 | hostname: headtailscale 6 | volumes: 7 | - /home/ubuntu/docker/tailscale/data:/var/lib/tailscale 8 | - /dev/net/tun:/dev/net/tun 9 | network_mode: "host" 10 | cap_add: 11 | - NET_ADMIN 12 | - NET_RAW 13 | environment: 14 | - TS_STATE_DIR=/var/lib/tailscale 15 | - TS_EXTRA_ARGS=--login-server=https://headscale.jimsgarage.co.uk --advertise-exit-node --advertise-routes=192.168.0.0/16 --accept-dns=true 16 | - TS_NO_LOGS_NO_SUPPORT=true 17 | # - TS_AUTHKEY=e6f46b99f2ddsfsf3easdf125590e415db007 # generate this key inside your headscale server container 18 | restart: unless-stopped 19 | -------------------------------------------------------------------------------- /Headscale/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | headscale: 4 | container_name: headscale 5 | volumes: 6 | - /home/ubuntu/docker/headscale/config:/etc/headscale/ 7 | - /home/ubuntu/docker/headscale/keys:/var/lib/headscale/ 8 | ports: 9 | - 8080:8080 10 | - 9090:9090 11 | image: headscale/headscale:0.22.3 12 | command: headscale serve 13 | restart: unless-stopped 14 | 15 | headscale-ui: 16 | image: ghcr.io/gurucomputing/headscale-ui:latest 17 | pull_policy: always 18 | container_name: headscale-ui 19 | restart: unless-stopped 20 | ports: 21 | - 9999:80 22 | -------------------------------------------------------------------------------- /Headscale/with-Traefik/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | 3 | services: 4 | headscale: 5 | image: headscale/headscale:latest 6 | pull_policy: always 7 | container_name: headscale 8 | restart: unless-stopped 9 | command: headscale serve 10 | volumes: 11 | - /home/ubuntu/docker/headscale/config:/etc/headscale 12 | - /home/ubuntu/docker/headscale/data:/var/lib/headscale 13 | labels: 14 | - traefik.enable=true 15 | - traefik.http.routers.headscale-rtr.rule=PathPrefix(`/`) # you might want to add: && Host(`your.domain.name`)" 16 | - traefik.http.services.headscale-svc.loadbalancer.server.port=8080 17 | 18 | headscale-ui: 19 | image: ghcr.io/gurucomputing/headscale-ui:latest 20 | pull_policy: always 21 | container_name: headscale-ui 22 | restart: unless-stopped 23 | labels: 24 | - traefik.enable=true 25 | - traefik.http.routers.headscale-ui-rtr.rule=PathPrefix(`/web`) # you might want to add: && Host(`your.domain.name`)" 26 | - traefik.http.services.headscale-ui-svc.loadbalancer.server.port=80 27 | 28 | # If you are following my guides you will already have the below configured 29 | # 30 | # traefik: 31 | # image: traefik:latest 32 | # pull_policy: always 33 | # restart: unless-stopped 34 | # container_name: traefik 35 | # command: 36 | # - --api.insecure=true # remove in production 37 | # - --providers.docker 38 | # - --entrypoints.web.address=:80 39 | # - --entrypoints.websecure.address=:443 40 | # - --global.sendAnonymousUsage=false 41 | # ports: 42 | # - 80:80 43 | # - 443:443 44 | # - 8080:8080 # web UI (enabled with api.insecure) 45 | # volumes: 46 | # - /var/run/docker.sock:/var/run/docker.sock:ro 47 | # - ./traefik/certificates:/certificates 48 | 
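# A minimal sketch of the matching headscale config.yaml values this compose assumes
# (key names follow the upstream headscale example config — verify them against the
# version you deploy):
#
# server_url: https://your.domain.name
# listen_addr: 0.0.0.0:8080
# metrics_listen_addr: 0.0.0.0:9090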
-------------------------------------------------------------------------------- /Hoarder/.env: -------------------------------------------------------------------------------- 1 | # See https://docs.hoarder.app/configuration for more information 2 | DATA_DIR=/data 3 | MEILI_ADDR=http://127.0.0.1:7700 4 | MEILI_MASTER_KEY=UMcGUSb2u9F-!bXoVQrpBiWee^bG�gMGzts6X2!c 5 | NEXTAUTH_URL=https://hoarder.jimsgarage.co.uk 6 | NEXTAUTH_SECRET=x5v*@Hs7suWm&Xcv5i498wRwK4fN7Sk4uZJ3@ptH -------------------------------------------------------------------------------- /Home-Assistant/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: homeassistant 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Home-Assistant/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: homeassistant 6 | namespace: homeassistant 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.ha.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: homeassistant 17 | port: 80 18 | - match: Host(`ha.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: homeassistant 22 | port: 80 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls 27 | -------------------------------------------------------------------------------- /Home-Assistant/Kubernetes/sealed-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: bitnami.com/v1alpha1 2 | kind: SealedSecret 3 | metadata: 4 | creationTimestamp: null 5 | name: secrets 6 | namespace: homeassistant 7 | spec: 8 | encryptedData: 9 | POSTGRES_DB: some-secret 10 | POSTGRES_PASSWORD: some-secret 11 | POSTGRES_USER: some-secret 12 | template: 13 | metadata: 14 | creationTimestamp: null 15 | name: secrets 16 | namespace: homeassistant 17 | type: Opaque 18 | 19 | -------------------------------------------------------------------------------- /Home-Assistant/Kubernetes/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: secrets 5 | namespace: homeassistant 6 | type: Opaque 7 | data: 8 | POSTGRES_USER: some-secret 9 | POSTGRES_PASSWORD: some-secret 10 | POSTGRES_DB: some-secret 11 | -------------------------------------------------------------------------------- /Homepage/Homepage/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | services: 3 | homepage: 4 | image: ghcr.io/benphelps/homepage:latest 5 | container_name: homepage 6 | # uncomment if you do not want to run as root 7 | #user: 1000:1000 8 | # uncomment if you are not using a reverse proxy 9 | #ports: 10 | # - 3000:3000 11 | volumes: 12 | - /home/ubuntu/docker/homepage/config:/app/config # Make sure your local config directory exists 13 | - 
/var/run/docker.sock:/var/run/docker.sock # (optional) For docker integrations 14 | networks: 15 | proxy: 16 | labels: 17 | - "traefik.enable=true" 18 | - "traefik.http.routers.homepage.entrypoints=http" 19 | - "traefik.http.routers.homepage.rule=Host(`homepage.jimsgarage.co.uk`)" 20 | - "traefik.http.routers.homepage.middlewares=default-whitelist@file" 21 | - "traefik.http.middlewares.homepage-https-redirect.redirectscheme.scheme=https" 22 | - "traefik.http.routers.homepage.middlewares=homepage-https-redirect" 23 | - "traefik.http.routers.homepage-secure.entrypoints=https" 24 | - "traefik.http.routers.homepage-secure.rule=Host(`homepage.jimsgarage.co.uk`)" 25 | - "traefik.http.routers.homepage-secure.tls=true" 26 | - "traefik.http.routers.homepage-secure.service=homepage" 27 | - "traefik.http.services.homepage.loadbalancer.server.port=3000" 28 | # - "traefik.http.routers.homepage-secure.middlewares=default-whitelist@file" # uncomment if you want to use a Traefik whitelist to restrict access 29 | # - "traefik.http.routers.homepage-secure.middlewares=authelia@docker" # uncomment if you want to use authelia 30 | # - "traefik.docker.network=proxy" 31 | security_opt: 32 | - no-new-privileges:true 33 | 34 | networks: 35 | proxy: 36 | external: true 37 | -------------------------------------------------------------------------------- /Homepage/Homepage/services.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://gethomepage.dev/en/configs/services 4 | 5 | - My First Group: 6 | - My First Service: 7 | href: http://localhost/ 8 | description: Homepage is awesome 9 | 10 | - Traefik: 11 | icon: traefik.png 12 | href: "http://traefik.jimsgarage.co.uk" 13 | description: Reverse Proxy 14 | server: my-docker # The docker server that was configured 15 | container: traefik # The name of the container you'd like to connect 16 | widget: 17 | type: traefik 18 | url: https://traefik.jimsgarage.co.uk 19 | username: admin 20 | password: gT8ni3iX6QkKreWfAdYKe4xqVsaMRUQ4GG7xn59Q 21 | 22 | - PiHole: 23 | icon: pi-hole.png 24 | href: "http://pihole.jimsgarage.co.uk" 25 | description: DNS Ad Blocker 26 | server: my-docker # The docker server that was configured 27 | container: pihole # The name of the container you'd like to connect 28 | widget: 29 | type: pihole 30 | url: http://192.168.8.2 31 | key: 73T8oBs9MFKLVAC3mAs2KQbWSsqA7oe2PN9r9H4TQWg2TXNAdq4ZPzvy8oEv 32 | 33 | - My Second Group: 34 | - My Second Service: 35 | href: http://localhost/ 36 | description: Homepage is the best 37 | 38 | - My Third Group: 39 | - My Third Service: 40 | href: http://localhost/ 41 | description: Homepage is 😎 42 | -------------------------------------------------------------------------------- /IT-Tools/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | it-tools: 4 | image: 'corentinth/it-tools:latest' 5 | #ports: 6 | # - '8080:80' 7 | restart: unless-stopped 8 | container_name: it-tools 9 | networks: 10 | - proxy 11 | labels: 12 | - "traefik.enable=true" 13 | - "traefik.docker.network=proxy" 14 | - "traefik.http.routers.it-tools.entrypoints=http" 15 | - "traefik.http.routers.it-tools.rule=Host(`it-tools.jimsgarage.co.uk`)" 16 | - "traefik.http.middlewares.it-tools-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.it-tools.middlewares=it-tools-https-redirect" 18 | - 
"traefik.http.routers.it-tools-secure.entrypoints=https" 19 | - "traefik.http.routers.it-tools-secure.rule=Host(`it-tools.jimsgarage.co.uk`)" 20 | - "traefik.http.routers.it-tools-secure.tls=true" 21 | - "traefik.http.routers.it-tools-secure.tls.certresolver=cloudflare" 22 | - "traefik.http.routers.it-tools-secure.service=it-tools" 23 | - "traefik.http.services.it-tools.loadbalancer.server.port=80" 24 | 25 | networks: 26 | proxy: 27 | external: true -------------------------------------------------------------------------------- /Immich/.env: -------------------------------------------------------------------------------- 1 | # You can find documentation for all the supported env variables at https://immich.app/docs/install/environment-variables 2 | 3 | # The location where your uploaded files are stored 4 | UPLOAD_LOCATION=/home/ubuntu/docker/immich/upload 5 | 6 | # The Immich version to use. You can pin this to a specific version like "v1.71.0" 7 | IMMICH_VERSION=release 8 | 9 | # Connection secrets for postgres and typesense. You should change these to random passwords 10 | TYPESENSE_API_KEY=some-random-text 11 | DB_PASSWORD=postgres 12 | 13 | # The values below this line do not need to be changed 14 | ################################################################################### 15 | DB_HOSTNAME=immich_postgres 16 | DB_USERNAME=postgres 17 | DB_DATABASE_NAME=immich 18 | 19 | REDIS_HOSTNAME=immich_redis 20 | 21 | IMMICH_SERVER_URL=https://immich.yourdomain.com 22 | IMMICH_WEB_URL=https://immich.yourdomain.com 23 | -------------------------------------------------------------------------------- /Immich/hwaccel.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | # Hardware acceleration for transcoding - Optional 4 | # This is only needed if you want to use hardware acceleration for transcoding. 5 | # Depending on your hardware, you should uncomment the relevant lines below. 
6 | 7 | services: 8 | hwaccel: 9 | devices: 10 | - /dev/dri:/dev/dri # If using Intel QuickSync or VAAPI 11 | # volumes: 12 | # - /usr/lib/wsl:/usr/lib/wsl # If using VAAPI in WSL2 13 | # environment: 14 | # - NVIDIA_DRIVER_CAPABILITIES=all # If using NVIDIA GPU 15 | # - LD_LIBRARY_PATH=/usr/lib/wsl/lib # If using VAAPI in WSL2 16 | # - LIBVA_DRIVER_NAME=d3d12 # If using VAAPI in WSL2 17 | # deploy: # Uncomment this section if using NVIDIA GPU 18 | # resources: 19 | # reservations: 20 | # devices: 21 | # - driver: nvidia 22 | # count: 1 23 | # capabilities: [gpu,video] 24 | -------------------------------------------------------------------------------- /Jellyfin/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: jellyfin 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Jellyfin/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: jellyfin 6 | namespace: jellyfin 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.jellyfin.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: jellyfin 17 | port: 8096 18 | - match: Host(`jellyfin.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: jellyfin 22 | port: 8096 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls -------------------------------------------------------------------------------- /Jellyfin/Kubernetes/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: jellyfin 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | egress: 11 | - to: 12 | - ipBlock: 13 | cidr: "0.0.0.0/0" 14 | except: 15 | - "10.0.0.0/8" 16 | - "172.16.0.0/12" 17 | - "192.168.0.0/16" 18 | - to: 19 | - namespaceSelector: 20 | matchLabels: 21 | kubernetes.io/metadata.name: "kube-system" 22 | - podSelector: 23 | matchLabels: 24 | k8s-app: "kube-dns" -------------------------------------------------------------------------------- /Jellyfin/Kubernetes/pv-smb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | annotations: 5 | pv.kubernetes.io/provisioned-by: smb.csi.k8s.io 6 | name: pv-jellyfin-smb 7 | spec: 8 | capacity: 9 | storage: 100Gi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | storageClassName: smb 14 | mountOptions: 15 | - dir_mode=0777 16 | - file_mode=0777 17 | csi: 18 | driver: smb.csi.k8s.io 19 | readOnly: false 20 | # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name} 21 | # make sure this value is unique for every share in the cluster 22 | volumeHandle: jellyfin 23 | volumeAttributes: 24 | source: "//192.168.x.x/your-nas" 25 | nodeStageSecretRef: 26 | 
name: smbcreds 27 | namespace: default -------------------------------------------------------------------------------- /Jellyfin/Kubernetes/pvc-smb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: pvc-jellyfin-smb 6 | namespace: jellyfin 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | volumeName: pv-jellyfin-smb 14 | storageClassName: smb -------------------------------------------------------------------------------- /Jitsi/gen-passwords.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function generatePassword() { 4 | openssl rand -hex 16 5 | } 6 | 7 | JICOFO_AUTH_PASSWORD=$(generatePassword) 8 | JVB_AUTH_PASSWORD=$(generatePassword) 9 | JIGASI_XMPP_PASSWORD=$(generatePassword) 10 | JIBRI_RECORDER_PASSWORD=$(generatePassword) 11 | JIBRI_XMPP_PASSWORD=$(generatePassword) 12 | 13 | sed -i.bak \ 14 | -e "s#JICOFO_AUTH_PASSWORD=.*#JICOFO_AUTH_PASSWORD=${JICOFO_AUTH_PASSWORD}#g" \ 15 | -e "s#JVB_AUTH_PASSWORD=.*#JVB_AUTH_PASSWORD=${JVB_AUTH_PASSWORD}#g" \ 16 | -e "s#JIGASI_XMPP_PASSWORD=.*#JIGASI_XMPP_PASSWORD=${JIGASI_XMPP_PASSWORD}#g" \ 17 | -e "s#JIBRI_RECORDER_PASSWORD=.*#JIBRI_RECORDER_PASSWORD=${JIBRI_RECORDER_PASSWORD}#g" \ 18 | -e "s#JIBRI_XMPP_PASSWORD=.*#JIBRI_XMPP_PASSWORD=${JIBRI_XMPP_PASSWORD}#g" \ 19 | "$(dirname "$0")/.env" 20 | -------------------------------------------------------------------------------- /Kubernetes/Cloud-Init/readme.md: -------------------------------------------------------------------------------- 1 | 1. Download the ISO using the GUI (tested on https://cloud-images.ubuntu.com/lunar/current/lunar-server-cloudimg-amd64-disk-kvm.img) 2 | 1. Create the VM via CLI 3 | ```bash 4 | qm create 5000 --memory 2048 --core 2 --name ubuntu-cloud --net0 virtio,bridge=vmbr0 5 | cd /var/lib/vz/template/iso/ 6 | qm importdisk 5000 lunar-server-cloudimg-amd64-disk-kvm.img 7 | qm set 5000 --scsihw virtio-scsi-pci --scsi0 :5000/vm-5000-disk-0.raw 8 | qm set 5000 --ide2 :cloudinit 9 | qm set 5000 --boot c --bootdisk scsi0 10 | qm set 5000 --serial0 socket --vga serial0 11 | ``` 12 | 3. Expand the VM disk size to a suitable size (suggested 10 GB) 13 | ```bash 14 | qm disk resize 5000 scsi0 10G 15 | ``` 16 | 4. Create the Cloud-Init template 17 | 5. Deploy new VMs by cloning the template (full clone) 18 | -------------------------------------------------------------------------------- /Kubernetes/Create-VMS/readme.md: -------------------------------------------------------------------------------- 1 | # Simple script to create multiple Virtual Machines automatically 2 | 3 | 1. It will ask you some questions about the Virtual Machines you want to create. 4 | - You can select a Debian or Ubuntu image 5 | 2. Prints detailed info about the VMs that are going to be created. 6 | 3. Lets you confirm whether you want to continue 7 | 4.
You can choose to start all VMs at the end 🚀 8 | 9 | Enjoy 🙂 10 | -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/Portainer/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: portainer 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/Portainer/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: portainer 6 | namespace: portainer 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.portainer.yourdomain.com`) # change me 14 | kind: Rule 15 | services: 16 | - name: portainer 17 | port: 9443 18 | - match: Host(`portainer.yourdomain.com`) # change me 19 | kind: Rule 20 | services: 21 | - name: portainer 22 | port: 9443 23 | scheme: https 24 | passHostHeader: true 25 | middlewares: 26 | - name: default-headers 27 | tls: 28 | secretName: yourdomain-tls # change me 29 | -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/Portainer/values.yaml: -------------------------------------------------------------------------------- 1 | nodeSelector: 2 | worker: "true" 3 | 4 | service: 5 | enabled: true 6 | type: LoadBalancer 7 | annotations: {} 8 | labels: {} 9 | loadBalancerSourceRanges: [] 10 | externalIPs: [] 11 | -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/WireGuard-Easy/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: wg-easy 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/WireGuard-Easy/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: wg-easy 6 | namespace: wg-easy 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.wg-easy.yourdomain.com`) # change me 14 | kind: Rule 15 | services: 16 | - name: wg-easy-web 17 | port: 51821 18 | - match: Host(`wg-easy.yourdomain.com`) # change me 19 | kind: Rule 20 | services: 21 | - name: wg-easy-web 22 | port: 51821 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls # change me 27 | --------------------------------------------------------------------------------
/Kubernetes/Create-manifest-helm/WireGuard-Easy/ingressRouteUDP.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: IngressRouteUDP 3 | metadata: 4 | name: wg-easy 5 | namespace: wg-easy 6 | annotations: 7 | kubernetes.io/ingress.class: traefik-external 8 | spec: 9 | entryPoints: 10 | - wireguard 11 | routes: 12 | - services: 13 | - name: wg-easy-udp 14 | port: 51820 -------------------------------------------------------------------------------- /Kubernetes/Create-manifest-helm/readme.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Kubernetes/Create-manifest-helm/readme.md -------------------------------------------------------------------------------- /Kubernetes/CrowdSec/Bouncer/bouncer-middleware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: bouncer 5 | namespace: traefik 6 | spec: 7 | plugin: 8 | bouncer: 9 | enabled: true 10 | crowdsecMode: none 11 | crowdsecLapiScheme: https 12 | crowdsecLapiHost: crowdsec-service.crowdsec:8080 13 | crowdsecLapiTLSCertificateAuthorityFile: /etc/traefik/crowdsec-certs/ca.crt 14 | crowdsecLapiTLSCertificateBouncerFile: /etc/traefik/crowdsec-certs/tls.crt 15 | crowdsecLapiTLSCertificateBouncerKeyFile: /etc/traefik/crowdsec-certs/tls.key -------------------------------------------------------------------------------- /Kubernetes/CrowdSec/Reflector/values.yaml: -------------------------------------------------------------------------------- 1 | nodeSelector: 2 | worker: "true" -------------------------------------------------------------------------------- /Kubernetes/CrowdSec/readme.md: -------------------------------------------------------------------------------- 1 | # Instructions 2 | 1. ``` 3 | helm install \ 4 | cert-manager jetstack/cert-manager \ 5 | --create-namespace \ 6 | --namespace cert-manager \ 7 | --set installCRDs=true 8 | 9 | helm install \ 10 | reflector emberstack/reflector \ 11 | --create-namespace \ 12 | --namespace reflector 13 | ``` 14 | 2. ``` 15 | helm upgrade \ 16 | traefik traefik/traefik \ 17 | --namespace traefik \ 18 | -f traefik-values.yaml 19 | 20 | helm install \ 21 | crowdsec crowdsec/crowdsec \ 22 | --create-namespace \ 23 | --namespace crowdsec \ 24 | -f crowdsec-values.yaml 25 | ``` -------------------------------------------------------------------------------- /Kubernetes/Docker-Kubernetes-Data-Migration/readme.md: -------------------------------------------------------------------------------- 1 | # Instructions 2 | 1. Create Longhorn volume 3 | 2. Mount longhorn volume to a node (recommend worker node) 4 | 3. Log into selected worker node 5 | 4. Create temporary folder for migration 6 | > sudo mkdir /tmp/folder 7 | 5. List the disks to format 8 | > sudo fdisk -l 9 | 6. Format the disk 10 | > sudo mkfs -t ext4 /dev/sdx 11 | 7. Mount the new disk to tmp folder 12 | > sudo mount /dev/sdx /tmp/folder 13 | 8. 
Copy data from Docker Host to new drive (substitute your values below) 14 | > sudo rsync -avxHAX username@DockerHostIP:/home/ubuntu/docker/some-directory/* /tmp/folder -------------------------------------------------------------------------------- /Kubernetes/GPU-Passthrough/jellyfin.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: jellyfin 7 | app.kubernetes.io/instance: jellyfin 8 | app.kubernetes.io/name: jellyfin 9 | name: jellyfin 10 | namespace: jellyfin 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: jellyfin 16 | template: 17 | metadata: 18 | labels: 19 | app: jellyfin 20 | app.kubernetes.io/name: jellyfin 21 | spec: 22 | nodeSelector: 23 | worker: "true" 24 | containers: 25 | - image: jellyfin/jellyfin 26 | imagePullPolicy: Always 27 | name: jellyfin 28 | resources: 29 | limits: 30 | gpu.intel.com/i915: "1" # requesting 1 GPU 31 | ports: 32 | - containerPort: 8096 33 | name: web 34 | protocol: TCP 35 | env: 36 | - name: TZ 37 | value: Europe/London 38 | volumeMounts: 39 | - mountPath: /config 40 | name: jellyfin 41 | subPath: config 42 | - mountPath: /cache 43 | name: jellyfin 44 | subPath: cache 45 | volumes: 46 | - name: jellyfin 47 | persistentVolumeClaim: 48 | claimName: jellyfin 49 | --- 50 | apiVersion: v1 51 | kind: Service 52 | metadata: 53 | labels: 54 | app: jellyfin 55 | name: jellyfin 56 | namespace: jellyfin 57 | spec: 58 | ports: 59 | - name: web-tcp 60 | port: 8096 61 | protocol: TCP 62 | targetPort: 8096 63 | - name: web-udp 64 | port: 8096 65 | protocol: UDP 66 | targetPort: 8096 67 | selector: 68 | app: jellyfin -------------------------------------------------------------------------------- /Kubernetes/GPU-Passthrough/readme.md: -------------------------------------------------------------------------------- 1 | # Create directory 2 | ``` 3 | mkdir -p /etc/rancher/rke2 4 | ``` 5 | # Create File for RKE2 - Config 6 | ``` 7 | sudo nano /etc/rancher/rke2/config.yaml 8 | ``` 9 | # Add values 10 | ``` 11 | token: 12 | server: https://:9345 13 | node-label: 14 | - worker=true 15 | - longhorn=true 16 | ``` 17 | # Install RKE2 18 | ``` 19 | sudo su 20 | curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh - 21 | ``` 22 | # Enable RKE2 23 | ``` 24 | systemctl enable rke2-agent.service 25 | systemctl start rke2-agent.service 26 | ``` -------------------------------------------------------------------------------- /Kubernetes/GitOps/Gotify/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: gotify 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Kubernetes/GitOps/Gotify/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: gotify 5 | namespace: gotify 6 | labels: 7 | app: gotify 8 | app.kubernetes.io/instance: gotify 9 | app.kubernetes.io/name: gotify 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: gotify 14 | replicas: 1 15 | 
template: 16 | metadata: 17 | labels: 18 | app: gotify 19 | app.kubernetes.io/instance: gotify 20 | app.kubernetes.io/name: gotify 21 | spec: 22 | nodeSelector: 23 | worker: "true" 24 | containers: 25 | - name: gotify 26 | image: gotify/server:latest 27 | env: 28 | - name: GOTIFY_DB_SQLITE 29 | value: /app/data/gotify.db 30 | - name: GOTIFY_PORT 31 | value: "80" 32 | ports: 33 | - containerPort: 80 34 | volumeMounts: 35 | - name: gotify 36 | mountPath: /app/data 37 | volumes: 38 | - name: gotify 39 | persistentVolumeClaim: 40 | claimName: gotify 41 | --- 42 | apiVersion: v1 43 | kind: Service 44 | metadata: 45 | name: gotify 46 | namespace: gotify 47 | labels: 48 | app: gotify 49 | app.kubernetes.io/instance: gotify 50 | app.kubernetes.io/name: gotify 51 | spec: 52 | type: ClusterIP 53 | ports: 54 | - name: http 55 | port: 80 56 | protocol: TCP 57 | targetPort: 80 58 | selector: 59 | app: gotify 60 | --- 61 | apiVersion: v1 62 | kind: PersistentVolumeClaim 63 | metadata: 64 | name: gotify 65 | namespace: gotify 66 | labels: 67 | app: gotify 68 | app.kubernetes.io/instance: gotify 69 | app.kubernetes.io/name: gotify 70 | spec: 71 | accessModes: 72 | - ReadWriteOnce 73 | resources: 74 | requests: 75 | storage: 1Gi 76 | storageClassName: longhorn 77 | -------------------------------------------------------------------------------- /Kubernetes/GitOps/Gotify/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: gotify 6 | namespace: gotify 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.gotify.jimsgarage.co.uk`) # change to your domain 14 | kind: Rule 15 | services: 16 | - name: gotify 17 | port: 80 18 | - match: Host(`gotify.jimsgarage.co.uk`) # change to your domain 19 | kind: Rule 20 | services: 21 | - name: gotify 22 | port: 80 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: jimsgarage-tls # change to your cert 27 | -------------------------------------------------------------------------------- /Kubernetes/GitOps/Grafana/fleet.yaml: -------------------------------------------------------------------------------- 1 | defaultNamespace: grafana 2 | 3 | helm: 4 | chart: grafana 5 | repo: https://grafana.github.io/helm-charts 6 | releaseName: "grafana" 7 | version: "" 8 | namespace: grafana 9 | valuesFiles: 10 | - values.yaml 11 | -------------------------------------------------------------------------------- /Kubernetes/GitOps/readme.md: -------------------------------------------------------------------------------- 1 | # Instructions 2 | 1. Create GitHub (or whatever) repository 3 | 2. Create granular access token 4 | 3. Add details to Fleet in Rancher via 'Continuous Delivery' button (left panel) 5 | 4. Ensure you are in 'Fleet-local' or whatever your cluster is called 6 | 5. Ensure the login details are correct, branch is set to main (default) 7 | 6. 
Once created, it should pull any files you have in that repository and deploy on your cluster 8 | -------------------------------------------------------------------------------- /Kubernetes/K3S-Deploy/ipAddressPool: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: first-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - $lbrange -------------------------------------------------------------------------------- /Kubernetes/K3S-Deploy/l2Advertisement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: example 5 | namespace: metallb-system 6 | spec: 7 | ipAddressPools: 8 | - first-pool -------------------------------------------------------------------------------- /Kubernetes/K3S-Deploy/readme.md: -------------------------------------------------------------------------------- 1 | 1. Snapshot your VMs! 2 | 1. Add certificates to your home directory 3 | 1. copy script to home directory and make executable 4 | 5 | 1. Run the script, grab a coffee and enjoy :) (hopefully!) 6 | -------------------------------------------------------------------------------- /Kubernetes/Longhorn/readme.md: -------------------------------------------------------------------------------- 1 | 1. Create new VMs (3 in my example) 2 | 2. Remember to increase storage from default template (you should also increase Master and Worker nodes if you haven't). 3 | 3. Make static IPs for each 4 | 4. Copy script (longhorn.sh) to your admin machine (make executable) 5 | 5. Run script 6 | 6. Enjoy Longhorn 7 | 8 | Let me know if it doesn't work! 
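A quick, optional sanity check once the script finishes (assumes the default `longhorn-system` namespace and `longhorn` StorageClass names):
```bash
# Verify Longhorn pods are running and the default StorageClass exists
kubectl -n longhorn-system get pods
kubectl get storageclass longhorn
```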
9 | -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/allow-all-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: allow-all-ingress 6 | spec: 7 | podSelector: {} 8 | ingress: 9 | - {} 10 | policyTypes: 11 | - Ingress 12 | -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/default-deny-all-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: default-deny-ingress 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | role: db 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - ipBlock: 16 | cidr: 172.17.0.0/16 17 | except: 18 | - 172.17.1.0/24 19 | - namespaceSelector: 20 | matchLabels: 21 | project: myproject 22 | - podSelector: 23 | matchLabels: 24 | role: frontend 25 | ports: 26 | - protocol: TCP 27 | port: 6379 28 | egress: 29 | - to: 30 | - ipBlock: 31 | cidr: 10.0.0.0/24 32 | ports: 33 | - protocol: TCP 34 | port: 5978 35 | 36 | -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/namespace-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: egress-namespaces 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: myapp 9 | policyTypes: 10 | - Egress 11 | egress: 12 | - to: 13 | - namespaceSelector: 14 | matchExpressions: 15 | - key: namespace 16 | operator: In 17 | values: ["frontend", "backend"] -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/networkpolicy-egress.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: pihole 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | egress: 11 | - to: 12 | - ipBlock: 13 | cidr: 0.0.0.0/0 14 | except: 15 | - 10.0.0.0/8 16 | - 192.168.0.0/16 17 | - 172.16.0.0/20 18 | - to: 19 | - namespaceSelector: 20 | matchLabels: 21 | kubernetes.io/metadata.name: "kube-system" 22 | - podSelector: 23 | matchLabels: 24 | k8s-app: "kube-dns" -------------------------------------------------------------------------------- /Kubernetes/NetworkPolicies/networkpolicy-ingress.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: restrict-internal 5 | namespace: pihole 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | ingress: 11 | - from: 12 | - ipBlock: 13 | cidr: 0.0.0.0/0 14 | except: 15 | - 10.0.0.0/8 16 | - 192.168.0.0/16 17 | - 172.16.0.0/20 -------------------------------------------------------------------------------- 
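A short usage sketch for the policies above. Note that NetworkPolicy objects only take effect if your CNI enforces them (e.g. Cilium or Calico); with plain Flannel they are silently ignored:
```bash
# Apply a policy and confirm the cluster registered it
kubectl apply -f networkpolicy-ingress.yaml
kubectl get networkpolicy -n pihole
kubectl describe networkpolicy restrict-internal -n pihole
```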
/Kubernetes/NetworkPolicies/port-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: multi-port-egress 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | role: db 10 | policyTypes: 11 | - Egress 12 | egress: 13 | - to: 14 | - ipBlock: 15 | cidr: 10.0.0.0/24 16 | ports: 17 | - protocol: TCP 18 | port: 32000 19 | endPort: 32768 20 | 21 | -------------------------------------------------------------------------------- /Kubernetes/RKE2-Cilium/rke2-cilium-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.cattle.io/v1 3 | kind: HelmChartConfig 4 | metadata: 5 | name: rke2-cilium 6 | namespace: kube-system 7 | spec: 8 | valuesContent: |- 9 | kubeProxyReplacement: strict 10 | k8sServiceHost: 11 | k8sServicePort: 6443 12 | cni: 13 | chainingMode: "none" -------------------------------------------------------------------------------- /Kubernetes/RKE2/ipAddressPool: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: first-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - $lbrange -------------------------------------------------------------------------------- /Kubernetes/RKE2/l2Advertisement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: example 5 | namespace: metallb-system 6 | spec: 7 | ipAddressPools: 8 | - first-pool -------------------------------------------------------------------------------- /Kubernetes/SMB/pv-smb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | annotations: 5 | pv.kubernetes.io/provisioned-by: smb.csi.k8s.io 6 | name: pv-jellyfin-smb 7 | spec: 8 | capacity: 9 | storage: 100Gi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | storageClassName: smb 14 | mountOptions: 15 | - dir_mode=0777 16 | - file_mode=0777 17 | csi: 18 | driver: smb.csi.k8s.io 19 | readOnly: false 20 | # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name} 21 | # make sure this value is unique for every share in the cluster 22 | volumeHandle: jellyfin 23 | volumeAttributes: 24 | source: "//192.168.6.2/FreeNAS" # Change this to your SMB IP and share name 25 | nodeStageSecretRef: 26 | name: smbcreds 27 | namespace: default -------------------------------------------------------------------------------- /Kubernetes/SMB/pvc-smb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: pvc-jellyfin-smb 6 | namespace: jellyfin 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | volumeName: pv-jellyfin-smb 14 | storageClassName: smb -------------------------------------------------------------------------------- /Kubernetes/SMB/readme.md: -------------------------------------------------------------------------------- 1 | # Install CSI driver 2 | ``` 3 | curl -skSL https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/v1.13.0/deploy/install-driver.sh | bash -s v1.13.0 -- 4 | ``` 5 | 6 | # Create SMB creds 7 | ``` 8 | kubectl 
create secret generic smbcreds --from-literal username=USERNAME --from-literal password="PASSWORD" 9 | ``` 10 | 11 | # Create storage class 12 | ``` 13 | kubectl create -f https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/deploy/example/storageclass-smb.yaml 14 | ``` 15 | 16 | # Check status 17 | ``` 18 | kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-controller 19 | kubectl -n kube-system get pod -o wide --watch -l app=csi-smb-node 20 | ``` -------------------------------------------------------------------------------- /Kubernetes/Traefik-External-Service/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: default 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Kubernetes/Traefik-External-Service/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: proxmox 6 | namespace: default 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.proxmox.jimsgarage.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: proxmox 17 | port: 8006 18 | scheme: https 19 | passHostHeader: true 20 | - match: Host(`proxmox.jimsgarage.co.uk`) 21 | kind: Rule 22 | services: 23 | - name: proxmox 24 | port: 8006 25 | scheme: https 26 | passHostHeader: true 27 | middlewares: 28 | - name: default-headers 29 | tls: 30 | secretName: jimsgarage-tls 31 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-External-Service/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: proxmox 5 | namespace: default 6 | spec: 7 | externalName: 192.168.200.75 8 | type: ExternalName 9 | ports: 10 | - name: websecure 11 | port: 8006 12 | targetPort: 8006 -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Cert-Manager/Certificates/Production/jimsgarage-production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: jimsgarage # change to your domain 6 | namespace: traefik # add to traefik namespace so it can use it (you DO NOT need it in each app namespace!!!) 
7 | spec: 8 | secretName: jimsgarage-tls # change to your secretname 9 | issuerRef: 10 | name: letsencrypt-production 11 | kind: ClusterIssuer 12 | commonName: "*.jimsgarage.co.uk" # change to your domain 13 | dnsNames: 14 | - "*.jimsgarage.co.uk" # change to your domain 15 | - jimsgarage.co.uk # change to your domain 16 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Cert-Manager/Issuers/letsencrypt-production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-production 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: your@email.com # add your email 10 | privateKeySecretRef: 11 | name: letsencrypt-production 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: your@email.com # add your email to your cloudflare account 16 | apiTokenSecretRef: 17 | name: cloudflare-token-secret 18 | key: cloudflare-token 19 | selector: 20 | dnsZones: 21 | - "your-domain.com" # change to your zone on CloudFlare 22 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Cert-Manager/Issuers/secret-cf-token.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: cloudflare-token-secret 6 | namespace: cert-manager 7 | type: Opaque 8 | stringData: 9 | cloudflare-token: # MUST be an API token, NOT a Global API key!!! https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/#api-tokens 10 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Cert-Manager/values.yaml: -------------------------------------------------------------------------------- 1 | installCRDs: false 2 | replicaCount: 3 # change to number of masternodes 3 | extraArgs: # required for querying for certificate 4 | - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 5 | - --dns01-recursive-nameservers-only 6 | podDnsPolicy: None 7 | podDnsConfig: 8 | nameservers: 9 | - 1.1.1.1 10 | - 9.9.9.9 11 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Dashboard/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: traefik-dashboard 5 | namespace: traefik 6 | annotations: 7 | kubernetes.io/ingress.class: traefik-external 8 | spec: 9 | entryPoints: 10 | - websecure 11 | routes: 12 | - match: Host(`traefik2.jimsgarage.co.uk`) # change this to your domain 13 | kind: Rule 14 | middlewares: 15 | - name: traefik-dashboard-basicauth 16 | namespace: traefik 17 | services: 18 | - name: api@internal 19 | kind: TraefikService 20 | tls: 21 | secretName: jimsgarage-tls # change this to your secret!!! 
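The basicAuth middleware and secret defined next expect an htpasswd-style user string, base64-encoded for the Secret's `data` field. A hedged sketch of generating it (assumes `htpasswd` from apache2-utils is installed; substitute your own username and password):
```bash
# Generate "user:hash" with htpasswd, then base64-encode it for the `users` field of the Secret
htpasswd -nb admin 'SuperSecretPassword' | base64
```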
22 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Dashboard/middleware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: traefik-dashboard-basicauth 5 | namespace: traefik 6 | spec: 7 | basicAuth: 8 | secret: traefik-dashboard-auth 9 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/Dashboard/secret-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: traefik-dashboard-auth 6 | namespace: traefik 7 | type: Opaque 8 | data: 9 | users: YWRtaW46JGFwcjEkdHgxckxyMm8kMkNvMnkuQnNHdHlMbURURm5FMlFDLwoK # admin:password 10 | # to generate the above, first create the base64 password as you previously did for Docker (or just copy it). You then need to base64 that again and paste above. 11 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: traefik 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https 17 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Helm/Traefik/values.yaml: -------------------------------------------------------------------------------- 1 | globalArguments: 2 | - "--global.sendanonymoususage=false" 3 | - "--global.checknewversion=false" 4 | 5 | additionalArguments: 6 | - "--serversTransport.insecureSkipVerify=true" 7 | - "--log.level=INFO" 8 | 9 | deployment: 10 | enabled: true 11 | replicas: 2 # match with number of workers 12 | annotations: {} 13 | podAnnotations: {} 14 | additionalContainers: [] 15 | initContainers: [] 16 | 17 | nodeSelector: 18 | worker: "true" # add these labels to your worker nodes before running - see video 19 | 20 | ports: 21 | web: 22 | redirectTo: 23 | port: websecure 24 | priority: 10 25 | websecure: 26 | tls: 27 | enabled: true 28 | 29 | ingressRoute: 30 | dashboard: 31 | enabled: false 32 | 33 | providers: 34 | kubernetesCRD: 35 | enabled: true 36 | ingressClass: traefik-external 37 | allowExternalNameServices: true 38 | kubernetesIngress: 39 | enabled: true 40 | allowExternalNameServices: true 41 | publishedService: 42 | enabled: false 43 | 44 | rbac: 45 | enabled: true 46 | 47 | service: 48 | enabled: true 49 | type: LoadBalancer 50 | annotations: {} 51 | labels: {} 52 | spec: 53 | loadBalancerIP: 192.168.3.65 # this should be an IP in the Kube-VIP range 54 | loadBalancerSourceRanges: [] 55 | externalIPs: [] 56 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Manifest/PiHole/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: pihole 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | 
contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https 17 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/Manifest/PiHole/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: pihole 6 | namespace: pihole 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.pihole3.jimsgarage.co.uk`) # change to your domain 14 | kind: Rule 15 | services: 16 | - name: pihole 17 | port: 80 18 | - match: Host(`pihole3.jimsgarage.co.uk`) # change to your domain 19 | kind: Rule 20 | services: 21 | - name: pihole 22 | port: 80 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: jimsgarage-tls # change to your cert name 27 | -------------------------------------------------------------------------------- /Kubernetes/Traefik-PiHole/readme.md: -------------------------------------------------------------------------------- 1 | # IMPORTANT # 2 | Make sure that you watch the video instructions carefully as you need to amend the files correctly. 3 | YOU CANNOT JUST RUN THIS SCRIPT! 4 | Incorrect use can result in you being locked out of Lets Encrypt for a period of time. 5 | 6 | # NOTE FOR TRAEFIK v3 # 7 | Many guides out there (including, until recently, this repo) reference an older version of the Kubernetes CRDs API group. 8 | This older version is [deprecated](https://doc.traefik.io/traefik/master/migration/v2-to-v3/#kubernetes-crds-api-group-traefikcontainous) 9 | as of Traefik v3 (released [29 April 2024](https://github.com/traefik/traefik/releases/tag/v3.0.0)) and must be updated to the new version 10 | in your IngressRoute, Middleware, ServersTransport, etc. yaml manifests for Traefik. Any resources with the deprecated version will not 11 | be recognized by Traefik v3. 12 | 13 | Old, deprecated version: 14 | ```yaml 15 | apiVersion: traefik.containo.us/v1alpha1 16 | ``` 17 | 18 | New, supported version: 19 | ```yaml 20 | apiVersion: traefik.io/v1alpha1 21 | ``` 22 | This new version is also supported in later releases of Traefik v2, so you can update your Traefik-related manifests 23 | to the new version and apply the updated manifests before upgrading your Traefik deployment. 
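If you have many manifests to touch, something along these lines can find and rewrite the deprecated group in bulk (the path is illustrative — point it at wherever your Traefik YAML lives, and review the diff before applying):
```bash
# Find files still referencing the deprecated CRD group and rewrite them in place
grep -rl 'traefik.containo.us/v1alpha1' ./manifests \
  | xargs sed -i 's#traefik.containo.us/v1alpha1#traefik.io/v1alpha1#g'
```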
24 | 25 | It may be worth reviewing other v2 to v3 migration notes provided by Traefik: 26 | [Traefik v2 to v3 Migration](https://doc.traefik.io/traefik/master/migration/v2-to-v3/) 27 | -------------------------------------------------------------------------------- /LXC/Jellyfin/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | jellyfin: 4 | image: jellyfin/jellyfin 5 | container_name: jellyfin 6 | group_add: 7 | - '107' # This needs to be the group id of your GPU, e.g., `stat -c '%g' /dev/dri/renderD128` on the docker host for iGPU 8 | environment: 9 | - TZ=Europe/London 10 | volumes: 11 | - ./jellyfin/config:/config 12 | - ./jellyfin/cache:/cache 13 | - /films:/films 14 | # - /home/ubuntu/YOUR_NAS/Films:/Films:ro 15 | # - /home/ubuntu/YOUR_NAS/TVShows:/TVShows:ro 16 | # - /home/ubuntu/YOUR_NAS/Audiobooks:/Audiobooks:ro 17 | # - /home/ubuntu/YOUR_NAS/Music:/Music:ro 18 | ports: # You will need to uncomment if you aren't running through a proxy 19 | - 8096:8096 20 | - 8920:8920 #optional 21 | - 7359:7359/udp #optional 22 | - 1900:1900/udp #optional 23 | devices: # uncomment these and amend if you require GPU accelerated transcoding 24 | - /dev/dri/renderD128:/dev/dri/renderD128 25 | restart: unless-stopped -------------------------------------------------------------------------------- /LXC/Jellyfin/readme.md: -------------------------------------------------------------------------------- 1 | # Find Device Numbers 2 | ``` 3 | ls -l /dev/dri 4 | ``` 5 | 6 | # Find Group Numbers 7 | ``` 8 | cat /etc/group 9 | ``` 10 | 11 | # Add Group Numbers Values to subgid 12 | Change values to map the to above ^^ 13 | ``` 14 | nano /etc/subgid 15 | ``` 16 | Paste at the bottom, for example: 17 | ``` 18 | root:44:1 19 | root:104:1 20 | ``` 21 | 22 | # Create CT Using Wizard. Edit .conf In /etc/pve/lxc 23 | Edit your device IDs and renderD*** 24 | Ensure you match the idmap values 25 | ``` 26 | arch: amd64 27 | cores: 2 28 | cpulimit: 2 29 | features: nesting=1 30 | hostname: test-gpu-04 31 | memory: 3000 32 | net0: name=eth0,bridge=vmbr0,firewall=1,hwaddr=BC:24:11:06:18:78,ip=dhcp,type=veth 33 | ostype: debian 34 | rootfs: local-lvm:vm-104-disk-0,size=20G 35 | swap: 512 36 | unprivileged: 1 37 | lxc.cgroup2.devices.allow: c 226:0 rwm 38 | lxc.cgroup2.devices.allow: c 226:128 rwm 39 | lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file 40 | lxc.idmap: u 0 100000 65536 41 | lxc.idmap: g 0 100000 44 42 | lxc.idmap: g 44 44 1 43 | lxc.idmap: g 45 100045 62 44 | lxc.idmap: g 107 104 1 45 | lxc.idmap: g 108 100108 65428 46 | ``` 47 | 48 | # Add Root to Groups 49 | Do this on your Proxmox Host 50 | ``` 51 | usermod -aG render,video root 52 | ``` 53 | 54 | # Whatever You Want... 55 | Install Docker, run apps, even change your LXC for a Linux Desktop!!! 
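A small, optional check once the container is up (the group IDs are just the example values from the config above — yours may differ):
```bash
# Inside the LXC: confirm the render node is visible and note its numeric group id;
# that gid is what you pass to Docker via group_add ('107' in the compose example)
ls -ln /dev/dri
```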
56 | -------------------------------------------------------------------------------- /LXC/NAS/readme.md: -------------------------------------------------------------------------------- 1 | # On LXC Create User Group 2 | ``` 3 | groupadd -g 10000 lxc_shares 4 | ``` 5 | 6 | # Optional: Add Other Users to Group (e.g., Jellyfin, Plex) 7 | ``` 8 | usermod -aG lxc_shares USERNAME 9 | ``` 10 | 11 | # On Proxmox Host 12 | Create a folder to mount the NAS 13 | ``` 14 | mkdir -p /mnt/lxc_shares/nas_rwx 15 | ``` 16 | 17 | # Add NAS CIFS share to /etc/fstab 18 | Replace //NAS-IP-ADDRESS with your NAS IP 19 | Replace Username and Passwords 20 | ``` 21 | { echo '' ; echo '# Mount CIFS share on demand with rwx permissions for use in LXCs ' ; echo '//NAS-IP-ADDRESS/nas/ /mnt/lxc_shares/nas_rwx cifs _netdev,x-systemd.automount,noatime,uid=100000,gid=110000,dir_mode=0770,file_mode=0770,user=smb_username,pass=smb_password 0 0' ; } | tee -a /etc/fstab 22 | 23 | ``` 24 | 25 | # Mount the NAS to the Proxmox Host 26 | ``` 27 | mount /mnt/lxc_shares/nas_rwx 28 | ``` 29 | 30 | # Add Bind Mount of the Share to the LXC Config 31 | Be sure to change the LXC_ID 32 | ``` 33 | You can mount it in the LXC with read+write+execute (rwx) permissions. 34 | { echo 'mp0: /mnt/lxc_shares/nas_rwx/,mp=/mnt/nas' ; } | tee -a /etc/pve/lxc/LXC_ID.conf 35 | 36 | You can also mount it in the LXC with read-only (ro) permissions. 37 | { echo 'mp0: /mnt/lxc_shares/nas_rwx/,mp=/mnt/nas,ro=1' ; } | tee -a /etc/pve/lxc/LXC_ID.conf 38 | ``` 39 | 40 | Thanks to https://forum.proxmox.com/members/thehellsite.88343/ for tips -------------------------------------------------------------------------------- /Linkwarden/.env: -------------------------------------------------------------------------------- 1 | NEXTAUTH_URL=https://linkwarden.jimsgarage.co.uk/api/v1/auth 2 | # NEXTAUTH_URL=http://localhost:3000/api/v1/auth # Uncomment this if you don't want to use another Identity Provider 3 | NEXTAUTH_SECRET=linkwarden 4 | POSTGRES_PASSWORD=wGZM%rg9%J5Vl*sALx^h7roKPXN@JcqZ 5 | 6 | # SMTP Settings 7 | #NEXT_PUBLIC_EMAIL_PROVIDER= 8 | #EMAIL_FROM= 9 | #EMAIL_SERVER= 10 | #BASE_URL= 11 | 12 | ################# 13 | # SSO Providers # 14 | ################# 15 | 16 | # Authentik 17 | NEXT_PUBLIC_AUTHENTIK_ENABLED=true 18 | AUTHENTIK_CUSTOM_NAME= 19 | AUTHENTIK_ISSUER=https://authentik.jimsgarage.co.uk/application/o/linkwarden 20 | AUTHENTIK_CLIENT_ID=hWmxt6sLvDmqNFdl4WsE3IvLVv2VMmPhv6dGX1lZ 21 | AUTHENTIK_CLIENT_SECRET=a6ersBnO48CfmbIcri7zMxIDgZynTSuDTsRYsb5bppvc2OAH6U9ho8CtQLLqM0o3cUaU4mtElvOD3xjeDUt7VXhd19VRrtLliaexFGv48vltMYAVg413wbquZtQ7Gx1J -------------------------------------------------------------------------------- /Linkwarden/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | image: postgres:16-alpine 4 | env_file: .env 5 | restart: always 6 | volumes: 7 | - /home/ubuntu/docker/linkwarden/pgdata:/var/lib/postgresql/data 8 | networks: 9 | - linkwarden 10 | linkwarden: 11 | env_file: .env 12 | environment: 13 | - DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgres:5432/postgres 14 | restart: always 15 | # build: . 
# uncomment this line to build from source 16 | image: ghcr.io/linkwarden/linkwarden:latest # comment this line to build from source 17 | #ports: 18 | # - 3000:3000 19 | volumes: 20 | - /home/ubuntu/docker/linkwarden/data:/data/data 21 | depends_on: 22 | - postgres 23 | networks: 24 | - proxy 25 | - linkwarden 26 | labels: 27 | - "traefik.enable=true" 28 | - "traefik.docker.network=proxy" 29 | - "traefik.http.routers.linkwarden.entrypoints=http" 30 | - "traefik.http.routers.linkwarden.rule=Host(`linkwarden.jimsgarage.co.uk`)" 31 | - "traefik.http.middlewares.linkwarden-https-redirect.redirectscheme.scheme=https" 32 | - "traefik.http.routers.linkwarden.middlewares=linkwarden-https-redirect" 33 | - "traefik.http.routers.linkwarden-secure.entrypoints=https" 34 | - "traefik.http.routers.linkwarden-secure.rule=Host(`linkwarden.jimsgarage.co.uk`)" 35 | - "traefik.http.routers.linkwarden-secure.tls=true" 36 | - "traefik.http.routers.linkwarden-secure.tls.certresolver=cloudflare" 37 | - "traefik.http.routers.linkwarden-secure.service=linkwarden" 38 | - "traefik.http.services.linkwarden.loadbalancer.server.port=3000" # make sure the loadbalancer is the last line!!! 39 | 40 | networks: 41 | proxy: 42 | external: true 43 | linkwarden: -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (1).mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (1).mp4 -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (1).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (1).png -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (2).png -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (3).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (3).png -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (4).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (4).png -------------------------------------------------------------------------------- /Logo/Jim's Garage-1 (5).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1 (5).png -------------------------------------------------------------------------------- /Logo/Jim's Garage-1.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1.mp4 -------------------------------------------------------------------------------- 
/Logo/Jim's Garage-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim's Garage-1.png -------------------------------------------------------------------------------- /Logo/Jim'sGarage-1(2).png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Logo/Jim'sGarage-1(2).png -------------------------------------------------------------------------------- /Minecraft/Kubernetes/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: minecraft 7 | app.kubernetes.io/instance: minecraft 8 | name: minecraft 9 | namespace: minecraft 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app: minecraft 15 | template: 16 | metadata: 17 | labels: 18 | app: minecraft 19 | spec: 20 | nodeSelector: 21 | worker: "true" 22 | containers: 23 | - env: 24 | - name: EULA 25 | value: "TRUE" 26 | image: itzg/minecraft-server 27 | name: minecraft-server 28 | ports: 29 | - containerPort: 25565 30 | resources: {} 31 | stdin: true 32 | tty: true 33 | volumeMounts: 34 | - mountPath: /data 35 | name: minecraft 36 | restartPolicy: Always 37 | volumes: 38 | - name: minecraft 39 | persistentVolumeClaim: 40 | claimName: minecraft 41 | --- 42 | apiVersion: v1 43 | kind: Service 44 | metadata: 45 | labels: 46 | app: minecraft 47 | name: minecraft 48 | namespace: minecraft 49 | spec: 50 | ports: 51 | - name: minecraft-tcp 52 | port: 25565 53 | protocol: TCP 54 | targetPort: 25565 55 | selector: 56 | app: minecraft 57 | externalTrafficPolicy: Local 58 | loadBalancerIP: 192.168.200.19 59 | type: LoadBalancer -------------------------------------------------------------------------------- /Minecraft/Kubernetes/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: minecraft 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | egress: 11 | - to: 12 | - ipBlock: 13 | cidr: "0.0.0.0/0" 14 | except: 15 | - "10.0.0.0/8" 16 | - "172.16.0.0/12" 17 | - "192.168.0.0/16" 18 | - to: 19 | - namespaceSelector: 20 | matchLabels: 21 | kubernetes.io/metadata.name: "kube-system" 22 | - podSelector: 23 | matchLabels: 24 | k8s-app: "kube-dns" -------------------------------------------------------------------------------- /Minecraft/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | services: 3 | minecraft-server: 4 | container_name: minecraft-server 5 | image: itzg/minecraft-server 6 | ports: 7 | - 25565:25565 8 | environment: 9 | - TYPE=FORGE 10 | - EULA=TRUE 11 | volumes: 12 | - /home/ubuntu/docker/minecraft:/data 13 | stdin_open: true 14 | tty: true 15 | restart: unless-stopped 16 | networks: 17 | macvlan4: # change name to whatever you like 18 | ipv4_address: 192.168.4.20 # change to your IP in your vLAN subnet 19 | 20 | networks: 21 | macvlan4: 22 | external: true -------------------------------------------------------------------------------- /Minecraft/macvlan: -------------------------------------------------------------------------------- 1 | docker network create -d macvlan \ 2 | 
--subnet=192.168.4.0/24 \ 3 | --gateway=192.168.4.1 \ 4 | -o parent=eth0.4 \ 5 | macvlan4 6 | -------------------------------------------------------------------------------- /MiroTalk/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | mirotalk: 5 | image: mirotalk/p2p:latest 6 | container_name: mirotalk 7 | hostname: mirotalk 8 | volumes: 9 | - .env:/src/.env:ro 10 | # These volumes are not mandatory, uncomment if you want to use it 11 | # - ./app/:/src/app/:ro # useful for changing the UI JS 12 | # - ./public/:/src/public/:ro 13 | restart: unless-stopped 14 | networks: 15 | proxy: 16 | # Uncomment ports and comment labels if you're not using a reverse proxy 17 | #ports: 18 | # - '${PORT}:${PORT}' 19 | labels: 20 | - "traefik.enable=true" 21 | - "traefik.docker.network=proxy" 22 | - "traefik.http.routers.mirotalk.entrypoints=http" 23 | - "traefik.http.routers.mirotalk.rule=Host(`mirotalk.yourdomain.com`)" 24 | - "traefik.http.middlewares.mirotalk-https-redirect.redirectscheme.scheme=https" 25 | - "traefik.http.routers.mirotalk.middlewares=mirotalk-https-redirect" 26 | - "traefik.http.routers.mirotalk-secure.entrypoints=https" 27 | - "traefik.http.routers.mirotalk-secure.rule=Host(`mirotalk.yourdomain.com`)" 28 | - "traefik.http.routers.mirotalk-secure.tls=true" 29 | - "traefik.http.routers.mirotalk-secure.tls.certresolver=cloudflare" 30 | - "traefik.http.routers.mirotalk-secure.service=mirotalk" 31 | - "traefik.http.services.mirotalk.loadbalancer.server.port=3000" # make sure the loadbalancer is the last line!!! 32 | 33 | networks: 34 | proxy: 35 | external: true 36 | -------------------------------------------------------------------------------- /Mosquitto/Kubernetes/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: mosquitto 7 | name: mosquitto 8 | namespace: mosquitto 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: mosquitto 14 | template: 15 | metadata: 16 | labels: 17 | app: mosquitto 18 | app.kubernetes.io/name: mosquitto 19 | spec: 20 | nodeSelector: 21 | worker: "true" 22 | containers: 23 | - image: eclipse-mosquitto:latest 24 | imagePullPolicy: IfNotPresent 25 | resources: 26 | limits: 27 | cpu: 0.5 28 | memory: 500Mi 29 | name: mosquitto 30 | ports: 31 | - containerPort: 1883 32 | name: mqtt 33 | protocol: TCP 34 | - containerPort: 9001 35 | name: websocket 36 | protocol: TCP 37 | volumeMounts: 38 | - mountPath: /mosquitto/config/ 39 | name: mosquitto 40 | subPath: config 41 | - mountPath: "/mosquitto/data" 42 | name: mosquitto 43 | subPath: "data" 44 | - mountPath: "/mosquitto/log" 45 | name: mosquitto 46 | subPath: log 47 | volumes: 48 | - name: mosquitto 49 | persistentVolumeClaim: 50 | claimName: mosquitto -------------------------------------------------------------------------------- /Mosquitto/Kubernetes/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: mosquitto 5 | labels: 6 | name: mosquitto -------------------------------------------------------------------------------- /Mosquitto/Kubernetes/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mosquitto 5 | spec: 6 | capacity: 7 | storage: 1Gi 8 | volumeMode: 
Filesystem 9 | accessModes: 10 | - ReadWriteOnce 11 | persistentVolumeReclaimPolicy: Retain 12 | storageClassName: longhorn 13 | csi: 14 | driver: driver.longhorn.io 15 | fsType: ext4 16 | volumeAttributes: 17 | numberOfReplicas: '2' 18 | staleReplicaTimeout: '2880' 19 | volumeHandle: mosquitto -------------------------------------------------------------------------------- /Mosquitto/Kubernetes/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mosquitto 5 | namespace: mosquitto 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | volumeName: mosquitto -------------------------------------------------------------------------------- /Mosquitto/Kubernetes/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: mosquitto 6 | name: mosquitto 7 | namespace: mosquitto 8 | spec: 9 | ports: 10 | - name: mqtt 11 | port: 1883 12 | protocol: TCP 13 | targetPort: 1883 14 | - name: websocket 15 | port: 9001 16 | protocol: TCP 17 | targetPort: 9001 18 | selector: 19 | app: mosquitto 20 | externalTrafficPolicy: Local 21 | loadBalancerIP: 192.168.200.14 22 | type: LoadBalancer -------------------------------------------------------------------------------- /Mosquitto/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mosquitto: 4 | container_name: mosquitto 5 | image: eclipse-mosquitto:latest 6 | restart: always 7 | deploy: 8 | resources: 9 | limits: 10 | memory: 256M 11 | ports: 12 | - "1883:1883" 13 | - "9001:9001" 14 | volumes: 15 | - /home/ubuntu/docker/mosquitto/config/mosquitto.conf:/mosquitto/config/mosquitto.conf 16 | - /home/ubuntu/docker/mosquitto/data:/mosquitto/data 17 | - /home/ubuntu/docker/mosquitto/log:/mosquitto/log 18 | security_opt: 19 | - no-new-privileges:true 20 | -------------------------------------------------------------------------------- /Mosquitto/mosquitto.conf: -------------------------------------------------------------------------------- 1 | allow_anonymous false 2 | listener 1883 3 | listener 9001 4 | protocol websockets 5 | persistence true 6 | password_file /mosquitto/config/pwfile 7 | persistence_file mosquitto.db 8 | persistence_location /mosquitto/data/ -------------------------------------------------------------------------------- /Nextcloud/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | services: 3 | nextcloud: 4 | image: lscr.io/linuxserver/nextcloud:latest 5 | container_name: nextcloud 6 | environment: 7 | - PUID=1000 8 | - PGID=1000 9 | - TZ=Etc/UTC 10 | volumes: 11 | - /home/ubuntu/docker/nextcloud/appdata:/config 12 | - /home/ubuntu/docker/nextcloud/data:/data 13 | # ports: 14 | # - 443:443 15 | restart: unless-stopped 16 | labels: 17 | - "traefik.enable=true" 18 | - "traefik.http.routers.nextcloud.entrypoints=http" 19 | - "traefik.http.routers.nextcloud.rule=Host(`nextcloud.jimsgarage.co.uk`)" 20 | - "traefik.http.middlewares.nextcloud-https-redirect.redirectscheme.scheme=https" 21 | - "traefik.http.routers.nextcloud.middlewares=nextcloud-https-redirect" 22 | - "traefik.http.routers.nextcloud-secure.entrypoints=https" 23 | - 
"traefik.http.routers.nextcloud-secure.rule=Host(`nextcloud.jimsgarage.co.uk`)" 24 | - "traefik.http.routers.nextcloud-secure.tls=true" 25 | - "traefik.http.routers.nextcloud-secure.service=nextcloud" 26 | - "traefik.http.services.nextcloud.loadbalancer.server.port=80" 27 | - "traefik.docker.network=proxy" 28 | networks: 29 | proxy: 30 | 31 | networks: 32 | proxy: 33 | external: true 34 | -------------------------------------------------------------------------------- /Nginx/cloudflare.ini: -------------------------------------------------------------------------------- 1 | # Cloudflare API credentials used by Certbot 2 | 3 | # How to generate API token: 4 | # https://developers.cloudflare.com/api/tokens/create 5 | dns_cloudflare_api_token = sdfjuhSDFjkh_sdfjSDFSD3sddjh -------------------------------------------------------------------------------- /Nginx/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | certbot: 3 | image: certbot/dns-cloudflare 4 | volumes: 5 | - ./certs:/etc/letsencrypt 6 | - ./cloudflare.ini:/root/cloudflare.ini 7 | command: >- 8 | certonly --dns-cloudflare 9 | --dns-cloudflare-credentials /root/cloudflare.ini 10 | --dns-cloudflare-propagation-seconds 15 11 | --email your@email.com 12 | --agree-tos --no-eff-email 13 | -d *.jimsgarage.co.uk 14 | 15 | nginx: 16 | image: nginx:latest 17 | ports: 18 | - "80:80" 19 | - "443:443" 20 | restart: "always" 21 | logging: 22 | driver: "json-file" 23 | options: 24 | max-size: "10m" 25 | max-file: "10" 26 | volumes: 27 | - ./nginx.conf:/etc/nginx/nginx.conf 28 | - ./certs:/etc/letsencrypt 29 | networks: 30 | - nginx-proxy 31 | 32 | networks: 33 | nginx-proxy: 34 | external: true -------------------------------------------------------------------------------- /Nginx/it-tools/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | it-tools: 3 | image: 'corentinth/it-tools:latest' 4 | #ports: 5 | # - '8080:80' 6 | restart: unless-stopped 7 | container_name: it-tools 8 | networks: 9 | - nginx-proxy 10 | 11 | networks: 12 | nginx-proxy: 13 | external: true -------------------------------------------------------------------------------- /NordVPN-Wireguard/wireguard.ps1: -------------------------------------------------------------------------------- 1 | # Gain your token by heading to your NordVPN account and going to "Get Access Token" 2 | # URL: https://my.nordaccount.com/dashboard/nordvpn/access-tokens/authorize/ 3 | $username = "token" 4 | $password = "my-token-from-nordvpn" 5 | $auth = "$($username):$($Password)" 6 | $bytes = [System.Text.Encoding]::ASCII.GetBytes($auth) 7 | $encodedCredentials = [Convert]::ToBase64String($bytes) 8 | $url = "https://api.nordvpn.com/v1/users/services/credentials" 9 | 10 | $headers = @{ 11 | Authorization = "Basic $encodedCredentials" 12 | } 13 | 14 | # Prints out Username, Password, and Nordlynx Private Key (this is what you need for Wireguard) 15 | Invoke-RestMethod -Uri $url -Headers $headers -Method Get 16 | 17 | # ****IGNORE - MIGHT BE OF USE FOR SCRIPTING******* 18 | # Send the GET request and capture the result 19 | # $response = Invoke-RestMethod -Uri $url -Headers $headers -Method Get 20 | 21 | # Output specific properties 22 | # $response | Select-Object id, created_at, updated_at, username, password, nordlynx_private_key 23 | 24 | # Optionally, you can access individual properties like this: 25 | # Write-Output "ID: $($response.id)" 26 | # Write-Output 
"Username: $($response.username)" 27 | # Write-Output "Password: $($response.password)" 28 | # Write-Output "NordLynx Private Key: $($response.nordlynx_private_key)" -------------------------------------------------------------------------------- /Ollama/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.6' 2 | 3 | services: 4 | ollama: 5 | # Uncomment below for GPU support 6 | # deploy: 7 | # resources: 8 | # reservations: 9 | # devices: 10 | # - driver: nvidia 11 | # count: 1 12 | # capabilities: 13 | # - gpu 14 | volumes: 15 | - ollama:/root/.ollama 16 | # Uncomment below to expose Ollama API outside the container stack 17 | # ports: 18 | # - 11434:11434 19 | container_name: ollama 20 | pull_policy: always 21 | tty: true 22 | restart: unless-stopped 23 | image: ollama/ollama:latest 24 | 25 | ollama-webui: 26 | build: 27 | context: . 28 | args: 29 | OLLAMA_API_BASE_URL: '/ollama/api' 30 | dockerfile: Dockerfile 31 | image: ollama-webui:latest 32 | container_name: ollama-webui 33 | depends_on: 34 | - ollama 35 | ports: 36 | - 3000:8080 37 | environment: 38 | - "OLLAMA_API_BASE_URL=http://ollama:11434/api" 39 | extra_hosts: 40 | - host.docker.internal:host-gateway 41 | restart: unless-stopped 42 | 43 | volumes: 44 | ollama: {} 45 | -------------------------------------------------------------------------------- /Ollama/readme.md: -------------------------------------------------------------------------------- 1 | 1. Clone the repo from: https://github.com/ollama-webui/ollama-webui 2 | 2. Tweak the docker-compose to your liking 3 | 3. Run the container: sudo docker compose up -d 4 | 5 | Let it build :) -------------------------------------------------------------------------------- /Omni-Tools/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | omni-tools: 3 | image: iib0011/omni-tools:latest 4 | container_name: omni-tools 5 | restart: unless-stopped 6 | #ports: 7 | # - "8080:80" 8 | networks: 9 | - proxy 10 | labels: 11 | - "traefik.enable=true" 12 | - "traefik.docker.network=proxy" 13 | - "traefik.http.routers.omni-tools.entrypoints=http" 14 | - "traefik.http.routers.omni-tools.rule=Host(`omni-tools.jimsgarage.co.uk`)" 15 | - "traefik.http.middlewares.omni-tools-https-redirect.redirectscheme.scheme=https" 16 | - "traefik.http.routers.omni-tools.middlewares=omni-tools-https-redirect" 17 | - "traefik.http.routers.omni-tools-secure.entrypoints=https" 18 | - "traefik.http.routers.omni-tools-secure.rule=Host(`omni-tools.jimsgarage.co.uk`)" 19 | - "traefik.http.routers.omni-tools-secure.tls=true" 20 | - "traefik.http.routers.omni-tools-secure.tls.certresolver=cloudflare" 21 | - "traefik.http.routers.omni-tools-secure.service=omni-tools" 22 | - "traefik.http.services.omni-tools.loadbalancer.server.port=80" 23 | 24 | networks: 25 | proxy: 26 | external: true -------------------------------------------------------------------------------- /OpenHands/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | all-hands-ai: 3 | image: 'docker.all-hands.dev/all-hands-ai/openhands:0.21' 4 | container_name: openhands-app 5 | extra_hosts: 6 | - 'host.docker.internal:host-gateway' 7 | ports: 8 | - '3000:3000' 9 | volumes: 10 | - '.openhands-state:/.openhands-state' 11 | - '/var/run/docker.sock:/var/run/docker.sock' 12 | environment: 13 | - LOG_ALL_EVENTS=true 14 | - 
'SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.21-nikolaik' 15 | # - LLM_API_KEY: "NA" # You can use a self hosted LLM if you like 16 | # - LLM_BASE_URL: "Ollama address:11434" # You can use a self hosted LLM if you like 17 | pull_policy: always 18 | tty: true 19 | stdin_open: true -------------------------------------------------------------------------------- /Paperless-ngx/.env: -------------------------------------------------------------------------------- 1 | COMPOSE_PROJECT_NAME=paperless -------------------------------------------------------------------------------- /Pihole/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: pihole 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Pihole/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: pihole 6 | namespace: pihole 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`pihole.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: pihole-web 17 | port: 80 18 | middlewares: 19 | - name: default-headers 20 | - name: dashboard-redirect 21 | - name: dashboard-prefix 22 | 23 | tls: 24 | secretName: yourdomain-tls 25 | -------------------------------------------------------------------------------- /Pihole/Kubernetes/middleware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: dashboard-redirect 5 | namespace: pihole 6 | spec: 7 | redirectRegex: 8 | regex: /admin/$ 9 | replacement: / 10 | --- 11 | apiVersion: traefik.io/v1alpha1 12 | kind: Middleware 13 | metadata: 14 | name: dashboard-prefix 15 | namespace: pihole 16 | spec: 17 | addPrefix: 18 | prefix: /admin -------------------------------------------------------------------------------- /Pihole/Kubernetes/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: pihole 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | ingress: 11 | - from: 12 | - ipBlock: 13 | cidr: 0.0.0.0/0 14 | except: 15 | - 10.0.0.0/8 16 | - 192.168.0.0/16 17 | - 172.16.0.0/20 -------------------------------------------------------------------------------- /Pihole/Kubernetes/sealed-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: bitnami.com/v1alpha1 2 | kind: SealedSecret 3 | metadata: 4 | creationTimestamp: null 5 | name: web-pass 6 | namespace: pihole 7 | spec: 8 | encryptedData: 9 | WEBPASSWORD: some-secret 10 | template: 11 | metadata: 12 | creationTimestamp: null 13 | name: web-pass 14 | namespace: pihole 15 | type: Opaque 16 | 17 | 
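Note: the WEBPASSWORD value in the SealedSecret above is only a placeholder — a real SealedSecret has to be encrypted against your own cluster's sealed-secrets controller. A minimal sketch of generating it with kubectl and kubeseal (secret name, namespace and password are examples; kubeseal needs to reach the controller to fetch its certificate):

kubectl create secret generic web-pass --namespace pihole \
  --from-literal=WEBPASSWORD='your-admin-password' \
  --dry-run=client -o yaml | kubeseal --format yaml > sealed-secret.yaml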
-------------------------------------------------------------------------------- /Pihole/ubuntu port 53 fix: -------------------------------------------------------------------------------- 1 | #run these commands to fix port bind error 2 | 3 | sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf 4 | 5 | sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf' 6 | 7 | systemctl restart systemd-resolved 8 | -------------------------------------------------------------------------------- /Plex/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: plex 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Plex/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: plex 6 | namespace: plex 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.plex.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: plex 17 | port: 32400 18 | - match: Host(`plex.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: plex 22 | port: 32400 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls 27 | -------------------------------------------------------------------------------- /Plex/Kubernetes/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: plex 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | egress: 11 | - to: 12 | - ipBlock: 13 | cidr: "0.0.0.0/0" 14 | except: 15 | - "10.0.0.0/8" 16 | - "172.16.0.0/12" 17 | - "192.168.0.0/16" 18 | - to: 19 | - namespaceSelector: 20 | matchLabels: 21 | kubernetes.io/metadata.name: "kube-system" 22 | - podSelector: 23 | matchLabels: 24 | k8s-app: "kube-dns" -------------------------------------------------------------------------------- /Plex/Kubernetes/pms-docker-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: ./kompose convert -f plex/docker-compose.yml 6 | kompose.version: 1.27.0 (b0ed6a2c9) 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: pms-docker 10 | name: plex 11 | namespace: plex 12 | spec: 13 | ports: 14 | - name: "32400" 15 | port: 32400 16 | protocol: TCP 17 | targetPort: 32400 18 | - name: "32400-udp" 19 | port: 32400 20 | protocol: UDP 21 | targetPort: 32400 22 | - name: "3005" 23 | port: 3005 24 | targetPort: 3005 25 | - name: "8324" 26 | port: 8324 27 | targetPort: 8324 28 | - name: "32469" 29 | port: 32469 30 | targetPort: 32469 31 | - name: "1900" 32 | port: 1900 33 | protocol: UDP 34 | targetPort: 1900 35 | - name: "32410" 36 | port: 32410 37 | protocol: UDP 38 | targetPort: 32410 39 | - name: 
"32412" 40 | port: 32412 41 | protocol: UDP 42 | targetPort: 32412 43 | - name: "32413" 44 | port: 32413 45 | protocol: UDP 46 | targetPort: 32413 47 | - name: "32414" 48 | port: 32414 49 | protocol: UDP 50 | targetPort: 32414 51 | selector: 52 | app: plex 53 | externalTrafficPolicy: Local 54 | loadBalancerIP: 192.168.200.12 55 | type: LoadBalancer -------------------------------------------------------------------------------- /Plex/Kubernetes/pv-smb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | annotations: 5 | pv.kubernetes.io/provisioned-by: smb.csi.k8s.io 6 | name: pv-plex-smb 7 | spec: 8 | capacity: 9 | storage: 100Gi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | storageClassName: smb 14 | mountOptions: 15 | - dir_mode=0777 16 | - file_mode=0777 17 | csi: 18 | driver: smb.csi.k8s.io 19 | readOnly: false 20 | # volumeHandle format: {smb-server-address}#{sub-dir-name}#{share-name} 21 | # make sure this value is unique for every share in the cluster 22 | volumeHandle: plex 23 | volumeAttributes: 24 | source: "//192.168.x.x/your-nas" 25 | nodeStageSecretRef: 26 | name: smbcreds 27 | namespace: default -------------------------------------------------------------------------------- /Plex/Kubernetes/pvc-smb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: pvc-plex-smb 6 | namespace: plex 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 10Gi 13 | volumeName: pv-plex-smb 14 | storageClassName: smb -------------------------------------------------------------------------------- /Pocket-ID/.env: -------------------------------------------------------------------------------- 1 | # See the documentation for more information: https://pocket-id.org/docs/configuration/environment-variables 2 | PUBLIC_APP_URL=https://pocket-id.jimsgarage.co.uk 3 | TRUST_PROXY=true 4 | MAXMIND_LICENSE_KEY= 5 | PUID=1000 6 | PGID=1000 -------------------------------------------------------------------------------- /Pocket-ID/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | pocket-id: 3 | image: ghcr.io/pocket-id/pocket-id 4 | restart: unless-stopped 5 | env_file: .env 6 | ports: 7 | - 3000:80 8 | volumes: 9 | - "./data:/app/backend/data" 10 | # Optional healthcheck 11 | healthcheck: 12 | test: "curl -f http://localhost/health" 13 | interval: 1m30s 14 | timeout: 5s 15 | retries: 2 16 | start_period: 10s 17 | networks: 18 | - proxy 19 | labels: 20 | - "traefik.enable=true" 21 | - "traefik.http.routers.pocket-id.entrypoints=http" 22 | - "traefik.http.routers.pocket-id.rule=Host(`pocket-id.jimsgarage.co.uk`)" 23 | - "traefik.http.middlewares.pocket-id-https-redirect.redirectscheme.scheme=https" 24 | - "traefik.http.routers.pocket-id.middlewares=pocket-id-https-redirect" 25 | - "traefik.http.routers.pocket-id-secure.entrypoints=https" 26 | - "traefik.http.routers.pocket-id-secure.rule=Host(`pocket-id.jimsgarage.co.uk`)" 27 | - "traefik.http.routers.pocket-id-secure.tls=true" 28 | - "traefik.http.routers.pocket-id-secure.service=pocket-id" 29 | - "traefik.http.services.pocket-id.loadbalancer.server.port=80" 30 | - "traefik.docker.network=proxy" 31 | 32 | networks: 33 | proxy: 34 | external: true 
-------------------------------------------------------------------------------- /Popup-Homelab/acquis.yaml: -------------------------------------------------------------------------------- 1 | filenames: 2 | - /var/log/traefik/* 3 | labels: 4 | type: traefik -------------------------------------------------------------------------------- /Popup-Homelab/cf-token: -------------------------------------------------------------------------------- 1 | cf-token-here -------------------------------------------------------------------------------- /Popup-Homelab/custom.list: -------------------------------------------------------------------------------- 1 | 192.168.200.118 traefik.jimsgarage.co.uk 2 | 192.168.200.118 portainer.jimsgarage.co.uk 3 | -------------------------------------------------------------------------------- /Popup-Homelab/docker/traefik/acme.json: -------------------------------------------------------------------------------- 1 | remember to chmod this to 600!!! -------------------------------------------------------------------------------- /Popup-Homelab/docker/traefik/traefik.yaml: -------------------------------------------------------------------------------- 1 | api: 2 | dashboard: true 3 | debug: true 4 | entryPoints: 5 | http: 6 | address: ":80" 7 | http: 8 | middlewares: 9 | - crowdsec-bouncer@file 10 | redirections: 11 | entrypoint: 12 | to: https 13 | scheme: https 14 | https: 15 | address: ":443" 16 | http: 17 | middlewares: 18 | - crowdsec-bouncer@file 19 | tcp: 20 | address: ":10000" 21 | apis: 22 | address: ":33073" 23 | serversTransport: 24 | insecureSkipVerify: true 25 | providers: 26 | docker: 27 | endpoint: "unix:///var/run/docker.sock" 28 | exposedByDefault: false 29 | file: 30 | filename: /config.yaml 31 | certificatesResolvers: 32 | cloudflare: 33 | acme: 34 | # caServer: https://acme-v02.api.letsencrypt.org/directory # production (default) 35 | caServer: https://acme-staging-v02.api.letsencrypt.org/directory # staging (testing) 36 | email: your@email.com 37 | storage: acme.json 38 | dnsChallenge: 39 | provider: cloudflare 40 | # disablePropagationCheck: true # Some people using Cloudflare note this can solve DNS propagation issues. 
41 | resolvers: 42 | - "1.1.1.1:53" 43 | - "1.0.0.1:53" 44 | 45 | log: 46 | level: "INFO" 47 | filePath: "/var/log/traefik/traefik.log" 48 | accessLog: 49 | filePath: "/var/log/traefik/access.log" 50 | -------------------------------------------------------------------------------- /Portainer/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | services: 3 | portainer: 4 | image: portainer/portainer-ce:latest 5 | container_name: portainer 6 | ports: 7 | - 8000:8000 8 | - 9443:9443 9 | - 9000:9000 # for http 10 | volumes: 11 | - portainer_data:/data 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | restart: unless-stopped 14 | networks: 15 | proxy: 16 | labels: 17 | - "traefik.enable=true" 18 | - "traefik.http.routers.portainer.entrypoints=http" 19 | - "traefik.http.routers.portainer.rule=Host(`portainer.yourdomain.com`)" 20 | - "traefik.http.middlewares.portainer-https-redirect.redirectscheme.scheme=https" 21 | - "traefik.http.routers.portainer.middlewares=portainer-https-redirect" 22 | - "traefik.http.routers.portainer-secure.entrypoints=https" 23 | - "traefik.http.routers.portainer-secure.rule=Host(`portainer.yourdomain.com`)" 24 | - "traefik.http.routers.portainer-secure.tls=true" 25 | - "traefik.http.routers.portainer-secure.service=portainer" 26 | - "traefik.http.services.portainer.loadbalancer.server.port=9000" 27 | - "traefik.docker.network=proxy" 28 | volumes: 29 | portainer_data: 30 | 31 | networks: 32 | proxy: 33 | external: true 34 | -------------------------------------------------------------------------------- /PrivateBin/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | services: 3 | privatebin: 4 | image: privatebin/nginx-fpm-alpine 5 | restart: always 6 | read_only: true 7 | user: "1000:1000" # Run the container with the UID:GID of your Docker user 8 | #ports: 9 | # - "8080:8080" 10 | volumes: 11 | - /home/ubuntu/docker/private-bin:/srv/data 12 | networks: 13 | - proxy 14 | labels: 15 | - "traefik.enable=true" 16 | - "traefik.docker.network=proxy" 17 | - "traefik.http.routers.privatebin.entrypoints=http" 18 | - "traefik.http.routers.privatebin.rule=Host(`privatebin.jimsgarage.co.uk`)" 19 | - "traefik.http.middlewares.privatebin-https-redirect.redirectscheme.scheme=https" 20 | - "traefik.http.routers.privatebin.middlewares=privatebin-https-redirect" 21 | - "traefik.http.routers.privatebin-secure.entrypoints=https" 22 | - "traefik.http.routers.privatebin-secure.rule=Host(`privatebin.jimsgarage.co.uk`)" 23 | - "traefik.http.routers.privatebin-secure.tls=true" 24 | - "traefik.http.routers.privatebin-secure.tls.certresolver=cloudflare" 25 | - "traefik.http.routers.privatebin-secure.service=privatebin" 26 | - "traefik.http.services.privatebin.loadbalancer.server.port=8080" 27 | networks: 28 | proxy: 29 | external: true 30 | -------------------------------------------------------------------------------- /Proxmox-Backup-Server/readme.md: -------------------------------------------------------------------------------- 1 | # Video Commands: 2 | 1. Mount command: mount -t cifs -o rw,vers=3.0,credentials=/etc/samba/.smbcreds,uid=34,gid=34 //IP-OF-NAS/SHARE-NAME /mnt/truenas 3 | 2. 
fstab: //IP-OF-NAS/SHARE-NAME /mnt/test-pbs cifs vers=3.0,credentials=/etc/samba/.smbcreds,uid=34,gid=34,defaults 0 0 -------------------------------------------------------------------------------- /Proxmox-NAS/config.yml: -------------------------------------------------------------------------------- 1 | auth: 2 | - user: foo 3 | group: foo 4 | uid: 1000 5 | gid: 1000 6 | password: bar 7 | # - user: baz 8 | # group: xxx 9 | # uid: 1100 10 | # gid: 1200 11 | # password_file: /run/secrets/baz_password 12 | 13 | global: 14 | - "force user = foo" 15 | - "force group = foo" 16 | 17 | share: 18 | - name: public 19 | comment: Public 20 | path: /samba/public 21 | browsable: yes 22 | readonly: no 23 | guestok: yes 24 | veto: no 25 | recycle: yes 26 | # - name: share 27 | # path: /samba/share 28 | # browsable: yes 29 | # readonly: no 30 | # guestok: yes 31 | # writelist: foo 32 | # veto: no 33 | # - name: foo 34 | # path: /samba/foo 35 | # browsable: yes 36 | # readonly: no 37 | # guestok: no 38 | # validusers: foo 39 | # writelist: foo 40 | # veto: no 41 | # hidefiles: /_*/ 42 | # - name: foo-baz 43 | # path: /samba/foo-baz 44 | # browsable: yes 45 | # readonly: no 46 | # guestok: no 47 | # validusers: foo,baz 48 | # writelist: foo,baz 49 | # veto: no -------------------------------------------------------------------------------- /Proxmox-NAS/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | name: samba 2 | 3 | services: 4 | samba: 5 | image: crazymax/samba 6 | container_name: samba 7 | network_mode: host 8 | volumes: 9 | - "./data:/data" # Contains cache, configuration and runtime data 10 | - "/smb:/samba/public" 11 | # - "./share:/samba/share" - optional additional share - see config.yml for permissions 12 | # - "./foo:/samba/foo" - optional additional share - see config.yml for permissions 13 | # - "./foo-baz:/samba/foo-baz" - optional additional share - see config.yml for permissions 14 | environment: 15 | - "TZ=Europe/London" 16 | # - "CONFIG_FILE=/your-location" this can be anywhere you want. 
Default is /data 17 | # - "SAMBA_WORKGROUP=WORKGROUP" change to your workgroup, default it WORKGROUP 18 | # - "SAMBA_SERVER_STRING=some string" is the equivalent of the NT Description field 19 | - "SAMBA_LOG_LEVEL=0" 20 | # - "SAMBA_FOLLOW_SYMLINKS=NO" default is yes 21 | # - "SAMBA_WIDE_LINKS=NO" default is yes 22 | # - "SAMBA_HOSTS_ALLOW=0.0.0.0/0" default 127.0.0.0/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 23 | # - "SAMBA_INTERFACES=some-interface" default all 24 | # - "WSDD2_ENABLE=1" default is 0 25 | # - "WSDD2_HOSTNAME=string" Override hostname (default to host or container name) 26 | # - "WSDD2_NETBIOS_NAME=some-name" Set NetBIOS name (default to hostname) 27 | # - "WSDD2_INTERFANCE=interface-name" Reply only on this interface 28 | restart: always -------------------------------------------------------------------------------- /Pterodactyl/config.yml: -------------------------------------------------------------------------------- 1 | debug: false 2 | uuid: e4c68ecc-3c2a-4c30-a3e7-d631969b8bc5 3 | token_id: hDeKbBQIOzKNOu4T 4 | token: P3IgxW490amCoA3J3W7csEXV6XRxrf5kb7QgMboTn0Drtf7AKSJYdsOKOIwenUVR 5 | api: 6 | host: 0.0.0.0 7 | port: 443 8 | ssl: 9 | enabled: false 10 | cert: /etc/letsencrypt/live/wings0.jimsgarage.co.uk/fullchain.pem 11 | key: /etc/letsencrypt/live/wings0.jimsgarage.co.uk/privkey.pem 12 | upload_limit: 100 13 | system: 14 | data: /var/lib/pterodactyl/volumes 15 | sftp: 16 | bind_port: 2022 17 | allowed_mounts: [] 18 | remote: 'https://panel.jimsgarage.co.uk' 19 | docker: 20 | network: 21 | interface: 172.50.0.1 22 | dns: 23 | - 192.168.200.11 24 | - 1.0.0.1 25 | name: wings0 26 | ispn: false 27 | driver: bridge 28 | network_mode: wings0 29 | is_internal: false 30 | enable_icc: true 31 | network_mtu: 1500 32 | interfaces: 33 | v4: 34 | subnet: 172.50.0.0/16 35 | gateway: 172.50.0.1 36 | v6: 37 | subnet: fdba:17c8:6c94::/64 38 | gateway: fdba:17c8:6c94::1011 39 | allowed_mounts: [] 40 | allowed_origins: [] 41 | allow_cors_private_network: false 42 | ignore_panel_config_updates: false -------------------------------------------------------------------------------- /Pterodactyl/readme.md: -------------------------------------------------------------------------------- 1 | 1. docker compose run --rm panel php artisan p:user:make --email=admin@domain.com --username=admin --name-first=admin --name-last=user --password=admin --admin=1 --no-password -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [Discord](https://discord.gg/qW5vEBekz5)
2 | [Twitter](https://twitter.com/jimsgarage_)
3 | [Reddit](https://www.reddit.com/user/Jims-Garage) 4 | 5 | ![alt text](https://github.com/JamesTurland/JimsGarage/blob/main/Logo/Jim'sGarage-1(2).png?raw=true) 6 | 7 | # Jim's Garage 8 | Here's a collection of Docker Compose and config files for use in my videos. Simply tweak to your environment and deploy! 9 | 10 | ## Star History 11 | 12 | 13 | 14 | 15 | 16 | Star History Chart 17 | 18 | 19 | -------------------------------------------------------------------------------- /SafeLine/.env: -------------------------------------------------------------------------------- 1 | SAFELINE_DIR=/home/ubuntu/docker/safeline 2 | IMAGE_TAG=latest 3 | MGT_PORT=4443 4 | POSTGRES_PASSWORD=safeline 5 | SUBNET_PREFIX=172.22.222 6 | IMAGE_PREFIX=chaitin -------------------------------------------------------------------------------- /SearXNG/.env: -------------------------------------------------------------------------------- 1 | # Be sure to check out the official docs and setup at: https://github.com/searxng/searxng-docker 2 | # This is my version running locally with Traefik. There is no caddy or redis. 3 | # If you wish to host publicly you probably want redis, amongst some additional security... 4 | # The official image comes with caddy and automatic certificate pulls 5 | 6 | SEARXNG_HOSTNAME=search.jimsgarage.co.uk 7 | 8 | # Optional: 9 | # If you run a very small or a very large instance, you might want to change the amount of used uwsgi workers and threads per worker 10 | # More workers (= processes) means that more search requests can be handled at the same time, but it also causes more resource usage 11 | 12 | SEARXNG_UWSGI_WORKERS=8 13 | SEARXNG_UWSGI_THREADS=8 14 | 15 | # Get from: https://github.com/JamesTurland/JimsGarage/blob/main/NordVPN-Wireguard/wireguard.ps1 16 | WIREGUARD_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXX -------------------------------------------------------------------------------- /SearXNG/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | searxng: 3 | image: searxng/searxng:latest 4 | restart: unless-stopped 5 | # ports: 6 | # - '${PORT}:8080' 7 | networks: 8 | - proxy 9 | environment: 10 | - SEARXNG_BASE_URL=https://${SEARXNG_HOSTNAME}/ 11 | - UWSGI_WORKERS=${SEARXNG_UWSGI_WORKERS:-4} 12 | - UWSGI_THREADS=${SEARXNG_UWSGI_THREADS:-4} 13 | volumes: 14 | - '/home/ubuntu/docker/searxng/searxng-data:/etc/searxng:rw' 15 | cap_drop: 16 | - ALL 17 | cap_add: 18 | - CHOWN 19 | - SETGID 20 | - SETUID 21 | logging: 22 | driver: "json-file" 23 | options: 24 | max-size: "1m" 25 | max-file: "1" 26 | labels: 27 | - "traefik.enable=true" 28 | - "traefik.docker.network=proxy" 29 | - "traefik.http.routers.search.entrypoints=http" 30 | - "traefik.http.routers.search.rule=Host(`search.jimsgarage.co.uk`)" 31 | - "traefik.http.middlewares.search-https-redirect.redirectscheme.scheme=https" 32 | - "traefik.http.routers.search.middlewares=search-https-redirect" 33 | - "traefik.http.routers.search-secure.entrypoints=https" 34 | - "traefik.http.routers.search-secure.rule=Host(`search.jimsgarage.co.uk`)" 35 | - "traefik.http.routers.search-secure.tls=true" 36 | - "traefik.http.routers.search-secure.tls.certresolver=cloudflare" 37 | - "traefik.http.routers.search-secure.service=search" 38 | - "traefik.http.services.search.loadbalancer.server.port=8080" 39 | 40 | networks: 41 | proxy: 42 | external: true -------------------------------------------------------------------------------- /SearXNG/settings.yaml: 
-------------------------------------------------------------------------------- 1 | # see https://docs.searxng.org/admin/settings/settings.html#settings-use-default-settings 2 | use_default_settings: true 3 | server: 4 | # base_url is defined in the SEARXNG_BASE_URL environment variable, see .env and docker-compose.yml 5 | secret_key: "myultrasecretkey" # change this! 6 | limiter: false # can be disabled for a private instance 7 | image_proxy: true 8 | ui: 9 | static_use_hash: true 10 | #redis: 11 | # url: redis://redis:6379/0 -------------------------------------------------------------------------------- /Synapse/mautrix-discord-bridge/example-registration.yaml: -------------------------------------------------------------------------------- 1 | id: discord 2 | url: http://:29334 3 | as_token: vpVJTrHGB6ZyVScf2SD4RzRLHcBeEM6fe9UhuQtsWd9JyFDmvN7mrqQMHBRn 4 | hs_token: VqENuUGBb2NjChatnA7e36CZN7esjpL57mhvmKREQAH7Pj2ux6H835UXBDUC 5 | sender_localpart: zHypwcFgaEKamUdbGyBgvibKpW 6 | rate_limited: false 7 | namespaces: 8 | users: 9 | - regex: ^@discordbot:matrix\.jimsgarage\.co\.uk$ 10 | exclusive: true 11 | - regex: ^@discord_.*:matrix\.jimsgarage\.co\.uk$ 12 | exclusive: true 13 | de.sorunome.msc2409.push_ephemeral: true 14 | push_ephemeral: true 15 | -------------------------------------------------------------------------------- /Synapse/readme.md: -------------------------------------------------------------------------------- 1 | 1) Create a config file 2 | 3 | sudo docker run -it --rm \ 4 | --mount type=volume,src=synapse-data,dst=/data \ 5 | -e SYNAPSE_SERVER_NAME=matrix.jimsgarage.co.uk \ 6 | -e SYNAPSE_REPORT_STATS=no \ 7 | matrixdotorg/synapse:latest generate 8 | 9 | 2) become root and access the file 10 | 11 | sudo -i 12 | 13 | 3) copy config file to your docker volume mount 14 | 15 | 4) become non-root user 16 | 17 | 5) change owner and permissions of configs so that we can edit them 18 | 19 | su username 20 | 21 | sudo chown ubuntu:ubuntu * (or whatever your user is) 22 | 23 | 6) edit config 24 | 25 | change database section 26 | 27 | name: psycopg2 28 | args: 29 | user: 30 | password: 31 | database: 32 | host: 33 | cp_min: 5 34 | cp_max: 10 35 | 36 | copy over the credentials from the docker compose 37 | 38 | 7) create admin user 39 | 40 | docker exec -it synapse register_new_matrix_user http://localhost:8008 -c /data/homeserver.yaml --help #remove help once ready 41 | 42 | 8) add record to dns server (remember needs to be external as well!) 43 | 44 | 9) check page to see it's up 45 | 46 | 10) element and profit 47 | 48 | 11) Add emails, recaptcha if you want to (recommended!) 
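For step 6, a minimal sketch of the finished database block in homeserver.yaml, assuming the Postgres credentials and hostname used in the docker compose (all values here are placeholders — substitute your own):

database:
  name: psycopg2
  args:
    user: synapse
    password: your-db-password
    database: synapse
    host: synapse-db
    cp_min: 5
    cp_max: 10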
49 | -------------------------------------------------------------------------------- /Terraform/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.14" 3 | required_providers { 4 | proxmox = { 5 | source = "registry.example.com/telmate/proxmox" 6 | version = ">= 1.0.0" 7 | } 8 | } 9 | } 10 | 11 | provider "proxmox" { 12 | pm_tls_insecure = true 13 | pm_api_url = "https://proxmox.jimsgarage.co.uk/api2/json" 14 | pm_api_token_secret = "112e04a7-4f15-45c5-b1e1-624e90a55f8b" 15 | pm_api_token_id = "root@pam!terraform" 16 | } -------------------------------------------------------------------------------- /Tinyauth/.env: -------------------------------------------------------------------------------- 1 | # generate with openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | head -c 32 2 | SECRET=5vvAGg1lfdsDO3lPtkj5CRv3RXEk44HN 3 | USERS=alice:$$2y$$05$$y/TnBJ.Zal5PM2Xu43NCRu0hP.STmZzb0vFiqr6LPRBc4cG0jwJr.,bob:$$2y$$05$$44DactVuy2Sqzd329hIehOp1T6JYhcnDF1r.I7TxSA8MQUwygkqV. # pw = password 4 | 5 | # Important to set this when using OAuth otherwise anyone with an account can use it to log in 6 | OAUTH_WHITELIST=your@email.com 7 | 8 | GITHUB_CLIENT_ID=XXXXXXXXXXXXXXX 9 | GITHUB_CLIENT_SECRET=XXXXXXXXXXXXXXXX -------------------------------------------------------------------------------- /Tinyauth/users: -------------------------------------------------------------------------------- 1 | alice:$2y$10$k0YEfRqPD4Cgu1Bu2BR5je4s6HSqhBRqr7wW4VfYIeYn5bVaUX5lq:XAP2AEQMCO2OHGU56AT5Y5PW3UA6CKXR 2 | bob:$2y$10$5OABhZc49FA0mVWo.a8tmeNd/FGf/d3..hcqwJL0w3lXJozIVIwtq 3 | ADD GITHUB OAUTH HERE -------------------------------------------------------------------------------- /Traefik-Secure/config.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | crowdsec-bouncer: 4 | forwardauth: 5 | address: http://bouncer-traefik:8080/api/v1/forwardAuth 6 | trustForwardHeader: true 7 | ip-whitelist: 8 | ipWhiteList: 9 | sourceRange: 10 | - "1.2.3.4" # Add the IPs or networks you want to be able to access 11 | -------------------------------------------------------------------------------- /Traefik-Secure/traefik.yaml: -------------------------------------------------------------------------------- 1 | api: 2 | dashboard: true 3 | debug: true 4 | entryPoints: 5 | http: 6 | address: ":80" 7 | http: 8 | middlewares: 9 | - crowdsec-bouncer@file 10 | redirections: 11 | entrypoint: 12 | to: https 13 | scheme: https 14 | https: 15 | address: ":443" 16 | http: 17 | middlewares: 18 | - crowdsec-bouncer@file 19 | http-external: 20 | address: ":81" 21 | http: 22 | middlewares: 23 | - crowdsec-bouncer@file 24 | redirections: 25 | entrypoint: 26 | to: https-external 27 | scheme: https 28 | https-external: 29 | address: ":444" 30 | http: 31 | middlewares: 32 | - crowdsec-bouncer@file 33 | 34 | serversTransport: 35 | insecureSkipVerify: true 36 | providers: 37 | docker: 38 | endpoint: "unix:///var/run/docker.sock" 39 | exposedByDefault: false 40 | file: 41 | filename: /config.yml 42 | certificatesResolvers: 43 | cloudflare: 44 | acme: 45 | email: your@email.com 46 | storage: acme.json 47 | dnsChallenge: 48 | provider: cloudflare 49 | #disablePropagationCheck: true # uncomment this if you have issues pulling certificates through cloudflare, By setting this flag to true disables the need to wait for the propagation of the TXT record to all authoritative name servers. 
50 | resolvers: 51 | - "1.1.1.1:53" 52 | - "1.0.0.1:53" 53 | 54 | log: 55 | level: "INFO" 56 | filePath: "/var/log/traefik/traefik.log" 57 | accessLog: 58 | filePath: "/var/log/traefik/access.log" 59 | -------------------------------------------------------------------------------- /Traefik/traefik-config/acme.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Traefik/traefik-config/acme.json -------------------------------------------------------------------------------- /Traefik/traefik-config/config.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Traefik/traefik-config/config.yml -------------------------------------------------------------------------------- /Traefik/traefik-config/traefik.yml: -------------------------------------------------------------------------------- 1 | api: 2 | dashboard: true 3 | debug: true 4 | entryPoints: 5 | http: 6 | address: ":80" 7 | http: 8 | redirections: 9 | entryPoint: 10 | to: https 11 | scheme: https 12 | https: 13 | address: ":443" 14 | serversTransport: 15 | insecureSkipVerify: true 16 | providers: 17 | docker: 18 | endpoint: "unix:///var/run/docker.sock" 19 | exposedByDefault: false 20 | file: 21 | filename: /config.yml 22 | certificatesResolvers: 23 | cloudflare: 24 | acme: 25 | email: your@email.com #add your email 26 | storage: acme.json 27 | dnsChallenge: 28 | provider: cloudflare 29 | #disablePropagationCheck: true # uncomment this if you have issues pulling certificates through cloudflare, By setting this flag to true disables the need to wait for the propagation of the TXT record to all authoritative name servers. 
30 | resolvers: 31 | - "1.1.1.1:53" 32 | - "1.0.0.1:53" -------------------------------------------------------------------------------- /Traefikv3/.env: -------------------------------------------------------------------------------- 1 | TRAEFIK_DASHBOARD_CREDENTIALS=admin:$$2y$$05$$3A1ctqF6JF4F4Jk2UsMhnevo6DHogXKb5IrnJyz53F3xUqoWvVx.i -------------------------------------------------------------------------------- /Traefikv3/cf-token: -------------------------------------------------------------------------------- 1 | # replace this line entirely with your API token -------------------------------------------------------------------------------- /Traefikv3/config/acme.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Traefikv3/config/acme.json -------------------------------------------------------------------------------- /Traefikv3/config/config.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | default-security-headers: 4 | headers: 5 | customBrowserXSSValue: 0 # X-XSS-Protection=1; mode=block 6 | contentTypeNosniff: true # X-Content-Type-Options=nosniff 7 | forceSTSHeader: true # Add the Strict-Transport-Security header even when the connection is HTTP 8 | frameDeny: false # X-Frame-Options=deny 9 | referrerPolicy: "strict-origin-when-cross-origin" 10 | stsIncludeSubdomains: true # Add includeSubdomains to the Strict-Transport-Security header 11 | stsPreload: true # Add preload flag appended to the Strict-Transport-Security header 12 | stsSeconds: 3153600 # Set the max-age of the Strict-Transport-Security header (63072000 = 2 years) 13 | contentSecurityPolicy: "default-src 'self'" 14 | customRequestHeaders: 15 | X-Forwarded-Proto: https 16 | https-redirectscheme: 17 | redirectScheme: 18 | scheme: https 19 | permanent: true 20 | 21 | routers: 22 | portainer: 23 | entryPoints: 24 | - "https" 25 | rule: "Host(`portainer-demo.jimsgarage.co.uk`)" 26 | middlewares: 27 | - default-security-headers 28 | - https-redirectscheme 29 | tls: {} 30 | service: portainer 31 | 32 | services: 33 | portainer: 34 | loadBalancer: 35 | servers: 36 | - url: "https://192.168.200.122:9443" 37 | passHostHeader: true 38 | -------------------------------------------------------------------------------- /Traefikv3/config/traefik.yaml: -------------------------------------------------------------------------------- 1 | api: 2 | dashboard: true 3 | debug: true 4 | entryPoints: 5 | http: 6 | address: ":80" 7 | http: 8 | # middlewares: # uncomment if using CrowdSec - see my video 9 | # - crowdsec-bouncer@file 10 | redirections: 11 | entrypoint: 12 | to: https 13 | scheme: https 14 | https: 15 | address: ":443" 16 | # http: 17 | # middlewares: # uncomment if using CrowdSec - see my video 18 | # - crowdsec-bouncer@file 19 | # tcp: 20 | # address: ":10000" 21 | # apis: 22 | # address: ":33073" 23 | serversTransport: 24 | insecureSkipVerify: true 25 | providers: 26 | docker: 27 | endpoint: "unix:///var/run/docker.sock" 28 | exposedByDefault: false 29 | file: 30 | filename: /config.yaml # example provided gives A+ rating https://www.ssllabs.com/ssltest/ 31 | certificatesResolvers: 32 | cloudflare: 33 | acme: 34 | # caServer: https://acme-v02.api.letsencrypt.org/directory # production (default) 35 | # caServer: https://acme-staging-v02.api.letsencrypt.org/directory # staging (testing) 36 | email: your@email.com # Cloudflare 
email (or other provider) 37 | storage: acme.json 38 | dnsChallenge: 39 | provider: cloudflare # change as required 40 | # disablePropagationCheck: true # Some people using Cloudflare note this can solve DNS propagation issues. 41 | resolvers: 42 | - "1.1.1.1:53" 43 | - "1.0.0.1:53" 44 | 45 | log: 46 | level: "INFO" 47 | filePath: "/var/log/traefik/traefik.log" 48 | accessLog: 49 | filePath: "/var/log/traefik/access.log" 50 | -------------------------------------------------------------------------------- /Trilium/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '2.1' 2 | services: 3 | trilium: 4 | image: zadam/trilium 5 | restart: always 6 | environment: 7 | - TRILIUM_DATA_DIR=/home/node/trilium-data 8 | volumes: 9 | - /home/ubuntu/docker/trilium:/home/node/trilium-data 10 | networks: 11 | proxy: 12 | labels: 13 | - "traefik.enable=true" 14 | - "traefik.http.routers.trilium.entrypoints=http" 15 | - "traefik.http.routers.trilium.rule=Host(`trilium.yourdomain.com`)" 16 | - "traefik.http.middlewares.trilium-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.trilium.middlewares=trilium-https-redirect" 18 | - "traefik.http.routers.trilium-secure.entrypoints=https" 19 | - "traefik.http.routers.trilium-secure.rule=Host(`trilium.yourdomain.com`)" 20 | - "traefik.http.routers.trilium-secure.tls=true" 21 | - "traefik.http.routers.trilium-secure.service=trilium" 22 | - "traefik.http.services.trilium.loadbalancer.server.port=8080" 23 | - "traefik.docker.network=proxy" 24 | 25 | networks: 26 | proxy: 27 | external: true 28 | -------------------------------------------------------------------------------- /UltimateVPS/.env: -------------------------------------------------------------------------------- 1 | WIREGUARD_TOKEN=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -------------------------------------------------------------------------------- /UltimateVPS/traefik/readme.md: -------------------------------------------------------------------------------- 1 | see: https://youtu.be/CmUzMi5QLzI & https://github.com/JamesTurland/JimsGarage/tree/main/Traefikv3 -------------------------------------------------------------------------------- /Unbound/a-records.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Unbound/a-records.conf -------------------------------------------------------------------------------- /Unbound/srv-records.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesTurland/JimsGarage/c4f20395b3818e51866ecc4669f157e2d5677810/Unbound/srv-records.conf -------------------------------------------------------------------------------- /Unifi-Controller/init-mongo.js: -------------------------------------------------------------------------------- 1 | db.getSiblingDB("unifi").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi"}]}); 2 | db.getSiblingDB("unifi_stat").createUser({user: "unifi", pwd: "5nHgg3G0cH9d", roles: [{role: "dbOwner", db: "unifi_stat"}]}); -------------------------------------------------------------------------------- /Unifi-Controller/kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Deployment 2 | 3 | You can't just deploy the whole folder. 
You have to apply the files in the following order: 4 | 5 | 1. Create the namespace and the secrets using ´kubectl apply -f namespaceAndSecret.yaml ´ 6 | 2. Apply the init-script using ´kubectl create configmap create-db-configmap --from-file=init-mongo.js --namespace unifi-controller´ 7 | 3. Create two persistent volumes and two persistent volume claims in Longhorn 8 | 9 | - unifi-db 10 | - unifi-config 11 | 12 | 4. Deploy the pod and the service using ´kubectl apply -f deployment.yaml ´ 13 | 5. If you want to access the GUI via Traefik you can add an ingress using ´kubectl apply -f ingress.yaml ´ 14 | 6. Check if the MongoDB Container is running and delete the configmap ´create-db-configmap´ for security reasons 15 | -------------------------------------------------------------------------------- /Unifi-Controller/kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: Middleware 4 | metadata: 5 | name: default-headers 6 | namespace: unifi-controller 7 | spec: 8 | headers: 9 | browserXssFilter: true 10 | contentTypeNosniff: true 11 | forceSTSHeader: true 12 | stsIncludeSubdomains: true 13 | stsPreload: true 14 | stsSeconds: 15552000 15 | customFrameOptionsValue: SAMEORIGIN 16 | customRequestHeaders: 17 | X-Forwarded-Proto: https 18 | --- 19 | apiVersion: traefik.io/v1alpha1 20 | kind: IngressRoute 21 | metadata: 22 | name: unifi-controller 23 | namespace: unifi-controller 24 | annotations: 25 | kubernetes.io/ingress.class: traefik-external 26 | spec: 27 | entryPoints: 28 | - websecure 29 | routes: 30 | - match: Host(`unifi.yourdomain.com`) # change to your domain 31 | kind: Rule 32 | services: 33 | - name: unifi-tcp 34 | port: 8443 35 | scheme: https 36 | middlewares: 37 | - name: default-headers 38 | tls: 39 | secretName: ffth-tls # change to your cert name 40 | -------------------------------------------------------------------------------- /Unifi-Controller/kubernetes/init-mongo.js: -------------------------------------------------------------------------------- 1 | db.getSiblingDB("unifi").createUser({ 2 | user: "unifi", 3 | pwd: "5nHgg3G0cH9d", 4 | roles: [{ role: "dbOwner", db: "unifi" }], 5 | }); 6 | db.getSiblingDB("unifi_stat").createUser({ 7 | user: "unifi", 8 | pwd: "5nHgg3G0cH9d", 9 | roles: [{ role: "dbOwner", db: "unifi_stat" }], 10 | }); 11 | -------------------------------------------------------------------------------- /Unifi-Controller/kubernetes/namespaceAndSecret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: unifi-controller 6 | --- 7 | apiVersion: v1 8 | kind: Secret 9 | metadata: 10 | name: unifi-env 11 | namespace: unifi-controller 12 | type: Opaque 13 | stringData: 14 | PUID: "1000" 15 | PGID: "1000" 16 | TZ: "Europe/London" 17 | MONGO_USER: "unifi" 18 | MONGO_PASS: "5nHgg3G0cH9d" 19 | MONGO_DBNAME: unifi 20 | -------------------------------------------------------------------------------- /UptimeKuma/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: uptime-kuma 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: 
SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /UptimeKuma/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: uptime-kuma 6 | namespace: uptime-kuma 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.uptime-kuma.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: uptime-kuma 17 | port: 80 18 | - match: Host(`uptime-kuma.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: uptime-kuma 22 | port: 80 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls 27 | -------------------------------------------------------------------------------- /UptimeKuma/Kubernetes/longhorn-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: uptime-kuma 5 | labels: 6 | app: uptime-kuma 7 | app.kubernetes.io/instance: uptime-kuma 8 | app.kubernetes.io/name: uptime-kuma 9 | spec: 10 | capacity: 11 | storage: 5Gi 12 | accessModes: 13 | - ReadWriteOnce 14 | persistentVolumeReclaimPolicy: Retain 15 | storageClassName: longhorn 16 | csi: 17 | driver: driver.longhorn.io 18 | volumeHandle: uptime-kuma 19 | -------------------------------------------------------------------------------- /UptimeKuma/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | services: 3 | uptime-kuma: 4 | image: louislam/uptime-kuma:1 5 | container_name: uptime-kuma 6 | volumes: 7 | - /home/ubuntu/docker/uptime-kuma:/app/data 8 | restart: unless-stopped 9 | security_opt: 10 | - no-new-privileges:true 11 | networks: 12 | proxy: 13 | labels: 14 | - "traefik.enable=true" 15 | - "traefik.http.routers.uptime-kuma.entrypoints=http" 16 | - "traefik.http.routers.uptime-kuma.rule=Host(`uptime-kuma.yourdomain.com`)" 17 | - "traefik.http.middlewares.uptime-kuma-https-redirect.redirectscheme.scheme=https" 18 | - "traefik.http.routers.uptime-kuma.middlewares=uptime-kuma-https-redirect" 19 | - "traefik.http.routers.uptime-kuma-secure.entrypoints=https" 20 | - "traefik.http.routers.uptime-kuma-secure.rule=Host(`uptime-kuma.yourdomain.com`)" 21 | - "traefik.http.routers.uptime-kuma-secure.tls=true" 22 | - "traefik.http.routers.uptime-kuma-secure.service=uptime-kuma" 23 | - "traefik.http.services.uptime-kuma.loadbalancer.server.port=3001" 24 | - "traefik.docker.network=proxy" 25 | 26 | networks: 27 | proxy: 28 | external: true 29 | -------------------------------------------------------------------------------- /Vaultwarden/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: vaultwarden 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Vaultwarden/Kubernetes/deployment.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: vaultwarden 7 | app.kubernetes.io/instance: vaultwarden 8 | name: vaultwarden 9 | namespace: vaultwarden 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | app: vaultwarden 15 | template: 16 | metadata: 17 | labels: 18 | app: vaultwarden 19 | app.kubernetes.io/name: vaultwarden 20 | spec: 21 | nodeSelector: 22 | worker: "true" 23 | containers: 24 | - image: vaultwarden/server:latest 25 | imagePullPolicy: Always 26 | name: vaultwarden 27 | ports: 28 | - containerPort: 80 29 | name: web 30 | protocol: TCP 31 | volumeMounts: 32 | - mountPath: /data/ 33 | name: vaultwarden 34 | volumes: 35 | - name: vaultwarden 36 | persistentVolumeClaim: 37 | claimName: vaultwarden 38 | --- 39 | apiVersion: v1 40 | kind: Service 41 | metadata: 42 | labels: 43 | app: vaultwarden 44 | name: vaultwarden 45 | namespace: vaultwarden 46 | spec: 47 | ports: 48 | - name: web-tcp 49 | port: 80 50 | protocol: TCP 51 | targetPort: 80 52 | selector: 53 | app: vaultwarden -------------------------------------------------------------------------------- /Vaultwarden/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: vaultwarden 6 | namespace: vaultwarden 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.vaultwarden.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: vaultwarden 17 | port: 80 18 | - match: Host(`vaultwarden.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: vaultwarden 22 | port: 80 23 | middlewares: 24 | - name: default-headers 25 | - name: local-ipwhitelist 26 | tls: 27 | secretName: yourdomain-tls -------------------------------------------------------------------------------- /Vaultwarden/Kubernetes/ipwhitelist.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: local-ipwhitelist 5 | namespace: vaultwarden 6 | spec: 7 | ipWhiteList: 8 | sourceRange: 9 | - 10.0.0.0/8 10 | - 172.16.0.0/12 11 | - 192.168.0.0/16 12 | -------------------------------------------------------------------------------- /Vaultwarden/Kubernetes/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow-internet-only 5 | namespace: vaultwarden 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | - Ingress 11 | egress: 12 | - to: 13 | - ipBlock: 14 | cidr: "0.0.0.0/0" 15 | except: 16 | - "10.0.0.0/8" 17 | - "172.16.0.0/12" 18 | - "192.168.0.0/16" 19 | - to: 20 | - namespaceSelector: 21 | matchLabels: 22 | kubernetes.io/metadata.name: "kube-system" 23 | - podSelector: 24 | matchLabels: 25 | k8s-app: "kube-dns" 26 | ingress: 27 | - from: 28 | - ipBlock: 29 | cidr: "10.0.0.0/8" 30 | - from: 31 | - ipBlock: 32 | cidr: "172.16.0.0/12" 33 | - from: 34 | - ipBlock: 35 | cidr: "192.168.0.0/16" 36 | -------------------------------------------------------------------------------- /Vaultwarden/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | vaultwarden: 5 
| container_name: vaultwarden 6 | image: vaultwarden/server:latest 7 | volumes: 8 | - '/home/ubuntu/docker/vaultwarden/:/data/' 9 | restart: unless-stopped 10 | networks: 11 | proxy: 12 | labels: 13 | - "traefik.enable=true" 14 | - "traefik.http.routers.vaultwarden.entrypoints=http" 15 | - "traefik.http.routers.vaultwarden.rule=Host(`vaultwarden.yourdomain.com`)" 16 | - "traefik.http.middlewares.vaultwarden-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.vaultwarden.middlewares=vaultwarden-https-redirect" 18 | - "traefik.http.routers.vaultwarden-secure.entrypoints=https" 19 | - "traefik.http.routers.vaultwarden-secure.rule=Host(`vaultwarden.yourdomain.com`)" 20 | - "traefik.http.routers.vaultwarden-secure.tls=true" 21 | - "traefik.http.routers.vaultwarden-secure.service=vaultwarden" 22 | - "traefik.http.services.vaultwarden.loadbalancer.server.port=80" 23 | - "traefik.docker.network=proxy" 24 | security_opt: 25 | - no-new-privileges:true 26 | 27 | networks: 28 | proxy: 29 | external: true 30 | -------------------------------------------------------------------------------- /Watchtower/access_token: -------------------------------------------------------------------------------- 1 | Ab9avP0o90UVYp8 -------------------------------------------------------------------------------- /Web-Servers/Hugo/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | ## MAKE SURE YOU RUN THE SITE BUILD COMMAND FIRST, BEFORE DEPLOYING THIS CONTAINER ## 2 | ## YOU NEED TO MOUNT THE SITE DURING THE HUGO DEPLOYMENT ## 3 | 4 | version: '3.8' 5 | services: 6 | server: 7 | image: klakegg/hugo:0.101.0 8 | container_name: hugo 9 | command: server 10 | volumes: 11 | - "/home/ubuntu/docker/hugo/your-website:/src" #mount your site here after you've created a new site! 
12 | networks: 13 | proxy: 14 | labels: 15 | - "traefik.enable=true" 16 | - "traefik.http.routers.hugo.entrypoints=http" 17 | - "traefik.http.routers.hugo.rule=Host(`mywebsite.yourdomain.com`)" 18 | - "traefik.http.middlewares.hugo-https-redirect.redirectscheme.scheme=https" 19 | - "traefik.http.routers.hugo.middlewares=hugo-https-redirect" 20 | - "traefik.http.routers.hugo-secure.entrypoints=https" 21 | - "traefik.http.routers.hugo-secure.rule=Host(`mywebsite.yourdomain.com`)" 22 | - "traefik.http.routers.hugo-secure.tls=true" 23 | - "traefik.http.routers.hugo-secure.service=hugo" 24 | - "traefik.http.services.hugo.loadbalancer.server.port=1313" 25 | - "traefik.docker.network=proxy" 26 | 27 | networks: 28 | proxy: 29 | external: true -------------------------------------------------------------------------------- /Web-Servers/Hugo/site-build-command: -------------------------------------------------------------------------------- 1 | sudo docker run --rm -v $(pwd):/src klakegg/hugo:0.101.0-ext-alpine new site MyWebsite --format yaml -------------------------------------------------------------------------------- /Web-Servers/Nginx/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | web: 4 | image: nginx 5 | container_name: nginx 6 | volumes: 7 | - /home/ubuntu/docker/nginx/templates:/etc/nginx/templates 8 | - /home/ubuntu/docker/nginx/web:/usr/share/nginx/html 9 | environment: 10 | - NGINX_HOST=mydomain.com 11 | - NGINX_PORT=80 12 | labels: 13 | - "traefik.enable=true" 14 | - "traefik.http.routers.nginx.entrypoints=http" 15 | - "traefik.http.routers.nginx.rule=Host(`my.website.com`)" 16 | - "traefik.http.middlewares.nginx-https-redirect.redirectscheme.scheme=https" 17 | - "traefik.http.routers.nginx.middlewares=nginx-https-redirect" 18 | - "traefik.http.routers.nginx-secure.entrypoints=https" 19 | - "traefik.http.routers.nginx-secure.rule=Host(`my.website.com`)" 20 | - "traefik.http.routers.nginx-secure.tls=true" 21 | - "traefik.http.routers.nginx-secure.service=nginx" 22 | - "traefik.http.services.nginx.loadbalancer.server.port=80" 23 | # - "traefik.http.routers.nginx-secure.middlewares=middlewares-authentik@file" 24 | - "traefik.docker.network=proxy" 25 | networks: 26 | proxy: 27 | security_opt: 28 | - no-new-privileges:true 29 | 30 | networks: 31 | proxy: 32 | external: true -------------------------------------------------------------------------------- /Web-Servers/WordPress/.env: -------------------------------------------------------------------------------- 1 | IP=192.168.200.50 2 | PORT=8074 3 | DB_ROOT_PASSWORD=password 4 | DB_NAME=wordpress -------------------------------------------------------------------------------- /Wireguard/Kubernetes/default-headers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: Middleware 3 | metadata: 4 | name: default-headers 5 | namespace: wg-easy 6 | spec: 7 | headers: 8 | browserXssFilter: true 9 | contentTypeNosniff: true 10 | forceSTSHeader: true 11 | stsIncludeSubdomains: true 12 | stsPreload: true 13 | stsSeconds: 15552000 14 | customFrameOptionsValue: SAMEORIGIN 15 | customRequestHeaders: 16 | X-Forwarded-Proto: https -------------------------------------------------------------------------------- /Wireguard/Kubernetes/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.io/v1alpha1 3 | kind: 
IngressRoute 4 | metadata: 5 | name: wg-easy 6 | namespace: wg-easy 7 | annotations: 8 | kubernetes.io/ingress.class: traefik-external 9 | spec: 10 | entryPoints: 11 | - websecure 12 | routes: 13 | - match: Host(`www.wg-easy.yourdomain.co.uk`) 14 | kind: Rule 15 | services: 16 | - name: wg-easy-web 17 | port: 51821 18 | - match: Host(`wg-easy.yourdomain.co.uk`) 19 | kind: Rule 20 | services: 21 | - name: wg-easy-web 22 | port: 51821 23 | middlewares: 24 | - name: default-headers 25 | tls: 26 | secretName: yourdomain-tls 27 | -------------------------------------------------------------------------------- /Wireguard/Kubernetes/ingressRouteUDP.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: IngressRouteUDP 3 | metadata: 4 | name: wg-easy 5 | namespace: wg-easy 6 | annotations: 7 | kubernetes.io/ingress.class: traefik-external 8 | spec: 9 | entryPoints: 10 | - wireguard 11 | routes: 12 | - services: 13 | - name: wg-easy-udp 14 | port: 51820 -------------------------------------------------------------------------------- /Wireguard/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | services: 3 | wg-easy: 4 | environment: 5 | # ⚠️ Required: 6 | # Change this to your host's public address 7 | - WG_HOST=mydomain.com # The public hostname of your VPN server. 8 | 9 | # Optional: 10 | # - PASSWORD=foobar123 # When set, requires a password when logging in to the Web UI. 11 | # - WG_PORT=51820 # The public UDP port of your VPN server. WireGuard will always listen on 51820 inside the Docker container. 12 | # - WG_DEFAULT_ADDRESS=10.8.0.x # Clients IP address range. 13 | # - WG_DEFAULT_DNS=1.1.1.1 # DNS server clients will use. 14 | # - WG_MTU=1420 # The MTU the clients will use. Server uses default WG MTU. 15 | # - WG_ALLOWED_IPS=192.168.15.0/24, 10.0.1.0/24 # Allowed IPs clients will use. 
--------------------------------------------------------------------------------
/Wireguard/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3.8"
services:
  wg-easy:
    environment:
      # ⚠️ Required:
      # Change this to your host's public address
      - WG_HOST=mydomain.com # The public hostname of your VPN server.

      # Optional:
      # - PASSWORD=foobar123 # When set, requires a password when logging in to the Web UI.
      # - WG_PORT=51820 # The public UDP port of your VPN server. WireGuard will always listen on 51820 inside the Docker container.
      # - WG_DEFAULT_ADDRESS=10.8.0.x # Clients' IP address range.
      # - WG_DEFAULT_DNS=1.1.1.1 # DNS server the clients will use.
      # - WG_MTU=1420 # The MTU the clients will use. The server uses the default WireGuard MTU.
      # - WG_ALLOWED_IPS=192.168.15.0/24, 10.0.1.0/24 # Allowed IPs the clients will use.
      # - WG_PRE_UP=echo "Pre Up" > /etc/wireguard/pre-up.txt
      # - WG_POST_UP=echo "Post Up" > /etc/wireguard/post-up.txt
      # - WG_PRE_DOWN=echo "Pre Down" > /etc/wireguard/pre-down.txt
      # - WG_POST_DOWN=echo "Post Down" > /etc/wireguard/post-down.txt

    image: weejewel/wg-easy
    container_name: wg-easy
    volumes:
      - /home/ubuntu/docker/wireguard:/etc/wireguard
    ports:
      - "51820:51820/udp"
      - "51821:51821/tcp"
    restart: unless-stopped
    cap_add:
      - NET_ADMIN
      - SYS_MODULE
    sysctls:
      - net.ipv4.ip_forward=1
      - net.ipv4.conf.all.src_valid_mark=1
--------------------------------------------------------------------------------
/Zitadel/example-zitadel-config.yaml:
--------------------------------------------------------------------------------
# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml
Log:
  Level: 'info'

# ZITADEL is accessed over HTTPS on port 443 (ExternalSecure), e.g. behind a TLS-terminating reverse proxy
ExternalSecure: true
ExternalDomain: zitadel.jimsgarage.co.uk # change this to your domain
ExternalPort: 443

# If not using the docker compose example, adjust these values for connecting ZITADEL to your CockroachDB
Database:
  cockroach:
    Host: 'my-cockroach-db'
    User:
      SSL:
        Mode: 'verify-full'
        RootCert: "/crdb-certs/ca.crt"
        Cert: "/crdb-certs/client.zitadel_user.crt"
        Key: "/crdb-certs/client.zitadel_user.key"
    Admin:
      SSL:
        Mode: 'verify-full'
        RootCert: "/crdb-certs/ca.crt"
        Cert: "/crdb-certs/client.root.crt"
        Key: "/crdb-certs/client.root.key"
--------------------------------------------------------------------------------
/Zitadel/example-zitadel-init-steps.yaml:
--------------------------------------------------------------------------------
# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/setup/steps.yaml
FirstInstance:
  Org:
    Human:
      # use the loginname root@zitadel.localhost
      Username: 'root'
      Password: 'RootPassword1!'
--------------------------------------------------------------------------------
/Zitadel/example-zitadel-secrets.yaml:
--------------------------------------------------------------------------------
# All possible options and their defaults: https://github.com/zitadel/zitadel/blob/main/cmd/defaults.yaml

# If not using the docker compose example, adjust these values for connecting ZITADEL to your CockroachDB
Database:
  cockroach:
    User:
      # If the user doesn't exist already, it is created
      Username: 'zitadel_user'
    Admin:
      Username: 'root'
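Illustrative sketch (not part of the repository): ZITADEL splits its configuration across these three example files in the same way its official compose example does, and they would typically be handed to the binary together with a master key, roughly like this (mount paths, the master key value and the external TLS mode are assumptions, not taken from this repo):

zitadel start-from-init \
  --config /example-zitadel-config.yaml \
  --config /example-zitadel-secrets.yaml \
  --steps /example-zitadel-init-steps.yaml \
  --masterkey "MasterkeyNeedsToHave32Characters" \
  --tlsMode external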
--------------------------------------------------------------------------------
/rClone/mount/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3.8"
services:

  rclone:
    image: rclone/rclone
    container_name: rclone_mount
    security_opt:
      - apparmor:unconfined
    restart: unless-stopped
    volumes:
      - type: bind
        source: /home/ubuntu/GoogleDrive_NAS_crypt # change this to the folder location you want to mount to (on your host)
        target: /data
        bind:
          propagation: shared
      - /etc/passwd:/etc/passwd:ro
      - /etc/group:/etc/group:ro
      - /home/ubuntu/docker/rclone/config:/config/rclone # rclone config location (i.e., the remote credentials)
      - /home/ubuntu/docker/rclone/log:/log
      - /home/ubuntu/docker/rclone/cache:/cache
    privileged: true
    cap_add:
      - SYS_ADMIN
    devices:
      - /dev/fuse # IMPORTANT: FUSE must be installed on the host first. It is used to mount the remote on the host.
    command: "mount NAME_OF_YOUR_REMOTE:/NAME_OF_FOLDER_TO_MOUNT /data --log-file /log/rclone.log --log-level ERROR --umask 002 --buffer-size 128M --checkers 20 --transfers 5 --vfs-read-ahead 1G --vfs-cache-mode writes --allow-other --allow-non-empty &"
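Illustrative sketch (not part of the repository): the mount command above expects a remote to already exist in the config directory that the compose file bind-mounts. One way to create it interactively with the same image (paths reused from the compose file; the remote name is whatever you pick in the wizard):

docker run --rm -it \
  -v /home/ubuntu/docker/rclone/config:/config/rclone \
  rclone/rclone config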
--------------------------------------------------------------------------------
/rClone/mount/windows_mount.bat:
--------------------------------------------------------------------------------
rclone mount YOUR_REMOTE: G: --dir-cache-time 1440m --poll-interval 15s --fast-list --buffer-size 256M --vfs-cache-mode writes --vfs-read-ahead 256M --vfs-read-chunk-size-limit=off --vfs-read-chunk-size=128M --tpslimit 10
--------------------------------------------------------------------------------
/rClone/remote-upload:
--------------------------------------------------------------------------------
# This script initiates a remote upload using the Docker container. Example use case: run it on a Windows machine to tell the rclone container to perform a backup.
# You can track the progress of the operation using the rclone dashboard.
# The ExcludeRule filter lets you skip certain folders (example below).
rclone rc sync/copy srcFs="YOUR_SOURCE:/data" _filter={\"ExcludeRule\":[\"rClone/**\",\"Frigate/**\",\"dump/**\",\"ISOConvert/**\",\"GooglePhotosBackup/**\"]} dstFs="YOUR_REMOTE:FOLDER" --rc-addr=https://IP-OR-DNS-OF-RCLONE --rc-user=james --rc-pass=rclone _async=true -vv --checksum --transfers=1 --checkers=4 --contimeout=60s --timeout=300s --retries=3 --low-level-retries=10 --stats=1s --stats-file-name-length=0 --fast-list
--------------------------------------------------------------------------------
/rClone/sync_script:
--------------------------------------------------------------------------------
rclone rc sync/copy srcFs=FOLDER_ON_HOST:/ dstFs=REMOTE_NAME:REMOTE_FOLDER --rc-addr=:5572 --rc-user=user --rc-pass=rclone _async=true
--------------------------------------------------------------------------------
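Illustrative sketch (not part of the repository): both rc scripts above assume an rclone instance is already serving the remote-control API with matching credentials. Started on its own, that daemon could look roughly like this (user and password are placeholders mirroring the scripts; the flags are standard rclone rcd options):

rclone rcd --rc-web-gui --rc-addr :5572 --rc-user james --rc-pass rclone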