├── .gitignore
├── LICENSE
├── README.md
├── ansible_docker_management
│   ├── README.md
│   ├── ansible.cfg
│   ├── docker_delete_container.yml
│   ├── docker_deploy_containers.yml
│   ├── docker_initial_configuration.yml
│   ├── docker_redeploy_containers.yml
│   ├── docker_sync_files.yml
│   ├── files
│   │   └── containers
│   │       ├── certbot
│   │       │   └── cloudflare.ini
│   │       ├── homepage
│   │       │   ├── bookmarks.yaml
│   │       │   ├── services.yaml
│   │       │   ├── settings.yaml
│   │       │   └── widgets.yaml
│   │       └── traefik
│   │           ├── dynamic
│   │           │   ├── 10000-infrastructure.yml
│   │           │   ├── 10200-monitoring.yml
│   │           │   ├── 10300-management.yml
│   │           │   ├── 10400-documents.yml
│   │           │   ├── 10500-download.yml
│   │           │   ├── http-routers.yml
│   │           │   ├── legacy.yml
│   │           │   ├── middlewares.yml
│   │           │   ├── tcp.yml
│   │           │   └── tls.yml
│   │           └── traefik.yml
│   ├── group_vars
│   │   └── all
│   │       ├── vars
│   │       └── vault
│   ├── host_vars
│   │   ├── docker-host-01
│   │   │   ├── authentik
│   │   │   │   └── docker-compose.yml
│   │   │   ├── dns1
│   │   │   │   └── docker-compose.yml
│   │   │   ├── portainer-agent
│   │   │   │   └── docker-compose.yml
│   │   │   └── traefik
│   │   │       └── docker-compose.yml
│   │   ├── docker-host-02
│   │   │   ├── grafana
│   │   │   │   └── docker-compose.yml
│   │   │   ├── influxdb
│   │   │   │   └── docker-compose.yml
│   │   │   ├── it-tools
│   │   │   │   └── docker-compose.yml
│   │   │   ├── portainer-agent
│   │   │   │   └── docker-compose.yml
│   │   │   └── uptime-kuma
│   │   │       └── docker-compose.yml
│   │   ├── docker-host-03
│   │   │   ├── certbot
│   │   │   │   └── docker-compose.yml
│   │   │   ├── dns2
│   │   │   │   └── docker-compose.yml
│   │   │   ├── omada
│   │   │   │   └── docker-compose.yml
│   │   │   ├── portainer-agent
│   │   │   │   └── docker-compose.yml
│   │   │   └── portainer
│   │   │       └── docker-compose.yml
│   │   ├── docker-host-04
│   │   │   ├── homepage
│   │   │   │   └── docker-compose.yml
│   │   │   └── portainer-agent
│   │   │       └── docker-compose.yml
│   │   └── docker-host-05
│   │       ├── flaresolverr
│   │       │   └── docker-compose.yml
│   │       ├── overseerr
│   │       │   └── docker-compose.yml
│   │       └── portainer-agent
│   │           └── docker-compose.yml
│   ├── inventory.yml
│   ├── roles
│   │   ├── docker_create_network
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker_deploy_containers
│   │   │   ├── README.md
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       ├── deploy_compose.yml
│   │   │       └── main.yml
│   │   ├── docker_install_requirements
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker_remove_container
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       ├── main.yml
│   │   │       └── remove_compose.yml
│   │   ├── docker_remove_container_files
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker_sync_files
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── docker_update_containers
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       ├── deploy_compose.yml
│   │   │       └── main.yml
│   │   ├── update_hosts
│   │   │   ├── README.md
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── vm_configure_docker
│   │   │   ├── README.md
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   ├── templates
│   │   │   │   └── docker-daemon.j2
│   │   │   └── vars
│   │   │       └── main.yml
│   │   ├── vm_create_docker_non-root_user
│   │   │   ├── README.md
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── vm_mount_nfs_shares
│   │       ├── README.md
│   │       └── tasks
│   │           └── main.yml
│   └── update_host.yml
└── ansible_proxmox_management
    ├── README.md
    ├── ansible.cfg
    ├── group_vars
    │   ├── all
    │   │   ├── vars
    │   │   └── vault
    │   ├── proxmox
    │   │   ├── vars.yml
    │   │   └── vms
    │   │       ├── docker-host-test-01.yml
    │   │       ├── home-assistant.yml
    │   │       ├── ubuntu-docker-host-01.yml
    │   │       └── ubuntu-docker-host-02.yml
    │   └── pve
    │       ├── README.md
    │       ├── vars.yml
    │       └── vms
    │           ├── docker-host-01.yml
    │           ├── docker-host-02.yml
    │           ├── docker-host-03.yml
    │           ├── docker-host-04.yml
    │           ├── docker-host-05.yml
    │           └── home-assistant.yml
    ├── inventory.yml
    ├── pve_configure_cluster.yml
    ├── pve_configure_network_bridge.yml
    ├── pve_configure_sdn.yml
    ├── pve_create_virtual_machines.yml
    ├── pve_delete_vm.yaml
    ├── pve_post_install.yml
    ├── pve_udpate_hosts.yml
    ├── pve_vm_post_install_cleanup.yml
    └── roles
        ├── pve_add_users
        │   ├── README.md
        │   ├── tasks
        │   │   ├── fetch_from_vault.yml
        │   │   ├── main.yml
        │   │   ├── pve_add_admin_user.yml
        │   │   ├── pve_add_ansible_user.yml
        │   │   ├── pve_create_authentik_realm.yml
        │   │   └── ssh_configuration.yml
        │   └── templates
        │       └── sudoers.j2
        ├── pve_configure_bridges
        │   ├── tasks
        │   │   ├── README.md
        │   │   └── main.yml
        │   └── vars
        │       └── main.yml
        ├── pve_create_cluster
        │   ├── README.md
        │   ├── handlers
        │   │   └── main.yml
        │   └── tasks
        │       ├── configure_ssh.yml
        │       ├── join_cluster.yml
        │       └── main.yml
        ├── pve_create_vm
        │   ├── README.md
        │   ├── handlers
        │   │   └── main.yaml
        │   ├── tasks
        │   │   ├── create_vm.yml
        │   │   ├── fetch_from_vault.yml
        │   │   ├── main.yml
        │   │   ├── start_vm.yml
        │   │   ├── update_disk_size.yml
        │   │   └── upload_cloud_init_file.yml
        │   └── templates
        │       └── docker-host.j2
        ├── pve_delete_vm
        │   ├── README.md
        │   ├── handlers
        │   │   └── main.yml
        │   └── tasks
        │       └── main.yml
        ├── pve_download_image
        │   ├── README.md
        │   └── tasks
        │       └── main.yml
        ├── pve_post_install
        │   ├── README.md
        │   ├── handlers
        │   │   └── main.yml
        │   ├── tasks
        │   │   ├── dns.yml
        │   │   ├── fail2ban.yml
        │   │   ├── main.yml
        │   │   ├── network.yml
        │   │   └── post_install_configuration.yml
        │   └── templates
        │       └── jail.local
        ├── pve_sdn_vlan_setup
        │   ├── README.md
        │   └── tasks
        │       └── main.yml
        ├── pve_update_firewall
        │   ├── README.md
        │   ├── tasks
        │   │   └── main.yml
        │   └── templates
        │       └── cluster_fw.j2
        ├── pve_update_hosts
        │   ├── README.md
        │   └── tasks
        │       └── main.yml
        └── pve_vm_post_install_cleanup
            ├── README.md
            ├── handlers
            │   └── main.yml
            └── tasks
                └── main.yml

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
settings.json

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Homelab Infrastructure

This repository contains a collection of information, Ansible playbooks and roles for managing my homelab infrastructure. The goal is to document and automate as much of the homelab setup and management as possible, minimizing manual SSH intervention and ensuring configurations are tracked and repeatable.

## Overview

My homelab is managed using a Raspberry Pi as a dedicated management node. All Infrastructure as Code (IaC) operations are initiated from this Raspberry Pi, leveraging Ansible to configure and maintain the environment.

The infrastructure consists of:

* **Raspberry Pi (Management Node):** Acts as the Ansible control node, orchestrating all configuration changes. Ansible and HashiCorp Vault are installed on it. The SSH keys used by the different playbooks are generated on this node and stored there.
* **Proxmox VE Cluster:** A cluster of Proxmox VE nodes for virtualization: two Beelink EQR5 mini PCs and one Dell OptiPlex.
* **Standalone Proxmox VE Node:** A single Proxmox VE server running on a 12-year-old Sony Vaio laptop that is still going strong. It is what I started my homelab with many years ago, and it will be put to some other use (I don't know what yet, haha).
* **Ubuntu Server Virtual Machines:** Virtual machines running Ubuntu Server, used as Docker hosts. I have been using Ubuntu for almost 20 years, which is why I keep using it; Debian is also a good choice. I will have to adapt the playbooks for Red Hat-based OSes.
This Ansible project is structured to manage both the Proxmox VE infrastructure directly and the virtual machines running on it, particularly for Docker deployments.

The README files have been generated with Gemini using AI Studio, as a test to evaluate how well it handles this type of task. I still have some cleanup to do to better explain how everything works.

Comments and feedback are welcome, and if you want to contribute code, I'm all for it.

Hope you'll find this a bit useful :)

## Proxmox | ansible_proxmox_management

The Proxmox roles are designed to be run at least once; if any changes need to happen afterwards, you can run them again and they *shouldn't* break anything.

### Before you Start

* You need Proxmox installed on one or more nodes. Ideally, all nodes are set up with the same password.
* Make sure to update `inventory.yml` to reflect your current Proxmox configuration.
* Put all nodes that need to be in a cluster in the same group. In my case, I called it `pve`.
* Refer to the Ansible documentation to learn how you can define the inventory to your liking: https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html#inventory-basics-formats-hosts-and-groups

* If you are going to create a cluster, define `pve_primary_node`. This variable indicates which node creates the cluster for the others to join; some roles also use it to run on that node only instead of on all of them. For example, the SDN configuration and virtual machine management don't need to run on all nodes.
* Make sure to create the necessary SSH keys for:
  * **The Ansible user on Proxmox**: I create a specific user that runs most of the playbooks instead of logging in via SSH as root. Root login via SSH will also be disabled, but:
  * **The pve root**: you can create root SSH keys if needed and register them, adding a bit of security.
  * **The administrator user on Proxmox (you)**: this is needed if you want some sort of access via SSH; I would not recommend it.
  * **The Ansible user on the virtual machines**: I create a different user to run the playbooks on the virtual machines. You can even, if you feel like it, have a different user per VM!
  * **The normal user on the virtual machines**: you again, so you can connect to the virtual machines using SSH or via the console on Proxmox. Useful in many cases.

### Running the playbook

* Now that we have all we need, we can start with `pve_post_install.yml`.
  * This playbook makes sure you have everything you need to start using Proxmox properly: the users will be set up, and the right repositories will be configured (if you don't have a subscription).
* You can then create a network bridge, if you have two interfaces, with `pve_configure_network_bridge.yml`. I do this because I want one physical interface for the Proxmox management UI and the cluster traffic, and the other for the virtual machine traffic.
* If, like me, you don't have two NICs on one of your nodes, you can still create a bridge and add the bridge port as `eno1.1` or `eth0.1`. It'll basically default to the VLAN interface. Do I recommend this in mission-critical environments? No, but it's a lab, so if it works, it works.
* Then you can run `pve_configure_sdn.yml`. It creates the VLAN interfaces while benefiting from the SDN feature.
* Now you can create the virtual machines as you want. Make sure to define them fully in `group_vars/pve/vms`, one file per virtual machine, and make sure to set the target node in the variables. As you are running a cluster, this value determines which node the VM is created on when its state is defined as `new`. A sketch of such a definition file is shown below.
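The VM definition files themselves are not reproduced in this section, so the sketch below is only an illustration of the idea; every key name is an assumption, and the real schema is whatever `roles/pve_create_vm` consumes:

```yaml
# group_vars/pve/vms/docker-host-06.yml (hypothetical example; key names are
# assumptions, check roles/pve_create_vm/tasks/create_vm.yml for the real ones)
vm_name: docker-host-06
state: new          # `new` tells the playbook this VM should be created
node: pve-01        # cluster node the VM will be created on
vm_id: 116
cores: 4
memory: 8192        # MiB
disk_size: 60G
bridge: vmbr1       # VM-traffic bridge created by pve_configure_network_bridge.yml
vlan_tag: 11        # compute_vlan, see the VLAN definitions in group_vars
```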
## Docker | ansible_docker_management

### Before you start

* Define all the containers you want to have in the `host_vars` directory, the same way I did. I do it this way because it makes it easy to define where each container should run (on which node).
* If you have a container with configuration files, you can add them in the `files/containers` directory.
* If you have the same container on multiple nodes with the same directory name, the files will be copied to each node, so make sure to adjust the directory names.
* As we use HashiCorp Vault, make sure to have a vault called `docker` and store each secret the same way you would define it in a `.env` file; see the sketch after this list.
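For illustration, fetching one of those secrets could look roughly like this with the `community.hashi_vault` collection that `ansible.cfg` configures. The mount name `docker` comes from the bullet above, but the secret path (`authentik`) and key (`PG_PASS`) are assumptions; match them to however you mirror your `.env` files:

```yaml
# Hypothetical sketch: read PG_PASS from the `docker` KV v2 store.
# Authentication (token) comes from the [hashi_vault_collection]
# section in ansible.cfg.
- name: Fetch the Authentik Postgres password from Vault
  ansible.builtin.set_fact:
    pg_pass: "{{ lookup('community.hashi_vault.hashi_vault',
                        'secret=docker/data/authentik:PG_PASS') }}"
  no_log: true   # never print secrets in the play output
```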
### Running the playbooks

* Start by installing Docker with `docker_initial_configuration.yml`; it installs Docker and adds the necessary requirements.
* You can then either use `docker_deploy_containers.yml` to deploy everything at once, or `docker_redeploy_containers.yml` if you want to deploy a specific container.

--------------------------------------------------------------------------------
/ansible_docker_management/README.md:
--------------------------------------------------------------------------------
# Ansible Docker Management

This repository provides Ansible playbooks and roles to automate the deployment, configuration, and management of Docker containers on multiple hosts. It simplifies the process of setting up and maintaining containerized applications, handling tasks such as network creation, container deployment, file synchronization, and updates.

## Purpose

This project aims to streamline the management of Docker environments, enabling efficient and consistent deployments across your infrastructure. It's particularly beneficial for managing complex container setups, ensuring that configurations are synchronized, and simplifying updates.

## How to Use

These playbooks are designed to be executed in a specific order for optimal setup and management of your Docker environment. It is crucial that the network configuration on the Docker hosts aligns with the VLAN definitions within the virtual machines, especially if you're utilizing macvlan networks within Docker. This ensures seamless communication between the virtual machines and the Docker containers residing on them.

The recommended execution sequence is:

1. **`docker_initial_configuration.yml`**: *Run this playbook first*. It prepares the Docker hosts for container deployments by installing Docker, configuring necessary dependencies, creating Docker networks, mounting NFS shares (if needed), and setting up a non-root user for Docker management.

2. **`docker_deploy_containers.yml`**: This playbook deploys containers based on the `docker-compose.yml` files located in the `host_vars` directory. It also handles fetching secrets from HashiCorp Vault if configured. This playbook will not redeploy existing containers; it is used to ensure that all containers defined in your code are the ones deployed on your hosts.

3. **`docker_sync_files.yml`**: This playbook synchronizes configuration files from your Ansible repository to the Docker hosts, ensuring that the container configurations on the hosts are up to date. Use it to manually update the configuration files for one or more containers.

4. **`docker_redeploy_containers.yml`**: Use this playbook to update or redeploy specific containers. It removes the existing stack and deploys the latest version from the `docker-compose.yml` files. This playbook requires defining the `container_names` variable. Use it to (re)deploy one or more containers as needed.

5. **`docker_delete_container.yml`**: Use this playbook to remove specific containers and their associated data, including their Docker volumes. It also requires defining the `container_names` variable.

6. **`update_host.yml`**: This playbook updates the system packages on the Docker hosts, ensuring that they are running the latest available versions. It runs on one host at a time and waits 1 minute to give each host time to reboot before the next one is updated; a sketch of this pattern follows the list.
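The playbook file is not reproduced in this section, so the following is only an illustrative sketch of the one-host-at-a-time pattern described in item 6, assuming Debian/Ubuntu hosts:

```yaml
# Illustrative sketch, not the actual contents of update_host.yml.
- name: Update Docker hosts one at a time
  hosts: docker-hosts
  become: true
  serial: 1                     # never update more than one host at once
  tasks:
    - name: Upgrade all packages
      ansible.builtin.apt:
        upgrade: dist
        update_cache: true

    - name: Check whether a reboot is required
      ansible.builtin.stat:
        path: /var/run/reboot-required
      register: reboot_required_file

    - name: Reboot if required
      ansible.builtin.reboot:
      when: reboot_required_file.stat.exists

    - name: Give the host a minute to settle before moving to the next one
      ansible.builtin.pause:
        minutes: 1
```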
## Directory Structure

* **`files/containers`**: Contains configuration files for the Docker containers, mirroring the directory structure in `host_vars`.
* **`group_vars`**: Holds variable definitions for different groups of hosts. The `all/vault` file stores sensitive data such as API keys and passwords (encrypted using Ansible Vault).
* **`host_vars`**: Contains host-specific variables and, importantly, the `docker-compose.yml` files for each container on each host.
* **`roles`**: Contains Ansible roles – reusable units of automation – that perform specific tasks.
* **Playbooks**: The `.yml` files in the root directory are the Ansible playbooks.

## Requirements

* Docker on the target hosts (the `docker_initial_configuration.yml` playbook installs it and its dependencies).
* HashiCorp Vault (optional, but recommended for secrets management).
* SSH access from the Ansible control node to the Docker hosts.
* A correctly configured NFS server and shares (if used).

## Important Notes

* **Network Consistency**: The network configurations defined in your `docker-compose.yml` files, especially for macvlan networks, *must* be consistent with the VLAN setup on your Proxmox virtual machines. This ensures that containers can communicate correctly within their assigned VLANs.
* **Configuration Precedence**: Host-specific variables in `host_vars` override group variables in `group_vars`. This allows for flexible customization per host.
* **Vault Encryption**: Encrypt sensitive data using Ansible Vault in the `group_vars/all/vault` file.
* **Inventory**: Define your Docker hosts in the `inventory.yml` file.

--------------------------------------------------------------------------------
/ansible_docker_management/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]

# Specifies the location of your inventory file (where your hosts are defined).
inventory = ./inventory.yml

# Specifies the file containing the vault password for decrypting encrypted files
# (like vault-encrypted variables). Alternatively, use --ask-vault-pass or set the
# ANSIBLE_VAULT_PASSWORD_FILE environment variable.
vault_password_file = ~/.vault-pass

# Specifies how Ansible should handle Python interpreter detection on managed hosts.
# 'auto_silent' is generally recommended for Ansible >= 2.10.
# It automatically detects the best Python interpreter and uses it silently.
interpreter_python = auto_silent

# Uncomment if you need to specify which interpreter should be used in case of conflicts
# ansible_python_interpreter = /usr/bin/python3

# Disables SSH host key checking (use with caution!)
# REMOVE OR SET TO 'True' FOR SECURITY
host_key_checking = False

[inventory]
# Enables the YAML plugin for inventory parsing.
enable_plugins = yaml

[hashi_vault_collection]
# Configuration for the 'hashi_vault' Ansible collection (if you are using it).
# Specifies the authentication method to use for connecting to HashiCorp Vault.
auth_method = token
token_path = /home/homelab

--------------------------------------------------------------------------------
/ansible_docker_management/docker_delete_container.yml:
--------------------------------------------------------------------------------
- name: Remove containers and their data
  hosts: docker-hosts
  become: true
  vars:
    container_names:
      - doku-wiki

  roles:
    - docker_remove_container
    - docker_remove_container_files

--------------------------------------------------------------------------------
/ansible_docker_management/docker_deploy_containers.yml:
--------------------------------------------------------------------------------
---
- name: Deploy Docker containers
  hosts: docker-hosts
  # become: true
  roles:
    - role: docker_deploy_containers

--------------------------------------------------------------------------------
/ansible_docker_management/docker_initial_configuration.yml:
--------------------------------------------------------------------------------
---
- name: Configure the Docker environment before deploying containers
  hosts: docker-hosts
  roles:
    - vm_configure_docker            # Install Docker
    - docker_install_requirements    # Install the requirements to run certain modules
    - docker_create_network          # Create the necessary networks
    - vm_mount_nfs_shares
    - vm_create_docker_non-root_user # Create a non-root user for the directory and for running certain containers

--------------------------------------------------------------------------------
/ansible_docker_management/docker_redeploy_containers.yml:
--------------------------------------------------------------------------------
---
- name: Deploy or redeploy containers
  hosts: docker-hosts
  become: true
  vars:
    container_names:
      - homepage
      - authentik
      - traefik
      - it-tools

  roles:
    - docker_update_containers

--------------------------------------------------------------------------------
/ansible_docker_management/docker_sync_files.yml:
--------------------------------------------------------------------------------
---
- name: Sync Docker containers data
  hosts: docker-hosts-test
  # hosts: docker-hosts
  become: true
  vars:
    container_names:
      - traefik
      - homepage
  roles:
    - docker_sync_files

--------------------------------------------------------------------------------
/ansible_docker_management/files/containers/certbot/cloudflare.ini:
--------------------------------------------------------------------------------
dns_cloudflare_api_token = THE_CLOUDFLARE_API_TOKEN_FOR_THE_DOMAIN

--------------------------------------------------------------------------------
/ansible_docker_management/files/containers/homepage/bookmarks.yaml:
--------------------------------------------------------------------------------
- Productivity:
    - Github:
        - icon: github.png
          abbr: GH
          href: https://github.com/
    - Outlook:
        - icon: microsoft-outlook.png
          href: https://outlook.office.com

- Social:
    - Reddit:
        - icon: reddit.png
          href: https://reddit.com/
          description: The front page of the internet

- Entertainment:
    - YouTube:
        - icon: youtube.png
          abbr: YT
          href: https://youtube.com/

- Self Hosting:
    - Self Hosted:
        - icon: selfh-st.png
          href: https://selfh.st

    - Awesome Self-Hosted:
        - icon: https://awesome-selfhosted.net/_static/logo.svg
          abbr: ASH
          href: https://awesome-selfhosted.net/

- AI tools:
    - Gemini:
        - icon: google-gemini
          href: https://gemini.google.com

    - Google AI Studio:
        - icon: google-gemini
          href: https://aistudio.google.com

    - NotebookLM:
        - icon: google-gemini
          href: https://notebooklm.google.com

    - ChatGPT:
        - icon: chatgpt
          href: https://chatgpt.com

--------------------------------------------------------------------------------
/ansible_docker_management/files/containers/homepage/services.yaml:
--------------------------------------------------------------------------------
---
- Media:
    - Plex:
        icon: plex.png
        href: https://plex.example.com/
        description: Plex Media Server

    - Overseerr:
        icon: overseerr.png
        href: https://overseerr.example.com/
        description: Find new Series and Movies

    - Sonarr:
        icon: sonarr.png
        href: https://series.example.com
        description: Series

    - Radarr:
        icon: radarr.png
        href: https://movies.example.com/
        description: Movies

    - Bazarr:
        icon: bazarr.png
        href: https://bazarr.example.com/
        description: Subtitles

    - Readarr:
        icon: readarr.png
        href: https://readarr.example.com/
        description: Books

- Home Management:
    - Home Assistant:
        icon: home-assistant.png
        href: https://homeassistant.example.com
        description: Smart-Home Management

    - Nextcloud:
        icon: nextcloud
        href: https://nextcloud.example.com
        description: Home Cloud

    - Mealie:
        icon: mealie.png
        href: https://mealie.example.com
        description: Meal Prep and Groceries management

- Networking:
    - DNS 1:
        icon: technitium.png
        href: https://dns1.example.com
        description: Main DNS Server

    - DNS 2:
        icon: technitium.png
        href: https://dns2.example.com
        description: Secondary DNS Server

    - OPNsense:
        icon: opnsense.png
        href: https://opnsense.example.com
        description: Firewall

    - Traefik Node 01:
        icon: traefik.png
        href: https://traefik.example.com
        description: Proxy

    - TP-Link Omada:
        icon: omada
        href: https://omada.example.com
        description: Omada Software Controller

- Monitoring and tools:
    - Uptime Kuma:
        icon: uptime-kuma.png
        href:
https://uptime-kuma.example.com 79 | description: Monitor Uptimes 80 | 81 | - Grafana: 82 | icon: grafana.png 83 | href: https://grafana.example.com 84 | description: Monitoring Dashboard 85 | 86 | - InfluxDB: 87 | icon: influxdb.png 88 | href: https://influxdb.example.com 89 | description: Monitoring Dashboard 90 | 91 | - IT tools: 92 | icon: it-tools.png 93 | href: https://tools.example.com 94 | description: Collection of very useful tools 95 | 96 | - Wazuh Dashboard: 97 | icon: wazuh.png 98 | href: https://wazuh-dashboard.example.com 99 | description: Dashboard for Wazuh 100 | 101 | - Infrastructure: 102 | - Proxmox: 103 | icon: proxmox.png 104 | href: https://proxmox.example.com 105 | description: Proxmox Cluster 106 | 107 | - NAS1: 108 | icon: synology-dsm 109 | href: https://nas1.example.com 110 | description: Synology NAS 111 | 112 | - NAS2: 113 | icon: synology-dsm 114 | href: https://nas2.example.com 115 | description: Synology NAS 116 | 117 | - Portainer: 118 | icon: portainer.png 119 | href: https://portainer.example.com/#!/home 120 | description: Portainer on docker-host-03 121 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/homepage/settings.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://github.com/benphelps/homepage/wiki/Settings 4 | 5 | providers: 6 | openweathermap: openweathermapapikey 7 | weatherapi: weatherapiapikey 8 | 9 | headerStyle: clean 10 | hideVersion: true 11 | 12 | layout: 13 | Media: 14 | style: row 15 | columns: 3 16 | Home Management: 17 | style: row 18 | columns: 3 19 | Monitoring: 20 | style: row 21 | columns: 4 22 | Networking: 23 | style: row 24 | columns: 4 25 | Infrastructure: 26 | style: row 27 | columns: 4 28 | 29 | background: 30 | # blur: sm 31 | #saturate: 50 32 | brightness: 10 33 | opacity: 30 34 | 35 | # cardBlur: md 36 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/homepage/widgets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://github.com/benphelps/homepage/wiki/Information-Widgets 4 | 5 | - resources: 6 | cpu: true 7 | memory: true 8 | - resources: 9 | expanded: true 10 | disk: 11 | - / 12 | 13 | - search: 14 | provider: google 15 | target: _blank 16 | 17 | - datetime: 18 | text_size: xl 19 | format: 20 | dateStyle: long 21 | timeStyle: short 22 | hour12: false 23 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/10000-infrastructure.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | dns2: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`dns2.example.com`)" 7 | service: dns2 8 | middlewares: 9 | # - "forwardAuth-authentik" 10 | - "default-headers" 11 | 12 | proxmox: 13 | entryPoints: 14 | - websecure 15 | rule: "Host(`proxmox.example.com`)" 16 | service: proxmox 17 | middlewares: 18 | # - "forwardAuth-authentik" 19 | - "default-headers" 20 | 21 | nas1: 22 | entryPoints: 23 | - websecure 24 | rule: "Host(`nas1.example.com`)" 25 | middlewares: 26 | - "default-headers" 27 | tls: {} 28 | service: nas1 29 | 30 | nas2: 31 | entryPoints: 32 | - websecure 33 | rule: "Host(`nas2.example.com`)" 34 | 
middlewares: 35 | - "default-headers" 36 | tls: {} 37 | service: nas2 38 | 39 | homeassistant: 40 | # For Homeassistant config, check: https://www.home-assistant.io/integrations/http/#reverse-proxies 41 | # This relies on Homeassistant using http. No certs are needed in the Homeassistant config. 42 | entryPoints: 43 | - websecure 44 | rule: "Host(`homeassistant.example.com`)" 45 | middlewares: 46 | - "default-headers" 47 | tls: {} 48 | service: homeassistant 49 | 50 | services: 51 | dns2: 52 | loadBalancer: 53 | servers: 54 | - url: http://10.54.54.54:5380/ 55 | proxmox: 56 | loadBalancer: 57 | servers: 58 | - url: https://10.0.10.11:8006/ 59 | 60 | nas1: 61 | loadBalancer: 62 | servers: 63 | - url: https://10.0.10.20:5001 64 | passHostHeader: true 65 | 66 | nas2: 67 | loadBalancer: 68 | servers: 69 | - url: https://10.0.10.21:5001 70 | passHostHeader: true 71 | 72 | homeassistant: 73 | loadBalancer: 74 | servers: 75 | - url: http://10.0.14.10:8123 76 | passHostHeader: true 77 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/10200-monitoring.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | grafana: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`grafana.example.com`)" 7 | service: grafana 8 | middlewares: 9 | # - "forwardAuth-authentik" 10 | - "default-headers" 11 | 12 | uptime-kuma: 13 | entryPoints: 14 | - websecure 15 | rule: "Host(`uptime-kuma.example.com`)" 16 | service: uptime-kuma 17 | middlewares: 18 | # - "forwardAuth-authentik" 19 | - "default-headers" 20 | 21 | influxdb: 22 | entryPoints: 23 | - websecure 24 | rule: "Host(`influxdb.example.com`)" 25 | service: influxdb 26 | middlewares: 27 | # - "forwardAuth-authentik" 28 | - "default-headers" 29 | tools: 30 | entryPoints: 31 | - websecure 32 | rule: "Host(`tools.example.com`)" 33 | service: tools 34 | middlewares: 35 | # - "forwardAuth-authentik" 36 | - "default-headers" 37 | wazuh: 38 | entryPoints: 39 | - websecure 40 | rule: "Host(`wazuh-dashboard.example.com`)" 41 | service: wazuh 42 | middlewares: 43 | # - "forwardAuth-authentik" 44 | - "default-headers" 45 | services: 46 | uptime-kuma: 47 | loadBalancer: 48 | servers: 49 | - url: http://10.0.12.11:3001/ 50 | grafana: 51 | loadBalancer: 52 | servers: 53 | - url: http://10.0.12.12:3000/ 54 | influxdb: 55 | loadBalancer: 56 | servers: 57 | - url: http://10.0.12.13:8086/ 58 | tools: 59 | loadBalancer: 60 | servers: 61 | - url: http://10.0.12.14:80/ 62 | 63 | wazuh: 64 | loadBalancer: 65 | servers: 66 | - url: https://10.0.12.10:5601/ -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/10300-management.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | omada: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`omada.example.com`)" 7 | middlewares: 8 | - "default-headers" 9 | tls: {} 10 | service: omada 11 | 12 | portainer: 13 | entryPoints: 14 | - websecure 15 | rule: "Host(`portainer.example.com`)" 16 | service: portainer 17 | middlewares: 18 | # - "forwardAuth-authentik" 19 | - "default-headers" 20 | 21 | services: 22 | omada: 23 | loadBalancer: 24 | servers: 25 | - url: https://10.0.12.10:8043 26 | passHostHeader: true 27 | 28 | portainer: 29 | loadBalancer: 30 | servers: 31 | - url: https://10.0.11.13:9443/ 32 | passHostHeader: true 33 | 
-------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/10400-documents.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | homepage: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`homepage.example.com`)" 7 | service: homepage 8 | middlewares: 9 | # - "forwardAuth-authentik" 10 | - "default-headers" 11 | 12 | services: 13 | homepage: 14 | loadBalancer: 15 | servers: 16 | - url: http://10.0.12.40:3000/ 17 | passHostHeader: true 18 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/10500-download.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | flaresolverr: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`flaresolverr.example.com`)" 7 | service: flaresolverr 8 | middlewares: 9 | # - "forwardAuth-authentik" 10 | - "default-headers" 11 | prowlarr: 12 | entryPoints: 13 | - websecure 14 | rule: "Host(`prowlarr.example.com`)" 15 | service: prowlarr 16 | middlewares: 17 | # - "forwardAuth-authentik" 18 | - "default-headers" 19 | overseerr: 20 | entryPoints: 21 | - websecure 22 | rule: "Host(`overseerr.example.com`)" 23 | service: overseerr 24 | middlewares: 25 | # - "forwardAuth-authentik" 26 | - "default-headers" 27 | 28 | services: 29 | flaresolverr: 30 | loadBalancer: 31 | servers: 32 | - url: http://10.0.13.50:8191/ 33 | prowlarr: 34 | loadBalancer: 35 | servers: 36 | - url: http://10.0.13.52:9696/ 37 | overseerr: 38 | loadBalancer: 39 | servers: 40 | - url: http://10.0.13.51:5055/ 41 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/http-routers.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | traefik-dashboard: 4 | rule: "Host(`traefik.example.com`)" 5 | service: api@internal 6 | entryPoints: 7 | - websecure 8 | middlewares: "default-headers" 9 | tls: 10 | options: "modern" 11 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/legacy.yml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | plex: 4 | entryPoints: 5 | - websecure 6 | rule: "Host(`plex.example.com`)" 7 | middlewares: 8 | - "default-headers" 9 | tls: {} 10 | service: plex 11 | 12 | jellyfin: 13 | entryPoints: 14 | - websecure 15 | rule: "Host(`jellyfin.example.com`)" 16 | middlewares: 17 | - "default-headers" 18 | tls: {} 19 | service: jellyfin 20 | 21 | movies: 22 | entryPoints: 23 | - websecure 24 | rule: "Host(`movies.example.com`)" 25 | middlewares: 26 | - "default-headers" 27 | tls: {} 28 | service: movies 29 | 30 | series: 31 | entryPoints: 32 | - websecure 33 | rule: "Host(`series.example.com`)" 34 | middlewares: 35 | - "default-headers" 36 | tls: {} 37 | service: series 38 | 39 | readarr: 40 | entryPoints: 41 | - websecure 42 | rule: "Host(`readarr.example.com`)" 43 | middlewares: 44 | - "default-headers" 45 | tls: {} 46 | service: readarr 47 | 48 | bazarr: 49 | entryPoints: 50 | - websecure 51 | rule: "Host(`bazarr.example.com`)" 52 | middlewares: 53 | - "default-headers" 54 | tls: {} 55 | service: bazarr 56 | 57 | nextcloud: 58 | entryPoints: 59 | - websecure 60 | rule: "Host(`nextcloud.example.com`)" 61 | middlewares: 
62 | - nextcloud-chain 63 | - "default-headers" 64 | tls: {} 65 | service: nextcloud 66 | 67 | services: 68 | plex: 69 | loadBalancer: 70 | servers: 71 | - url: "https://10.0.0.60:32400" 72 | passHostHeader: true 73 | 74 | jellyfin: 75 | loadBalancer: 76 | servers: 77 | - url: "http://10.0.0.60:8096" 78 | passHostHeader: true 79 | 80 | movies: 81 | loadBalancer: 82 | servers: 83 | - url: "http://10.0.0.60:7878" 84 | passHostHeader: true 85 | 86 | series: 87 | loadBalancer: 88 | servers: 89 | - url: "http://10.0.0.60:8989" 90 | passHostHeader: true 91 | readarr: 92 | loadBalancer: 93 | servers: 94 | - url: "http://10.0.0.60:8787" 95 | passHostHeader: true 96 | bazarr: 97 | loadBalancer: 98 | servers: 99 | - url: "http://10.0.0.60:6767" 100 | passHostHeader: true 101 | 102 | nextcloud: 103 | loadBalancer: 104 | servers: 105 | - url: "http://10.0.0.60:11000" 106 | passHostHeader: true 107 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/middlewares.yml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | default-headers: 4 | headers: 5 | frameDeny: true 6 | browserXssFilter: true 7 | contentTypeNosniff: true 8 | forceSTSHeader: true 9 | stsIncludeSubdomains: true 10 | stsPreload: true 11 | stsSeconds: 15552000 12 | customFrameOptionsValue: SAMEORIGIN 13 | customRequestHeaders: 14 | X-Forwarded-Proto: https 15 | Strict-Transport-Security: "max-age=63072000" 16 | 17 | redirect-to-https: 18 | redirectScheme: 19 | scheme: https 20 | permanent: true 21 | 22 | forwardAuth-authentik: 23 | forwardAuth: 24 | address: "http://authentik_server:9000/outpost.goauthentik.io/auth/traefik" 25 | trustForwardHeader: true 26 | authResponseHeaders: 27 | - X-authentik-username 28 | - X-authentik-groups 29 | - X-authentik-email 30 | - X-authentik-name 31 | - X-authentik-uid 32 | - X-authentik-jwt 33 | - X-authentik-meta-jwks 34 | - X-authentik-meta-outpost 35 | - X-authentik-meta-provider 36 | - X-authentik-meta-app 37 | - X-authentik-meta-version 38 | 39 | nextcloud-secure-headers: 40 | headers: 41 | hostsProxyHeaders: 42 | - "X-Forwarded-Host" 43 | referrerPolicy: "same-origin" 44 | nextcloud-chain: 45 | chain: 46 | middlewares: 47 | # - ... (e.g. 
rate limiting middleware) 48 | - redirect-to-https 49 | - nextcloud-secure-headers 50 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/tcp.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/marwan-belgueddab/homelab/e7dece17a9695ab1c9b5a3479a71db6122307ce8/ansible_docker_management/files/containers/traefik/dynamic/tcp.yml -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/dynamic/tls.yml: -------------------------------------------------------------------------------- 1 | tls: 2 | stores: 3 | default: 4 | defaultCertificate: 5 | certFile: /letsencrypt/live/example.com/fullchain.pem 6 | keyFile: /letsencrypt/live/example.com/privkey.pem 7 | certificates: 8 | - certFile: /letsencrypt/live/example.com/fullchain.pem 9 | keyFile: /letsencrypt/live/example.com/privkey.pem 10 | stores: 11 | - default 12 | options: 13 | modern: 14 | minVersion: VersionTLS13 15 | curvePreferences: ["X25519", "CurveP256", "CurveP384"] 16 | -------------------------------------------------------------------------------- /ansible_docker_management/files/containers/traefik/traefik.yml: -------------------------------------------------------------------------------- 1 | global: 2 | checkNewVersion: false 3 | sendAnonymousUsage: false 4 | serversTransport: 5 | insecureSkipVerify: true 6 | 7 | entryPoints: 8 | web: 9 | address: ":80" 10 | http: 11 | redirections: 12 | entryPoint: 13 | to: websecure 14 | scheme: https 15 | permanent: true 16 | 17 | websecure: 18 | address: ":443" 19 | http: 20 | tls: 21 | domains: 22 | - main: example.com 23 | sans: 24 | - "*.example.com" 25 | 26 | providers: 27 | file: 28 | directory: "/dynamic" 29 | watch: true 30 | docker: 31 | watch: true 32 | defaultRule: 'Host(`{{ index .Labels "com.docker.compose.service"}}.example.com`)' 33 | endpoint: "tcp://docker-socket-proxy:2375" 34 | exposedByDefault: false 35 | 36 | api: 37 | dashboard: true 38 | insecure: true 39 | log: 40 | level: DEBUG 41 | -------------------------------------------------------------------------------- /ansible_docker_management/group_vars/all/vars: -------------------------------------------------------------------------------- 1 | ########################### 2 | ## GENERAL CONFIGURATION ## 3 | ########################### 4 | timezone: "America/Toronto" 5 | locale: "en_US.UTF-8" 6 | # DNS settings 7 | dns_servers: 8 | - "10.53.53.53" 9 | - "1.1.1.1" 10 | domain: "example.com" 11 | dns_over_tls_servers: 12 | - "tls://10.53.53.53" 13 | - "tls://1.1.1.1" 14 | dns_over_https_servers: 15 | - "https://dns1.{{ domain }}" 16 | - "https://dns2.{{ domain }}" 17 | 18 | # VLAN Definitions - This is used when creating the SDN VLANs on Proxmox 19 | gateway_vlan: "10.0.0.0/24" 20 | management_vlan: "10.0.10.0/24" # proxmox host, nas, ansible control node 21 | compute_vlan: "10.0.11.0/24" # virtual machine, docker host etc 22 | service_restricted_vlan: "10.0.12.0/24" # Services with no access to internet at all 23 | service_external_vlan: "10.0.13.0/24" # Services with internet access 24 | home_assistant_vlan: "10.0.14.0/28" # Main interface for Home Assistant 25 | trusted_vlan: "10.0.15.0/24" # Laptop, PC, Phone 26 | multimedia_vlan: "10.0.16.0/24" # TV, Plex 27 | iot_vlan: "10.0.17.0/24" # IoT devices without internet 28 | 29 | # VLAN IDs - Used for SDN Vlans 30 | vlan_definitions: 31 | - id: 
10
    name: management_vlan
  - id: 11
    name: compute_vlan
  - id: 12
    name: service_restricted_vlan
  - id: 13
    name: service_external_vlan
  - id: 14
    name: home_assistant_vlan
  - id: 15
    name: trusted_vlan
  - id: 16
    name: multimedia_vlan
  - id: 17
    name: iot_vlan

###################
## VM MANAGEMENT ##
###################

ansible_user: "{{ vm_ansible_user }}" # As defined in the ansible_proxmox_management playbooks, or set manually if you have already created a user for Ansible to use
ansible_ssh_private_key_file: "{{ vm_ansible_ssh_private_key_file }}" # Only SSH keys are used
nfs_server: "10.0.10.50" # Change to the IP address of your NFS server
nfs_share_path: "/volume1/letsencrypt" # The path on the nfs_server (volume1 on a Synology, for example); can be anything you want
nfs_share_mount_point: "/mnt/nfs/letsencrypt" # This is the path on the docker-host, the virtual machine.
nfs_docker_backup_path: "/volume1/docker-backup"
nfs_docker_backup_mount_point: "/mnt/nfs/docker-backup"

#######################
## Docker MANAGEMENT ##
#######################

# Directory on the docker-host where the container files are saved.
# Note: hyphens are not valid in Ansible variable names, so this key
# must use an underscore to be usable as a variable.
docker_data: /opt/docker-data/
docker_networks:
  - name: proxy
    driver: bridge
    subnet: 10.100.0.0/16
    gateway: 10.100.0.1
    iprange: 10.100.0.0/16

  - name: authentik_backend
    driver: bridge
    subnet: 10.111.0.0/24
    gateway: 10.111.0.1
    iprange: 10.111.0.0/24

  - name: gateway_vlan
    driver: macvlan
    subnet: "{{ gateway_vlan }}"
    gateway: 10.0.0.1
    iprange: "{{ gateway_vlan }}"
    options:
      parent: eth0.1 # If for some reason you want to connect to VLAN 1, change to accommodate your setup

  - name: service_restricted_vlan
    driver: macvlan
    subnet: "{{ service_restricted_vlan }}"
    gateway: 10.0.12.1
    iprange: "{{ service_restricted_vlan }}"
    options:
      parent: "ens19" # Make sure that you have defined the right bridge on the virtual machine configuration as net1.

  - name: service_external_vlan
    driver: macvlan
    subnet: "{{ service_external_vlan }}"
    gateway: 10.0.13.1
    iprange: "{{ service_external_vlan }}"
    options:
      parent: "ens20" # Make sure that you have defined the right bridge on the virtual machine configuration as net2.
101 | 102 | - name: multimedia_vlan 103 | driver: macvlan 104 | subnet: "{{ multimedia_vlan }}" 105 | gateway: 10.0.16.1 106 | iprange: "{{ multimedia_vlan }}" 107 | options: 108 | parent: "ens21" 109 | -------------------------------------------------------------------------------- /ansible_docker_management/group_vars/all/vault: -------------------------------------------------------------------------------- 1 | # ENSURE TO MODIFY THE VALUES TO YOUR VARIABLES AND YOUR LIKING 2 | # Add here values that you want secret and encrypt it 3 | 4 | 5 | ################### 6 | ## VM MANAGEMENT ## 7 | ################### 8 | 9 | ansible_user: "{{ vm_ansible_user }}" 10 | ansible_ssh_private_key_file: "{{ vm_ansible_ssh_private_key_file }}" 11 | nfs_server: "10.0.10.50" 12 | nfs_share_path: "/volume1/letsencrypt" 13 | nfs_share_mount_point: "/mnt/nfs/letsencrypt" 14 | nfs_docker_backup_path: "/volume1/docker-backup" 15 | nfs_docker_backup_mount_point: "/mnt/nfs/docker-backup" -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-01/authentik/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | postgresql: 4 | image: docker.io/library/postgres:16-alpine 5 | restart: unless-stopped 6 | healthcheck: 7 | test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"] 8 | start_period: 20s 9 | interval: 30s 10 | retries: 5 11 | timeout: 5s 12 | volumes: 13 | - /opt/docker-data/authentik/postgresql:/var/lib/postgresql/data 14 | environment: 15 | POSTGRES_PASSWORD: ${PG_PASS} 16 | POSTGRES_USER: ${PG_USER} 17 | POSTGRES_DB: ${PG_DB:-authentik} 18 | 19 | networks: 20 | - authentik_backend 21 | 22 | redis: 23 | image: docker.io/library/redis:alpine 24 | command: --save 60 1 --loglevel warning 25 | restart: unless-stopped 26 | healthcheck: 27 | test: ["CMD-SHELL", "redis-cli ping | grep PONG"] 28 | start_period: 20s 29 | interval: 30s 30 | retries: 5 31 | timeout: 3s 32 | volumes: 33 | - /opt/docker-data/authentik/redis:/data 34 | 35 | networks: 36 | - authentik_backend 37 | 38 | server: 39 | image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2024.12.2} 40 | restart: unless-stopped 41 | command: server 42 | environment: 43 | AUTHENTIK_REDIS__HOST: redis 44 | AUTHENTIK_POSTGRESQL__HOST: postgresql 45 | AUTHENTIK_POSTGRESQL__USER: ${PG_USER} 46 | AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik} 47 | AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS} 48 | AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY} 49 | AUTHENTIK_BOOTSTRAP_PASSWORD: ${AUTHENTIK_BOOTSTRAP_PASSWORD} 50 | AUTHENTIK_BOOTSTRAP_TOKEN: ${AUTHENTIK_BOOTSTRAP_TOKEN} 51 | AUTHENTIK_ERROR_REPORTING__ENABLED: false 52 | AUTHENTIK_DISABLE_UPDATE_CHECK: true 53 | AUTHENTIK_DISABLE_STARTUP_ANALYTICS: true 54 | volumes: 55 | - /opt/docker-data/authentik/media:/media 56 | - /opt/docker-data/authentik/custom-templates:/templates 57 | depends_on: 58 | postgresql: 59 | condition: service_healthy 60 | redis: 61 | condition: service_healthy 62 | networks: 63 | - proxy 64 | - authentik_backend 65 | 66 | labels: 67 | traefik.enable: true 68 | traefik.http.routers.authentik.entrypoints: websecure 69 | traefik.docker.network: proxy 70 | traefik.http.routers.authentik.rule: Host(`auth.example.com`) 71 | traefik.http.services.authentik.loadbalancer.server.port: 9000 72 | traefik.http.routers.authentik.middlewares: default-headers@file 73 | 74 | worker: 75 | image: 
${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2024.12.2} 76 | restart: unless-stopped 77 | command: worker 78 | environment: 79 | AUTHENTIK_REDIS__HOST: redis 80 | AUTHENTIK_POSTGRESQL__HOST: postgresql 81 | AUTHENTIK_POSTGRESQL__USER: ${PG_USER} 82 | AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik} 83 | AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS} 84 | AUTHENTIK_BOOTSTRAP_PASSWORD: ${AUTHENTIK_BOOTSTRAP_PASSWORD} 85 | AUTHENTIK_BOOTSTRAP_TOKEN: ${AUTHENTIK_BOOTSTRAP_TOKEN} 86 | AUTHENTIK_SECRET_KEY: ${AUTHENTIK_SECRET_KEY} 87 | AUTHENTIK_ERROR_REPORTING__ENABLED: false 88 | AUTHENTIK_DISABLE_UPDATE_CHECK: true 89 | AUTHENTIK_DISABLE_STARTUP_ANALYTICS: true 90 | user: root 91 | volumes: 92 | - /opt/docker-data/authentik/media:/media 93 | - /opt/docker-data/authentik/certs:/certs 94 | - /opt/docker-data/authentik/custom-templates:/templates 95 | depends_on: 96 | postgresql: 97 | condition: service_healthy 98 | redis: 99 | condition: service_healthy 100 | networks: 101 | - authentik_backend 102 | 103 | networks: 104 | proxy: 105 | external: true 106 | authentik_backend: 107 | external: true 108 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-01/dns1/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | dns1: 3 | container_name: dns1 4 | hostname: dns1.example.com 5 | image: technitium/dns-server:latest 6 | user: 1000:1000 7 | security_opt: 8 | - no-new-privileges:true 9 | volumes: 10 | - /opt/docker-data/dns1:/etc/dns 11 | restart: always 12 | sysctls: 13 | - net.ipv4.ip_local_port_range=1024 65000 14 | networks: 15 | gateway_vlan: 16 | ipv4_address: 10.53.53.53 17 | labels: 18 | traefik.enable: true 19 | traefik.docker.network: proxy # Used for Traefik to find the container while Technitium is exposed on a different network 20 | traefik.http.routers.dns1.entrypoints: websecure 21 | traefik.http.services.dns1.loadbalancer.server.port: 5380 22 | traefik.http.routers.dns1.rule: Host(`dns1.example.com`) 23 | traefik.http.routers.dns1.middlewares: default-headers@file 24 | 25 | environment: 26 | - DNS_SERVER_DOMAIN=dns1.example.com #The primary domain name used by this DNS Server to identify itself. 
27 | 28 | networks: 29 | gateway_vlan: 30 | external: true 31 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-01/portainer-agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | agent: 3 | ports: 4 | - 9001:9001 5 | container_name: portainer-agent 6 | restart: always 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /var/lib/docker/volumes:/var/lib/docker/volumes 10 | image: portainer/agent:latest 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true 17 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-01/traefik/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | traefik: 3 | image: traefik:v3.2 4 | container_name: traefik 5 | restart: always 6 | security_opt: 7 | - no-new-privileges:true 8 | networks: 9 | service_restricted_vlan: 10 | ipv4_address: 10.0.12.10 11 | proxy: 12 | 13 | volumes: 14 | - /opt/docker-data/traefik/traefik.yml:/etc/traefik/traefik.yml:ro 15 | - /opt/docker-data/traefik/dynamic:/dynamic:ro 16 | - /mnt/nfs/letsencrypt/:/letsencrypt/:ro 17 | environment: 18 | - DOCKER_HOST=docker-socket-proxy 19 | depends_on: 20 | - docker-socket-proxy 21 | 22 | docker-socket-proxy: 23 | image: tecnativa/docker-socket-proxy:latest 24 | container_name: docker-socket-proxy 25 | restart: always 26 | networks: 27 | - proxy 28 | environment: 29 | - CONTAINERS=1 30 | - SERVICES=1 31 | - NETWORKS=1 32 | volumes: 33 | - /var/run/docker.sock:/var/run/docker.sock:ro 34 | 35 | networks: 36 | proxy: 37 | external: true 38 | internal: true 39 | service_restricted_vlan: 40 | external: true -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-02/grafana/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | grafana: 3 | container_name: grafana 4 | image: grafana/grafana-oss:latest 5 | restart: unless-stopped 6 | volumes: 7 | - grafana:/var/lib/grafana 8 | # - grafana.ini:/etc/grafana/grafana.ini 9 | 10 | environment: 11 | GF_SERVER_ROOT_URL: https://grafana.example.com 12 | 13 | user: 2000:2000 14 | networks: 15 | service_restricted_vlan: 16 | ipv4_address: 10.0.12.12 17 | 18 | networks: 19 | service_restricted_vlan: 20 | external: true 21 | 22 | volumes: 23 | grafana: 24 | name: grafana 25 | driver: local 26 | driver_opts: 27 | type: none 28 | o: bind 29 | device: /opt/docker-data/grafana 30 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-02/influxdb/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | influxdb2: 3 | image: influxdb:2 4 | container_name: influxdb 5 | restart: unless-stopped 6 | environment: 7 | - DOCKER_INFLUXDB_INIT_MODE=setup 8 | - DOCKER_INFLUXDB_INIT_ADMIN_TOKEN 9 | - DOCKER_INFLUXDB_INIT_USERNAME 10 | - DOCKER_INFLUXDB_INIT_PASSWORD 11 | - DOCKER_INFLUXDB_INIT_ORG 12 | - DOCKER_INFLUXDB_INIT_BUCKET 13 | 14 | volumes: 15 | - influxdb-data:/var/lib/influxdb2 16 | - influxdb-config:/etc/influxdb2 17 | user: 1000:1000 18 | networks: 19 | service_restricted_vlan: 20 | ipv4_address: 10.0.12.13 21 | 22 | networks: 23 | service_restricted_vlan: 24 | external: 
true 25 | volumes: 26 | influxdb-data: 27 | name: influxdb-data 28 | driver: local 29 | driver_opts: 30 | type: none 31 | o: bind 32 | device: /opt/docker-data/influxdb/data 33 | influxdb-config: 34 | name: influxdb-config 35 | driver: local 36 | driver_opts: 37 | type: none 38 | o: bind 39 | device: /opt/docker-data/influxdb/config -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-02/it-tools/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | it-tools: 4 | container_name: it-tools 5 | restart: unless-stopped 6 | image: ghcr.io/corentinth/it-tools:latest 7 | # user: 1000:1000 8 | networks: 9 | service_restricted_vlan: 10 | ipv4_address: 10.0.12.14 11 | 12 | networks: 13 | service_restricted_vlan: 14 | external: true 15 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-02/portainer-agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | agent: 3 | ports: 4 | - 9001:9001 5 | container_name: portainer-agent 6 | restart: always 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /var/lib/docker/volumes:/var/lib/docker/volumes 10 | image: portainer/agent:latest 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-02/uptime-kuma/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | uptime-kuma: 3 | container_name: uptime-kuma 4 | image: louislam/uptime-kuma:1 5 | restart: always 6 | volumes: 7 | - uptime-kuma:/app/data:rw 8 | 9 | # user: 2000:2000 10 | networks: 11 | service_restricted_vlan: 12 | ipv4_address: 10.0.12.11 13 | 14 | networks: 15 | service_restricted_vlan: 16 | external: true 17 | 18 | volumes: 19 | uptime-kuma: 20 | name: uptime-kuma 21 | driver: local 22 | driver_opts: 23 | type: none 24 | o: bind 25 | device: /opt/docker-data/uptime-kuma 26 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-03/certbot/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | certbot: 3 | image: certbot/dns-cloudflare:latest 4 | container_name: certbot 5 | volumes: 6 | - /mnt/nfs/letsencrypt:/etc/letsencrypt 7 | - /opt/docker-data/certbot/cloudflare.ini:/etc/cloudflare.ini:ro 8 | - /opt/docker-data/certbot:/var/lib/letsencrypt 9 | - /opt/docker-data/certbot:/var/log/letsencrypt 10 | entrypoint: > 11 | sh -c " 12 | certbot certonly \ 13 | --dns-cloudflare \ 14 | --dns-cloudflare-credentials /etc/cloudflare.ini \ 15 | --dns-cloudflare-propagation-seconds 30 \ 16 | --agree-tos \ 17 | --non-interactive \ 18 | --email admin@example.com \ 19 | -d example.com \ 20 | -d *.example.com \ 21 | && \ 22 | sleep infinity" 23 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-03/dns2/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | dns2: 3 | container_name: dns2 4 | hostname: dns2.example.com 5 | image: technitium/dns-server:latest 6 | user: 1000:1000 7 | security_opt: 8 | - 
no-new-privileges:true 9 | volumes: 10 | - /opt/docker-data/dns2:/etc/dns 11 | restart: always 12 | sysctls: 13 | - net.ipv4.ip_local_port_range=1024 65000 14 | networks: 15 | gateway_vlan: 16 | ipv4_address: 10.54.54.54 17 | 18 | environment: 19 | - DNS_SERVER_DOMAIN=dns2.example.com #The primary domain name used by this DNS Server to identify itself. 20 | networks: 21 | gateway_vlan: 22 | external: true 23 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-03/omada/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | omada: 3 | container_name: omada 4 | image: mbentley/omada-controller:5.15 5 | stop_grace_period: 60s 6 | environment: 7 | - PUID=508 8 | - PGID=508 9 | - MANAGE_HTTP_PORT=8088 10 | - MANAGE_HTTPS_PORT=8043 11 | - PORTAL_HTTP_PORT=8088 12 | - PORTAL_HTTPS_PORT=8843 13 | - PORT_APP_DISCOVERY=27001 14 | - PORT_ADOPT_V1=29812 15 | - PORT_UPGRADE_V1=29813 16 | - PORT_MANAGER_V1=29811 17 | - PORT_MANAGER_V2=29814 18 | - PORT_DISCOVERY=29810 19 | - SHOW_SERVER_LOGS=true 20 | - SHOW_MONGODB_LOGS=false 21 | # - SSL_CERT_NAME=tls.crt 22 | # - SSL_KEY_NAME=tls.key 23 | - TZ=America/Toronto 24 | 25 | volumes: 26 | - /opt/docker-data/omada/data:/opt/tplink/EAPController/data 27 | - /opt/docker-data/omada/logs:/opt/tplink/EAPController/logs 28 | restart: unless-stopped 29 | networks: 30 | service_restricted_vlan: 31 | ipv4_address: 10.0.13.10 32 | 33 | networks: 34 | service_restricted_vlan: 35 | external: true 36 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-03/portainer-agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | agent: 3 | ports: 4 | - 9001:9001 5 | container_name: portainer-agent 6 | restart: always 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /var/lib/docker/volumes:/var/lib/docker/volumes 10 | image: portainer/agent:latest 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-03/portainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | portainer-ce: 3 | ports: 4 | - "9443:9443" 5 | container_name: portainer 6 | restart: always 7 | volumes: 8 | - "/var/run/docker.sock:/var/run/docker.sock" 9 | - "/opt/docker-data/portainer:/data" 10 | image: "portainer/portainer-ce:latest" 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true 17 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-04/homepage/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | homepage: 3 | image: ghcr.io/gethomepage/homepage:latest 4 | container_name: homepage 5 | volumes: 6 | - /opt/docker-data/homepage:/app/config 7 | restart: unless-stopped 8 | user: 2000:2000 9 | networks: 10 | service_restricted_vlan: 11 | ipv4_address: 10.0.12.21 12 | 13 | networks: 14 | service_restricted_vlan: 15 | external: true 16 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-04/portainer-agent/docker-compose.yml: 
-------------------------------------------------------------------------------- 1 | services: 2 | agent: 3 | ports: 4 | - 9001:9001 5 | container_name: portainer-agent 6 | restart: always 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /var/lib/docker/volumes:/var/lib/docker/volumes 10 | image: portainer/agent:latest 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-05/flaresolverr/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | flaresolverr: 3 | # DockerHub mirror flaresolverr/flaresolverr:latest 4 | image: ghcr.io/flaresolverr/flaresolverr:latest 5 | container_name: flaresolverr 6 | environment: 7 | - LOG_LEVEL=info 8 | # - LOG_HTML=${LOG_HTML:-false} 9 | # - CAPTCHA_SOLVER=${CAPTCHA_SOLVER:-none} 10 | - TZ=America/Toronto 11 | 12 | user: 1000:1000 13 | restart: unless-stopped 14 | networks: 15 | service_external_vlan: 16 | ipv4_address: 10.0.13.50 17 | 18 | networks: 19 | service_external_vlan: 20 | external: true 21 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-05/overseerr/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | overseerr: 3 | image: sctx/overseerr:latest 4 | container_name: overseerr 5 | environment: 6 | - LOG_LEVEL=debug 7 | - TZ=America/Toronto 8 | user: 2000:2000 9 | volumes: 10 | - /opt/docker-data/overseerr/config:/app/config 11 | restart: unless-stopped 12 | networks: 13 | service_external_vlan: 14 | ipv4_address: 10.0.13.51 15 | 16 | networks: 17 | service_external_vlan: 18 | external: true 19 | -------------------------------------------------------------------------------- /ansible_docker_management/host_vars/docker-host-05/portainer-agent/docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | agent: 3 | ports: 4 | - 9001:9001 5 | container_name: portainer-agent 6 | restart: always 7 | volumes: 8 | - /var/run/docker.sock:/var/run/docker.sock 9 | - /var/lib/docker/volumes:/var/lib/docker/volumes 10 | image: portainer/agent:latest 11 | networks: 12 | - proxy 13 | 14 | networks: 15 | proxy: 16 | external: true -------------------------------------------------------------------------------- /ansible_docker_management/inventory.yml: -------------------------------------------------------------------------------- 1 | docker-hosts: 2 | hosts: 3 | docker-host-01: 4 | ansible_host: 10.0.11.11 5 | docker-host-02: 6 | ansible_host: 10.0.11.12 7 | docker-host-03: 8 | ansible_host: 10.0.11.13 9 | docker-host-04: 10 | ansible_host: 10.0.11.14 11 | docker-host-05: 12 | ansible_host: 10.0.11.15 13 | 14 | docker-hosts-test: 15 | hosts: 16 | docker-host-test-01: 17 | ansible_host: 10.0.11.199 18 | docker-host-test-02: 19 | ansible_host: 10.0.11.200 20 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_create_network/README.md: -------------------------------------------------------------------------------- 1 | # docker_create_network 2 | 3 | This role creates Docker networks defined in the `docker_networks` variable. 
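As an illustration, a minimal `docker_networks` definition in `group_vars/all/vars` could look like the following sketch (the network names, subnets, and parent interface are placeholders, not values taken from this repository):

```yaml
docker_networks:
  - name: proxy
    driver: bridge
  - name: service_restricted_vlan
    driver: macvlan
    subnet: 10.0.12.0/24
    gateway: 10.0.12.1
    iprange: 10.0.12.0/25
    options:
      parent: eth0.12   # hypothetical VLAN sub-interface on the Docker host
```

See the Variables section below for the full list of supported keys.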
4 | 5 | ## Purpose 6 | 7 | This role simplifies the process of creating Docker networks, ensuring that containers can communicate with each other and external resources as needed. It supports various network drivers and allows for custom configurations. 8 | 9 | ## Tasks Performed 10 | 11 | - Creates Docker networks based on the provided definitions. 12 | 13 | ## Variables 14 | 15 | * **`docker_networks`** (*Required*): A list of dictionaries, where each dictionary defines a Docker network. This variable is defined in `group_vars/all/vars`. Each network definition can include the following keys: 16 | * **`name`** (*Required*): The name of the network. 17 | * **`driver`** (*Required*): The network driver (e.g., `bridge`, `macvlan`, `overlay`). 18 | * **`subnet`** (*Optional*): The subnet for the network (e.g., `10.0.0.0/16`). 19 | * **`gateway`** (*Optional*): The gateway for the network (e.g., `10.0.10.1`). 20 | * **`iprange`** (*Optional*): The IP range for the network (e.g., `10.0.10.0/24`). 21 | * **`options`** (*Optional*): Additional driver-specific options. For example, the macvlan driver needs a `parent` option that specifies which host interface to attach to. This is important, and a working example is available in `group_vars/all/vars`. Example: 22 | 23 | ```yaml 24 | options: 25 | parent: eth0.10 26 | ``` 27 | 28 | 29 | 30 | ## Important Notes 31 | 32 | * This role requires the `community.docker` Ansible collection. 33 | * Ensure that the chosen network driver and options are compatible with your Docker environment. Review the `docker_networks` variable in `group_vars/all/vars` and adjust the network definitions according to your requirements. -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_create_network/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create Docker networks 4 | community.docker.docker_network: 5 | name: "{{ network.name }}" 6 | driver: "{{ network.driver }}" 7 | enable_ipv6: false 8 | ipam_config: 9 | - subnet: "{{ network.subnet | default(omit) }}" 10 | gateway: "{{ network.gateway | default(omit) }}" 11 | iprange: "{{ network.iprange | default(omit) }}" 12 | driver_options: "{{ network.options | default({}) }}" 13 | state: present 14 | loop: "{{ docker_networks }}" 15 | loop_control: 16 | loop_var: network 17 | register: docker_network_result -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_deploy_containers/README.md: -------------------------------------------------------------------------------- 1 | # docker_deploy_containers 2 | 3 | This role deploys Docker containers defined in docker-compose files located in the `host_vars` directory. 4 | 5 | ## Purpose 6 | 7 | This role simplifies the deployment of multiple containers defined via docker-compose files by automating the process. It fetches secrets from HashiCorp Vault, prepares the necessary directories, and then deploys the containers. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Discovers docker-compose files within the `host_vars` directory for the current host. 12 | 2. Fetches environment variables from HashiCorp Vault if needed by the containers. 13 | 3. Creates directories for persistent data if they don't exist. 14 | 4. Copies data files from the `files/containers` directory to the persistent data directories. 15 | 5. Deploys containers using `docker-compose`.
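For example, adding a new service to a host is a matter of dropping a compose file into that host's `host_vars` directory; the role will discover and deploy it on the next run. A minimal sketch (the `whoami` service, its path, and its address are illustrative, not part of this repository):

```yaml
# host_vars/docker-host-04/whoami/docker-compose.yml (hypothetical path)
services:
  whoami:
    container_name: whoami
    image: traefik/whoami:latest
    restart: unless-stopped
    networks:
      service_restricted_vlan:
        ipv4_address: 10.0.12.99   # illustrative address

networks:
  service_restricted_vlan:
    external: true
```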
16 | 17 | ## Variables 18 | 19 | * **Several optional variables can be defined within the `docker-compose.yml` files and in the group_vars**. Refer to the `defaults/main.yml` file within the role for a comprehensive list. Some commonly used variables include: 20 | * All variables defined in a docker-compose file, as detailed in the Docker documentation. This allows you to fully manage Docker containers with Ansible. 21 | * You define your `docker-compose.yml` file the same way you would for the CLI, Portainer, Dockge, or Komodo. 22 | * Other Variables: 23 | * **`docker-data`** (*Required*): The base directory where the containers will create/save their data. Defined in `group_vars/all/vars`. 24 | 25 | ## Important Notes 26 | 27 | * This role requires the `community.docker` and `community.hashi_vault` Ansible collections. 28 | * Ensure HashiCorp Vault is set up and accessible if you're using it for secrets management. 29 | * The directory structure for files in `files/containers` should mirror the container directory names in `host_vars`. 30 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_deploy_containers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # container_default: 2 | # api_version: 3 | # auto_remove: 4 | # blkio_weight: 5 | # ca_path: 6 | # cap_drop: 7 | # capabilities: 8 | # cgroup_parent: 9 | # cgroupns_mode: 10 | # cleanup: 11 | # client_cert: 12 | # client_key: 13 | # command: 14 | # command_handling: 15 | # comparisons: 16 | # container_default_behavior: 17 | # cpu_period: 18 | # cpu_quota: 19 | # cpu_shares: 20 | # cpuset_cpus: 21 | # cpuset_mems: 22 | # debug: 23 | # detach: 24 | # device_read_bps: 25 | # device_read_iops: 26 | # device_write_bps: 27 | # device_write_iops: 28 | # devices: 29 | # dns_opts: 30 | # dns_search_domains: 31 | # dns_servers: 32 | # docker_host: 33 | # docker_path: 34 | # domainname: 35 | # entrypoint: 36 | # env: {} 37 | # env_file: 38 | # etc_hosts: 39 | # expose: 40 | # force_kill: 41 | # groups: 42 | # healthcheck: 43 | # hostname: 44 | # ignore_image: 45 | # image: 46 | # init: 47 | # interactive: 48 | # ipc_mode: 49 | # keep_volumes: 50 | # kernel_memory: 51 | # labels: 52 | # links: 53 | # log_driver: 54 | # log_options: 55 | # mac_address: 56 | # # memory: 512MB 57 | # memory_reservation: 58 | # memory_swap: 59 | # memory_swappiness: 60 | # mounts: 61 | # name: 62 | # network_mode: 63 | # networks: 64 | # oom_killer: 65 | # oom_score_adj: 66 | # paused: 67 | # pid_mode: 68 | # pids_limit: 69 | # privileged: 70 | # published_ports: 71 | # pull: 72 | # purge_networks: 73 | # read_only: 74 | # recreate: 75 | # recreate_reasons: 76 | # restart: 77 | # restart_policy: "unless-stopped" 78 | # restart_retries: "3" 79 | # runtime: 80 | # security_opts: 81 | # shm_size: 82 | # ssl_version: 83 | # state: "present" 84 | # stop_signal: 85 | # stop_timeout: 86 | # storage_opts: 87 | # sysctls: 88 | # timeout: 89 | # tls: 90 | # tls_hostname: 91 | # tmpfs: 92 | # tty: 93 | # ulimits: 94 | # use_ssh_client: 95 | # user: 96 | # userns_mode: 97 | # uts: 98 | # validate_certs: 99 | # volume_driver: 100 | # volumes: 101 | # volumes_from: 102 | # working_dir: 103 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_deploy_containers/handlers/main.yml: -------------------------------------------------------------------------------- 1 | -
name: Restart Docker 2 | ansible.builtin.service: 3 | name: docker 4 | state: restarted 5 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_deploy_containers/tasks/deploy_compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # roles/docker_deploy_containers/tasks/deploy_compose.yml 3 | - name: Set facts for current compose file 4 | ansible.builtin.set_fact: 5 | container_dir_path: "{{ compose_file.path | dirname }}" 6 | docker_compose_file: "{{ compose_file.path }}" 7 | container_dir_name: "{{ compose_file.path | dirname | basename }}" 8 | data_src_dir: "{{ playbook_dir }}/files/containers/{{ compose_file.path | dirname | basename }}" 9 | volume_dest_dir: "/opt/docker-data/{{ compose_file.path | dirname | basename }}" 10 | delegate_to: localhost 11 | 12 | - name: Check if data source directory exists for {{ container_dir_name }} 13 | ansible.builtin.stat: 14 | path: "{{ data_src_dir }}" 15 | register: data_src_dir_stat 16 | delegate_to: localhost 17 | when: data_src_dir != "" 18 | 19 | - name: Register existing containers for {{ container_dir_name }} 20 | ansible.builtin.command: docker container inspect "{{ container_dir_name }}" 21 | register: container_inspect 22 | ignore_errors: true 23 | changed_when: false 24 | become: true 25 | 26 | - name: Fetch environment variable values from Vault for {{ container_dir_name }} 27 | community.hashi_vault.vault_kv2_get: 28 | url: "http://127.0.0.1:8200" 29 | path: "{{ container_dir_name }}" 30 | engine_mount_point: docker 31 | token_path: "/home/homelab" 32 | register: vault_secrets 33 | delegate_to: localhost 34 | ignore_errors: true 35 | when: container_inspect.rc is undefined or container_inspect.rc != 0 36 | 37 | - name: Convert Vault output to environment file format 38 | ansible.builtin.copy: 39 | content: | 40 | {% for key, value in vault_secrets.data.data.items() %} 41 | {{ key }}={{ value }} 42 | {% endfor %} 43 | dest: "/tmp/{{ container_dir_name }}_env_file.env" 44 | mode: "0644" 45 | when: vault_secrets.secret is defined and (container_inspect.rc is undefined or container_inspect.rc != 0) 46 | 47 | - name: Create container volume directory for {{ container_dir_name }} 48 | ansible.builtin.file: 49 | path: "{{ volume_dest_dir }}" 50 | state: directory 51 | mode: "0755" 52 | owner: docker-non-root-user 53 | group: docker-non-root-group 54 | become: true 55 | when: container_inspect.rc is undefined or container_inspect.rc != 0 56 | 57 | - name: Copy container data directory for {{ container_dir_name }} 58 | ansible.posix.synchronize: 59 | src: "{{ data_src_dir }}/" 60 | dest: "{{ volume_dest_dir }}/" 61 | recursive: true 62 | owner: true 63 | become: true 64 | when: data_src_dir_stat.stat.exists and (container_inspect.rc is undefined or container_inspect.rc != 0) 65 | 66 | - name: Deploy Docker containers for {{ container_dir_name }} 67 | community.docker.docker_compose_v2: 68 | definition: "{{ lookup('file', docker_compose_file) | from_yaml }}" 69 | project_name: "{{ container_dir_name }}" 70 | env_files: "{{ '/tmp/' + container_dir_name + '_env_file.env' if vault_secrets.secret is defined else omit }}" 71 | state: present 72 | become: true 73 | when: container_inspect.rc is undefined or container_inspect.rc != 0 74 | 75 | - name: Remove temporary environment file 76 | ansible.builtin.file: 77 | path: "/tmp/{{ container_dir_name }}_env_file.env" 78 | state: absent 79 | become: true 80 | when: vault_secrets.secret is
defined 81 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_deploy_containers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # roles/docker_deploy_containers/tasks/main.yml 3 | - name: Find docker-compose files for {{ inventory_hostname }} 4 | ansible.builtin.find: 5 | paths: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}/" 6 | file_type: file 7 | patterns: "docker-compose.yml" 8 | recurse: true 9 | register: docker_compose_files 10 | delegate_to: localhost 11 | 12 | - name: Loop through each docker-compose in host_vars and deploy 13 | ansible.builtin.include_tasks: deploy_compose.yml 14 | loop: "{{ docker_compose_files.files }}" 15 | loop_control: 16 | loop_var: compose_file 17 | when: docker_compose_files.files | length > 0 18 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_install_requirements/README.md: -------------------------------------------------------------------------------- 1 | # docker_install_requirements 2 | 3 | This role installs the required Python packages for interacting with Docker. 4 | 5 | ## Purpose 6 | 7 | This role ensures that the necessary Python libraries are available for Ansible to manage Docker containers and images, preparing the environment for other Docker-related roles. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Installs `pip`, the Python package installer. 12 | 2. Installs the `docker` Python package (ref: https://docker-py.readthedocs.io/en/stable/ ). 13 | 14 | ## Variables 15 | 16 | This role does not use any specific variables. 17 | 18 | ## Important Notes 19 | 20 | * This role requires `become: true` as it installs system-wide packages. -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_install_requirements/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure pip is installed 3 | ansible.builtin.apt: 4 | name: python3-pip 5 | state: present 6 | update_cache: true 7 | cache_valid_time: 3600 8 | become: true 9 | 10 | - name: Install Docker SDK for Python 11 | ansible.builtin.pip: 12 | name: docker 13 | state: present 14 | break_system_packages: true 15 | become: true 16 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_remove_container/README.md: -------------------------------------------------------------------------------- 1 | # docker_remove_container 2 | 3 | This role removes Docker container stacks defined in `host_vars`, along with their Docker volumes if they exist. 4 | 5 | ## Purpose 6 | 7 | This role simplifies the removal of Docker containers, ensuring that all related resources are also cleaned up, preventing conflicts and freeing up resources. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Validates that a list of container names (`container_names`) is provided. 12 | 2. Locates all `docker-compose.yml` files for the current host. 13 | 3. Filters the compose files to match the specified container names. 14 | 4. Removes the matching containers and any associated Docker volumes using the `community.docker.docker_compose_v2` module. 15 | 16 | ## Variables 17 | 18 | * **`container_names`** (*Required*): A list of container names to remove. Defined in the playbook that uses this role (`docker_delete_container.yml`).
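For illustration, a playbook using this role might pass the variable like this (a sketch with example container names; the actual `docker_delete_container.yml` may differ):

```yaml
- name: Remove containers from the docker hosts
  hosts: docker-hosts
  roles:
    - role: docker_remove_container
      vars:
        container_names:
          - it-tools
          - uptime-kuma
```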
19 | 20 | ## Important Notes 21 | 22 | * This role uses the `community.docker` Ansible collection. 23 | * Make sure the `container_names` variable is correctly defined in the playbook. -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_remove_container/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Validate input for container_names 2 | ansible.builtin.assert: 3 | that: 4 | - container_names is defined 5 | - container_names | length > 0 6 | fail_msg: "You must provide a list of containers in 'container_names'." 7 | 8 | - name: Find all docker-compose.yml files for the host 9 | ansible.builtin.find: 10 | paths: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}" 11 | patterns: "docker-compose.yml" 12 | file_type: file 13 | recurse: true 14 | register: all_docker_compose_files 15 | delegate_to: localhost 16 | 17 | - name: Filter docker-compose files for container_names 18 | ansible.builtin.set_fact: 19 | target_docker_compose_files: >- 20 | {{ 21 | all_docker_compose_files.files | selectattr( 22 | 'path', 'search', '^.*/(' + '|'.join(container_names) + ')/docker-compose.yml$' 23 | ) | list 24 | }} 25 | 26 | - name: Remove specified containers 27 | ansible.builtin.include_tasks: remove_compose.yml 28 | loop: "{{ target_docker_compose_files }}" 29 | loop_control: 30 | loop_var: compose_file 31 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_remove_container/tasks/remove_compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set facts for current compose file 3 | ansible.builtin.set_fact: 4 | container_dir_path: "{{ compose_file.path | dirname }}" 5 | docker_compose_file: "{{ compose_file.path }}" 6 | container_dir_name: "{{ compose_file.path | dirname | basename }}" 7 | delegate_to: localhost 8 | 9 | - name: Removing container {{ container_dir_name }} 10 | community.docker.docker_compose_v2: 11 | definition: "{{ lookup('file', docker_compose_file) | from_yaml }}" 12 | project_name: "{{ container_dir_name }}" 13 | pull: missing 14 | state: absent 15 | remove_volumes: true 16 | become: true 17 | tags: 18 | - deploy_container 19 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_remove_container_files/README.md: -------------------------------------------------------------------------------- 1 | # docker_remove_container_files 2 | 3 | This role removes container data directories. 4 | 5 | ## Purpose 6 | 7 | This role complements the `docker_remove_container` role by removing the persistent data directories associated with Docker containers. It ensures complete cleanup and frees up disk space. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Retrieves a list of directories in `host_vars` for the current host. 12 | 2. Filters the directories to match the specified container names. 13 | 3. Deletes the matched data directories if they exist, by default under `/opt/docker-data/<container_name>`. 14 | 15 | ## Variables 16 | 17 | * **`container_names`** (*Required*): A list of container names for which to remove data directories. Should be defined in the playbook where this role is used. 18 | * **`docker-data`** (*Required*): The base directory for container data. Defined in `group_vars/all/vars`.
19 | 20 | ## Important Notes 21 | 22 | * This role requires `become: true` as it modifies the file system. -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_remove_container_files/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get list of directories in host_vars for {{ inventory_hostname }} 3 | ansible.builtin.find: 4 | paths: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}" 5 | file_type: directory 6 | patterns: "*" 7 | register: host_vars_dirs 8 | delegate_to: localhost 9 | 10 | - name: Filter directories to match defined containers 11 | ansible.builtin.set_fact: 12 | matched_container_dirs: "{{ host_vars_dirs.files | map(attribute='path') | map('basename') | intersect(container_names) | list }}" 13 | delegate_to: localhost 14 | 15 | - name: Prepare directory mapping for containers 16 | ansible.builtin.set_fact: 17 | container_data_dirs: "{{ container_data_dirs | default({}) | combine({item: {'data_src_dir': playbook_dir + '/files/containers/' + item, 'volume_dest_dir': '/opt/docker-data/' + item}}) }}" 18 | loop: "{{ matched_container_dirs }}" 19 | delegate_to: localhost 20 | 21 | - name: Delete container directories if they exist 22 | ansible.builtin.file: 23 | path: "{{ container_data_dirs[item]['volume_dest_dir'] }}" 24 | state: absent 25 | become: true 26 | loop: "{{ matched_container_dirs }}" 27 | loop_control: 28 | label: "{{ item }}" 29 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_sync_files/README.md: -------------------------------------------------------------------------------- 1 | # docker_sync_files 2 | 3 | This role synchronizes container configuration files from the Ansible control node to the target Docker hosts. 4 | 5 | ## Purpose 6 | 7 | This role simplifies the management of container configurations by ensuring that the configuration files on the Docker hosts are consistent with the files managed in your Ansible repository. 8 | 9 | This is useful for containers that need configuration file updates, like Traefik or Homepage: you can modify the files in your Ansible repository and sync them to the Docker host. 10 | 11 | ## Tasks Performed 12 | 13 | 1. Retrieves a list of directories in `host_vars` for the current host. 14 | 2. Filters the directories to match the specified container names. 15 | 3. Creates the necessary directories on the target host if they don't exist. 16 | 4. Synchronizes files from the `files/containers` directory on the control node to the corresponding directories on the target host. 17 | 18 | ## Variables 19 | 20 | * **`container_names`** (*Required*): A list of container names to synchronize files for. Should be defined in the playbook (`docker_sync_files.yml`). 21 | * **`docker-data`** (*Required*): The base directory for container data. Defined in `group_vars/all/vars`. 22 | 23 | 24 | ## Important Notes 25 | 26 | * The role uses the `synchronize` module, which relies on `rsync`. Make sure rsync is installed on both the control node and the Docker hosts. 27 | * Ensure the directory structure in `files/containers` mirrors the structure in `host_vars`. 28 | * Files removed from your Ansible repository will not be removed from the Docker host.
[ *This is in my list of improvements for this role* ] 29 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_sync_files/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get list of directories in host_vars for {{ inventory_hostname }} 3 | ansible.builtin.find: 4 | paths: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}" 5 | file_type: directory 6 | patterns: "*" 7 | register: host_vars_dirs 8 | delegate_to: localhost 9 | 10 | - name: Filter directories to match defined containers 11 | ansible.builtin.set_fact: 12 | matched_container_dirs: "{{ host_vars_dirs.files | map(attribute='path') | map('basename') | intersect(container_names) | list }}" 13 | delegate_to: localhost 14 | 15 | - name: Prepare directory mapping for containers 16 | ansible.builtin.set_fact: 17 | container_data_dirs: >- 18 | {{ container_data_dirs | default({}) | combine({ 19 | item: { 20 | 'data_src_dir': playbook_dir ~ '/files/containers/' ~ item, 21 | 'volume_dest_dir': '/opt/docker-data/' ~ item 22 | } 23 | }) }} 24 | loop: "{{ matched_container_dirs }}" 25 | delegate_to: localhost 26 | 27 | - name: Ensure container directories are available 28 | ansible.builtin.file: 29 | path: "{{ container_data_dirs[item]['volume_dest_dir'] }}" 30 | state: directory 31 | mode: "0755" 32 | owner: docker-non-root-user 33 | group: docker-non-root-group 34 | become: true 35 | loop: "{{ matched_container_dirs }}" 36 | loop_control: 37 | label: "{{ item }}" 38 | 39 | - name: Check if data source directory exists for each container 40 | ansible.builtin.stat: 41 | path: "{{ container_data_dirs[item]['data_src_dir'] }}" 42 | register: data_src_dir_stat 43 | loop: "{{ matched_container_dirs }}" 44 | delegate_to: localhost 45 | loop_control: 46 | label: "{{ item }}" 47 | 48 | - name: Copy container data directory for each container 49 | ansible.posix.synchronize: 50 | src: "{{ container_data_dirs[item]['data_src_dir'] }}/" 51 | dest: "{{ container_data_dirs[item]['volume_dest_dir'] }}/" 52 | recursive: true 53 | rsync_opts: 54 | - "--chown=2000:2000" # TODO: use a variable instead of hardcoding the UID/GID 55 | when: > 56 | data_src_dir_stat.results | selectattr('item', 'equalto', item) | map(attribute='stat.exists') | first | bool 57 | loop: "{{ matched_container_dirs }}" 58 | loop_control: 59 | label: "{{ item }}" 60 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_update_containers/README.md: -------------------------------------------------------------------------------- 1 | # docker_update_containers 2 | 3 | This role updates or redeploys Docker containers based on their `docker-compose.yml` definitions. 4 | 5 | ## Purpose 6 | 7 | This role allows updating/recreating containers with modified configurations or newer images. It simplifies the redeployment process and ensures containers are running the latest desired versions. 8 | 9 | This can be used to deploy a single Docker container manually instead of using the `docker_deploy_containers` role. 10 | 11 | ## Tasks Performed 12 | 13 | 1. Validates input for the `container_names` variable. 14 | 2. Finds all `docker-compose.yml` files for the host. 15 | 3. Filters the compose files based on `container_names`. 16 | 4. Fetches environment variables from Vault (if defined). 17 | 5. Removes the existing container stack (does not remove the volumes). 18 | 6.
Deploys or redeploys the specified containers using `docker-compose`. 19 | 20 | ## Variables 21 | 22 | * **`container_names`** (*Required*): A list of containers to be updated. Define it in the playbook where you are using this role. 23 | 24 | ## Important Notes 25 | 26 | * Requires the `community.docker` and `community.hashi_vault` collections. 27 | * This role removes the existing container stack before redeploying. Ensure you have proper backups or persistent volumes configured if you need to preserve data. 28 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_update_containers/tasks/deploy_compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks/deploy_compose.yml 3 | - name: Set facts for current compose file 4 | ansible.builtin.set_fact: 5 | container_dir_path: "{{ compose_file.path | dirname }}" 6 | docker_compose_file: "{{ compose_file.path }}" 7 | container_dir_name: "{{ compose_file.path | dirname | basename }}" 8 | delegate_to: localhost 9 | 10 | - name: Fetch environment variable values from Vault for {{ container_dir_name }} 11 | community.hashi_vault.vault_kv2_get: 12 | url: "http://127.0.0.1:8200" 13 | path: "{{ container_dir_name }}" 14 | engine_mount_point: docker 15 | token_path: "/home/homelab" 16 | register: vault_secrets 17 | delegate_to: localhost 18 | ignore_errors: true 19 | 20 | - name: Convert Vault output to environment file format 21 | ansible.builtin.copy: 22 | content: | 23 | {% for key, value in vault_secrets.data.data.items() %} 24 | {{ key }}={{ value }} 25 | {% endfor %} 26 | dest: "/tmp/{{ container_dir_name }}_env_file.env" 27 | mode: "0644" 28 | when: vault_secrets.secret is defined 29 | 30 | - name: Remove existing stack to avoid conflicts for {{ container_dir_name }} 31 | community.docker.docker_compose_v2: 32 | definition: "{{ lookup('file', docker_compose_file) | from_yaml }}" 33 | project_name: "{{ container_dir_name }}" 34 | pull: missing 35 | state: absent 36 | become: true 37 | 38 | - name: Deploy Docker compose for {{ container_dir_name }} 39 | community.docker.docker_compose_v2: 40 | definition: "{{ lookup('file', docker_compose_file) | from_yaml }}" 41 | project_name: "{{ container_dir_name }}" 42 | env_files: "{{ '/tmp/' + container_dir_name + '_env_file.env' if vault_secrets.secret is defined else omit }}" 43 | pull: missing 44 | state: present 45 | become: true 46 | 47 | - name: Remove temporary environment file 48 | ansible.builtin.file: 49 | path: "/tmp/{{ container_dir_name }}_env_file.env" 50 | state: absent 51 | become: true 52 | when: vault_secrets.secret is defined 53 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/docker_update_containers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks/main.yml 3 | - name: Validate input for container_names 4 | ansible.builtin.assert: 5 | that: 6 | - container_names is defined 7 | - container_names | length > 0 8 | fail_msg: "You must provide a list of containers in 'container_names'." 
9 | 10 | - name: Find all docker-compose.yml files for the host 11 | ansible.builtin.find: 12 | paths: "{{ playbook_dir }}/host_vars/{{ inventory_hostname }}" 13 | patterns: "docker-compose.yml" 14 | file_type: file 15 | recurse: true 16 | register: docker_compose_files 17 | delegate_to: localhost 18 | 19 | - name: Filter docker-compose files for container_names 20 | ansible.builtin.set_fact: 21 | target_docker_compose_files: >- 22 | {{ 23 | docker_compose_files.files | selectattr( 24 | 'path', 'search', '^.*/(' + '|'.join(container_names) + ')/docker-compose.yml$' 25 | ) | list 26 | }} 27 | 28 | - name: Deploy specified containers 29 | ansible.builtin.include_tasks: deploy_compose.yml 30 | loop: "{{ target_docker_compose_files }}" 31 | loop_control: 32 | loop_var: compose_file 33 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/update_hosts/README.md: -------------------------------------------------------------------------------- 1 | # update_hosts 2 | 3 | This role updates packages on the target hosts. 4 | 5 | ## Purpose 6 | 7 | This role ensures that the system packages on your target hosts are up-to-date, improving security and providing the latest bug fixes and features. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Updates the apt package cache. 12 | 2. Upgrades all installed packages to their latest versions. 13 | 3. Removes unnecessary dependencies. 14 | 4. Cleans the apt cache. 15 | 16 | ## Variables 17 | 18 | This role does not use any specific variables. 19 | 20 | ## Important Notes 21 | 22 | * Requires `become: true`. 23 | * An active internet connection is required to download package updates. 24 | 25 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/update_hosts/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reboot host 2 | ansible.builtin.reboot: 3 | connect_timeout: 5 4 | reboot_timeout: 300 5 | pre_reboot_delay: 5 6 | post_reboot_delay: 30 -------------------------------------------------------------------------------- /ansible_docker_management/roles/update_hosts/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update hosts (Debian-based) 3 | when: ansible_os_family == 'Debian' 4 | block: 5 | - name: Update apt cache 6 | ansible.builtin.apt: 7 | update_cache: true 8 | cache_valid_time: 3600 9 | 10 | - name: Upgrade packages 11 | ansible.builtin.apt: 12 | upgrade: dist 13 | autoremove: true 14 | autoclean: true 15 | 16 | - name: Check if reboot is required - Debian 17 | ansible.builtin.stat: 18 | path: /var/run/reboot-required 19 | register: reboot_required_file 20 | ignore_errors: true 21 | changed_when: reboot_required_file.stat.exists 22 | notify: Reboot host 23 | 24 | rescue: 25 | - name: Display error message for Debian update failure 26 | ansible.builtin.debug: 27 | msg: "Failed to update Debian-based host. Please check logs."
28 | 29 | - name: Update hosts (RedHat-based) 30 | when: ansible_os_family == 'RedHat' 31 | block: 32 | - name: Update dnf cache 33 | ansible.builtin.dnf: 34 | update_cache: true 35 | 36 | - name: Upgrade packages 37 | ansible.builtin.dnf: 38 | name: "*" 39 | state: latest 40 | 41 | - name: Check if reboot is required - RedHat 42 | ansible.builtin.command: 43 | cmd: needs-restarting -r -s 44 | register: reboot_required_command 45 | failed_when: reboot_required_command.rc > 1 46 | changed_when: reboot_required_command.rc == 1 47 | notify: Reboot host 48 | 49 | rescue: 50 | - name: Display error message for RedHat update failure 51 | ansible.builtin.debug: 52 | msg: "Failed to update RedHat-based host. Please check logs." 53 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_configure_docker/README.md: -------------------------------------------------------------------------------- 1 | # vm_configure_docker 2 | 3 | This role installs and configures Docker on a virtual machine. 4 | 5 | ## Purpose 6 | 7 | This role streamlines the process of installing Docker on the virtual machine, ensuring that the necessary packages are installed, configured, and updated correctly. It follows the same steps as the official Docker installation documentation. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Updates and upgrades existing packages on the VM. 12 | 2. Removes any conflicting Docker packages that may already be installed. 13 | 3. Installs required dependencies for Docker. 14 | 4. Adds Docker's official GPG key and repository. 15 | 5. Installs the Docker Engine, CLI, containerd, and other related packages. 16 | 6. Adds a dedicated Docker group and adds the Ansible user to it. 17 | 18 | 19 | ## Variables 20 | 21 | * **`docker_pkgs`**: List of Docker-related packages to be removed (if present). Defined in `roles/vm_configure_docker/vars/main.yml`. 22 | * **`vm_ansible_user`**: Username of the Ansible user on the VM. Defined in `group_vars/all/vars`. 23 | 24 | ## Important Notes 25 | 26 | * Requires `become: true`. 27 | * An active internet connection is needed for downloading packages and updates.
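As a quick smoke test after this role runs, an ad-hoc play like the following can confirm that the daemon is up and reachable (a sketch, assuming the Docker SDK for Python from the `docker_install_requirements` role is already installed):

```yaml
- name: Verify the Docker installation
  hosts: docker-hosts
  become: true
  tasks:
    - name: Gather information from the Docker daemon
      community.docker.docker_host_info:
      register: docker_info

    - name: Show the running Docker server version
      ansible.builtin.debug:
        msg: "Docker {{ docker_info.host_info.ServerVersion }} is running"
```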
-------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_configure_docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart docker 3 | ansible.builtin.service: 4 | name: docker 5 | state: restarted 6 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_configure_docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update and upgrade all packages 3 | ansible.builtin.apt: 4 | update_cache: true 5 | upgrade: dist 6 | cache_valid_time: 3600 7 | 8 | - name: Remove existing Docker packages 9 | ansible.builtin.apt: 10 | name: "{{ docker_pkgs }}" 11 | state: absent 12 | cache_valid_time: 0 # Ensure removal regardless of cache 13 | 14 | - name: Install dependencies for Docker 15 | ansible.builtin.apt: 16 | name: 17 | - ca-certificates 18 | - curl 19 | - gnupg2 20 | 21 | - name: Create directory for Docker's GPG key 22 | ansible.builtin.file: 23 | path: /etc/apt/keyrings 24 | state: directory 25 | mode: "0755" 26 | 27 | - name: Add Docker's official GPG key 28 | ansible.builtin.apt_key: 29 | url: https://download.docker.com/linux/ubuntu/gpg 30 | keyring: /etc/apt/keyrings/docker.gpg 31 | state: present 32 | 33 | - name: Add Docker repository 34 | ansible.builtin.apt_repository: 35 | repo: >- 36 | deb [arch=amd64 37 | signed-by=/etc/apt/keyrings/docker.gpg] 38 | https://download.docker.com/linux/ubuntu {{ ansible_lsb.codename }} stable 39 | filename: docker 40 | state: present 41 | 42 | - name: Install desired Docker packages 43 | ansible.builtin.apt: 44 | name: 45 | - docker-ce 46 | - docker-ce-cli 47 | - containerd.io 48 | - docker-buildx-plugin 49 | - docker-compose-plugin 50 | state: present 51 | update_cache: true 52 | 53 | # # Additional Docker hardening tasks (consider these based on your needs): 54 | - name: Create a dedicated Docker group for managing containers 55 | ansible.builtin.group: 56 | name: docker 57 | state: present 58 | 59 | - name: Add user to the Docker group for non-root management 60 | ansible.builtin.user: 61 | name: "{{ vm_ansible_user }}" # Adjust username as needed 62 | groups: docker 63 | append: true 64 | 65 | # TODO: this daemon configuration currently breaks the Docker daemon and blocks it from starting; validate each option in docker-daemon.j2 against the installed Docker version before enabling (dockerd refuses to start on unknown or removed options) 66 | # - name: Configure Docker daemon options for enhanced security 67 | # ansible.builtin.template: 68 | # src: docker-daemon.j2 69 | # dest: /etc/docker/daemon.json 70 | # owner: root 71 | # group: root 72 | # mode: "0600" 73 | # notify: Restart docker 74 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_configure_docker/templates/docker-daemon.j2: -------------------------------------------------------------------------------- 1 | { 2 | "log-driver": "json-file", 3 | "log-opts": { 4 | "max-size": "10m", 5 | "max-file": "3" 6 | }, 7 | "live-restore": true, 8 | "default-runtime": "runc", 9 | "userns-remap": "default", 10 | "data-root": "/var/lib/docker", 11 | "storage-driver": "overlay2", 12 | "storage-opts": [ 13 | "overlay2.override_kernel_check=1" 14 | ], 15 | "oom-score-adjust": -500, 16 | "max-concurrent-uploads": 5, 17 | "max-concurrent-downloads": 5, 18 | "registry-mirrors": [ 19 | "https://ghcr.io" 20 | ], 21 | "insecure-registries": [], 22 | "debug": false, 23 | "tls":
false, 24 | "tlsverify": false, 25 | "tlskey": "/var/lib/docker/key.pem", 26 | "tlscert": "/var/lib/docker/cert.pem", 27 | "tlscacert": "/var/lib/docker/ca.pem", 28 | "default-address-pools": [ 29 | { 30 | "base": "10.100.0.0/16", 31 | "size": 16 32 | } 33 | ] 34 | } -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_configure_docker/vars/main.yml: -------------------------------------------------------------------------------- 1 | docker_pkgs: 2 | - docker.io 3 | - docker-doc 4 | - docker-compose 5 | - docker-compose-v2 6 | - podman-docker 7 | - containerd 8 | - runc 9 | 10 | arch_mapping: # Map ansible architecture {{ ansible_architecture }} names to Docker's architecture names 11 | - x86_64: amd64 12 | - aarch64: arm64 13 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_create_docker_non-root_user/README.md: -------------------------------------------------------------------------------- 1 | # vm_create_docker_non-root_user 2 | 3 | This role configures a non-root user for managing Docker. 4 | 5 | ## Purpose 6 | 7 | This role enhances security by setting up a dedicated non-root user for Docker management, reducing the risks associated with running containers as root. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Creates a dedicated Docker group (`docker-non-root-group`) if it doesn't exist and assigns it a fixed GID (2000). 12 | 2. Creates a non-root user (`docker-non-root-user`) with a fixed UID (2000), the dedicated group, no home directory, and limited shell access. 13 | 3. Adds the non-root user to the Docker group (if the group exists). 14 | 4. Creates the `/opt/docker-data` directory. 15 | 5. Changes the ownership of `/opt/docker-data` to the non-root user. 16 | 17 | ## Variables 18 | 19 | This role does not use any specific variables. However, it relies on the previously created non-root user and group. 20 | 21 | ## Important Notes 22 | 23 | * Requires `become: true`. 24 | * The `/opt/docker-data` directory will be used for storing container data, ensuring that the non-root user has the necessary permissions. -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_create_docker_non-root_user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create docker-non-root-group if it doesn't exist 3 | ansible.builtin.group: 4 | name: docker-non-root-group 5 | gid: 2000 6 | state: present 7 | become: true 8 | 9 | - name: Create docker-non-root-user if it doesn't exist (no home directory) 10 | ansible.builtin.user: 11 | name: docker-non-root-user 12 | uid: 2000 13 | group: docker-non-root-group 14 | create_home: false # Should not create a home directory.. but one still appears 15 | shell: /bin/false # Set shell to /bin/false 16 | state: present 17 | become: true 18 | 19 | - name: Add user docker-non-root-user to docker group (if it exists) 20 | ansible.builtin.user: 21 | name: docker-non-root-user 22 | groups: docker 23 | append: true 24 | ignore_errors: true 25 | become: true 26 | 27 | - name: Create /opt/docker-data directory if it doesn't exist 28 | ansible.builtin.file: 29 | path: /opt/docker-data 30 | state: directory 31 | mode: "0755" 32 | become: true 33 | 34 | - name: Change ownership of /opt/docker-data to docker-non-root-user 35 | ansible.builtin.file: 36 | path: /opt/docker-data 37 | owner: docker-non-root-user 38 | group: docker-non-root-group 39 | recurse: true # Apply ownership recursively to subdirectories/files 40 | become: true 41 | -------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_mount_nfs_shares/README.md: -------------------------------------------------------------------------------- 1 | # vm_mount_nfs_shares 2 | 3 | This role mounts NFS shares on the virtual machine. 4 | 5 | ## Purpose 6 | 7 | This role simplifies the process of mounting NFS shares, which can be used for sharing data or configuration files between the NFS server and the virtual machine, centralizing storage and configuration. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Installs the `nfs-common` package. 12 | 2. Mounts the specified NFS shares. 13 | 14 | ## Variables 15 | 16 | * **`nfs_server`** (*Required*): The IP address or hostname of the NFS server. Defined in `group_vars/all/vars`. 17 | * **`nfs_share_path`** (*Required*): The path to the NFS share on the server. Defined in `group_vars/all/vars`. 18 | * **`nfs_share_mount_point`** (*Required*): The mount point for the NFS share on the VM. Defined in `group_vars/all/vars`. 19 | * **`nfs_docker_backup_path`** (*Required*): The path for Docker backup storage on the NFS server. Defined in `group_vars/all/vars`. 20 | * **`nfs_docker_backup_mount_point`** (*Required*): The path for Docker backup storage on the docker-host. Defined in `group_vars/all/vars`. 21 | 22 | 23 | ## Important Notes 24 | 25 | * The NFS server must be accessible from the virtual machine. 26 | * Ensure that the NFS share is exported and permissions are configured correctly on the NFS server. 27 | * On a Synology NAS for example, ensure that both read/write permissions are enabled in the NFS permissions for the specific folder. 28 | * This role attempts to mount the shares multiple times with a delay in case of temporary network issues.
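For reference, the variables this role expects could look like the following in `group_vars/all/vars` (the server address and export paths are placeholders; only the `/mnt/nfs/letsencrypt` mount point matches the one referenced by the certbot compose file in this repository):

```yaml
nfs_server: 10.0.10.5                           # placeholder NAS address
nfs_share_path: /volume1/letsencrypt            # export on the NFS server (placeholder)
nfs_share_mount_point: /mnt/nfs/letsencrypt     # mount point on the VM
nfs_docker_backup_path: /volume1/docker-backup  # placeholder
nfs_docker_backup_mount_point: /mnt/nfs/docker-backup
```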
-------------------------------------------------------------------------------- /ansible_docker_management/roles/vm_mount_nfs_shares/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure the right package for NFS is installed 2 | ansible.builtin.apt: 3 | name: nfs-common 4 | state: present 5 | update_cache: true 6 | become: true 7 | 8 | - name: Mount NFS share and ensure it's mounted on boot for Let's Encrypt 9 | ansible.posix.mount: 10 | src: "{{ nfs_server }}:{{ nfs_share_path }}" 11 | path: "{{ nfs_share_mount_point }}" 12 | fstype: nfs 13 | opts: defaults,_netdev 14 | state: mounted 15 | boot: true 16 | become: true 17 | retries: 3 18 | delay: 5 19 | 20 | - name: Mount NFS share and ensure it's mounted on boot for Docker Backup 21 | ansible.posix.mount: 22 | src: "{{ nfs_server }}:{{ nfs_docker_backup_path }}" 23 | path: "{{ nfs_docker_backup_mount_point }}" 24 | fstype: nfs 25 | opts: defaults,_netdev 26 | state: mounted 27 | boot: true 28 | become: true 29 | retries: 3 30 | delay: 5 31 | -------------------------------------------------------------------------------- /ansible_docker_management/update_host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update the hosts 3 | hosts: docker-hosts 4 | # hosts: docker-hosts-test 5 | become: true 6 | serial: 1 7 | roles: 8 | - update_hosts 9 | -------------------------------------------------------------------------------- /ansible_proxmox_management/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | # Specifies the location of your inventory file (where your hosts are defined). 4 | inventory = ./inventory.yml 5 | 6 | # Specifies the file containing the vault password for decrypting encrypted files (like vault-encrypted variables). You can use --ask-vault-pass or set the ANSIBLE_VAULT_PASSWORD_FILE environment variable 7 | vault_password_file = ~/.vault-pass 8 | 9 | # Specifies how Ansible should handle Python interpreter detection on managed hosts. 10 | # 'auto_silent' is generally recommended for Ansible >= 2.10. 11 | # It automatically detects the best Python interpreter and uses it silently. 12 | interpreter_python = auto_silent 13 | 14 | # Uncomment if you need to specify which interpreter should be used in case of conflicts 15 | # ansible_python_interpreter = /usr/bin/python3 16 | 17 | # Disables SSH host key checking (use with caution!) 18 | host_key_checking = False # **REMOVE OR SET TO 'True' FOR SECURITY** 19 | 20 | [inventory] 21 | # Enables the YAML plugin for inventory parsing. 22 | enable_plugins = yaml 23 | 24 | [hashi_vault_collection] 25 | # Configuration for the 'hashi_vault' Ansible collection (if you are using it). 26 | # Specifies the authentication method to use for connecting to HashiCorp Vault.
27 | auth_method = token 28 | token_path = /home/homelab -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/all/vars: -------------------------------------------------------------------------------- 1 | ########################### 2 | ## GENERAL CONFIGURATION ## 3 | ########################### 4 | timezone: "America/Toronto" 5 | locale: "en_US.UTF-8" 6 | 7 | # DNS settings 8 | dns_servers: 9 | - "10.53.53.53" 10 | - "1.1.1.1" 11 | 12 | # The domain value is used for the hostname when creating virtual machines, and can be reused anywhere else a domain is needed 13 | domain: "example.com" 14 | 15 | # For DoT and DoH, not used yet 16 | dns_over_tls_servers: 17 | - "tls://10.53.53.53" 18 | - "tls://1.1.1.1" 19 | dns_over_https_servers: 20 | - "https://dns1.{{ domain }}" 21 | - "https://dns2.{{ domain }}" 22 | 23 | # VLAN Definitions - This is used when creating the SDN VLANs on Proxmox 24 | gateway_vlan: "10.0.0.0/24" 25 | management_vlan: "10.0.10.0/24" # proxmox host, nas, ansible control node 26 | compute_vlan: "10.0.11.0/24" # virtual machine, docker host etc 27 | service_restricted_vlan: "10.0.12.0/24" # Services with no access to internet at all 28 | service_external_vlan: "10.0.13.0/24" # Services with internet access 29 | home_assistant_vlan: "10.0.14.0/28" # Main interface for Home Assistant 30 | trusted_vlan: "10.0.15.0/24" # Laptop, PC, Phone 31 | multimedia_vlan: "10.0.16.0/24" # TV, Plex 32 | iot_vlan: "10.0.17.0/24" # IoT devices without internet 33 | 34 | # VLAN IDs - Used for SDN Vlans 35 | vlan_definitions: 36 | - id: 10 37 | name: management_vlan 38 | - id: 11 39 | name: compute_vlan 40 | - id: 12 41 | name: service_restricted_vlan 42 | - id: 13 43 | name: service_external_vlan 44 | - id: 14 45 | name: home_assistant_vlan 46 | - id: 15 47 | name: trusted_vlan 48 | - id: 16 49 | name: multimedia_vlan 50 | - id: 17 51 | name: iot_vlan 52 | 53 | # Links for the virtual machine images 54 | debian_image_url: "" 55 | debian_image_name: "" 56 | ubuntu_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 57 | ubuntu_image_name: noble-server-cloudimg-amd64.qcow2 58 | 59 | # Variable for adding the Authentik Realm to Proxmox during the initial configuration 60 | authentik_issuer_url: https://auth.{{ domain }}/application/o/proxmox/ -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/all/vault: -------------------------------------------------------------------------------- 1 | # ENSURE YOU MODIFY THESE VALUES TO MATCH YOUR ENVIRONMENT 2 | # Except for api_token_file_path, all other files and variables should exist before running any playbook 3 | 4 | ######################## 5 | ## PROXMOX MANAGEMENT ## 6 | ######################## 7 | # Proxmox root default user 8 | pve_root_user: root 9 | pve_root_password: MyVeryStrongAndPrivateProxmoxRootPassword # Modify the value with the password you used when configuring Proxmox.
If it's not the same password on each node, you have to define the password in host_vars 10 | pve_root_ssh_private_key_file: "~/.ssh/pve_root_id_rsa" 11 | pve_root_ssh_public_key_file: "~/.ssh/pve_root_id_rsa.pub" 12 | 13 | # Ansible Proxmox ssh and API user information 14 | pve_ansible_user: pve_ansible_user 15 | pve_ansible_ssh_private_key_file: "~/.ssh/ansible_id_rsa" 16 | pve_ansible_ssh_public_key_file: "~/.ssh/ansible_id_rsa.pub" 17 | pve_ssh_port: 22 18 | 19 | # - Proxmox nodes API Tokens - # 20 | # The add_user role will create a file with the token id to be used every time you create a virtual machine, for example 21 | api_token_file_path: "~/.ansible/{{ inventory_hostname }}-{{ pve_ansible_token_id }}" 22 | 23 | ################### 24 | ## VM MANAGEMENT ## 25 | ################### 26 | 27 | # Virtual Machines User Management 28 | vm_admin_ssh_private_key_file: "~/.ssh/homelab" 29 | vm_admin_ssh_public_key_file: "~/.ssh/homelab.pub" 30 | 31 | vm_ansible_user: vm_ansible_user 32 | vm_ansible_ssh_private_key_file: "~/.ssh/ansible2_id_rsa" 33 | vm_ansible_ssh_public_key_file: "~/.ssh/ansible2_id_rsa.pub" 34 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/proxmox/vars.yml: -------------------------------------------------------------------------------- 1 | ######################## 2 | ## PROXMOX MANAGEMENT ## 3 | ######################## 4 | 5 | # default users 6 | ansible_user: "{{ pve_ansible_user_ssh }}" 7 | ansible_ssh_private_key_file: "{{ pve_ansible_ssh_private_key_file }}" 8 | 9 | # Define Ansible user and API Token variable 10 | pve_ansible_user_api: "{{ pve_ansible_user }}_api" # the pve_ansible_user variable is set in the vault; you can define it in this file if you want 11 | pve_ansible_user_ssh: "{{ pve_ansible_user }}_ssh" 12 | pve_ansible_user_api_realm: "{{ pve_ansible_user_api }}@pam" 13 | pve_ansible_token_id: "{{ pve_ansible_user_api }}_token" 14 | 15 | # Proxmox admin user information 16 | 17 | # Groups for Ansible User and Admin User 18 | pve_ansible_group: "pve_ansible_group" 19 | pve_admin_group: "pve_admin_group" 20 | pve_admin_group_role: "Administrator" 21 | 22 | # API Token 23 | pve_ansible_token_privilege: "Administrator" 24 | 25 | snippets_storage: local 26 | snippets_path: "/var/lib/vz/snippets/" 27 | image_storage: local 28 | vm_storage: local-lvm 29 | pve_image_path: "/var/lib/vz/images/0" 30 | 31 | 32 | 33 | ######################## 34 | ## Cluster Config ## 35 | ######################## 36 | pve_cluster_name: "my_proxmox_cluster" 37 | pve_cluster_join_timeout: 60 38 | 39 | # Proxmox network configuration (adjust for your setup) 40 | pve_network_interface: "vmbr1" 41 | pve_compute_bridge: "vmbr1" 42 | 43 | # Primary node in the cluster 44 | pve_primary_node: pve01 45 | 46 | # SSH-related settings 47 | pve_cluster_conf: /etc/pve/corosync.conf 48 | pve_ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com" 49 | 50 | ################### 51 | ## VIRTUAL MACHINES ## 52 | ################### 53 | vm_config: "{{ lookup('fileglob', playbook_dir + '/group_vars/pve/vms/*.yml', errors='ignore') | map('from_yaml') | list | combine }}" -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/proxmox/vms/docker-host-test-01.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | ubuntu-docker-host-test-01: 3 |
vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "ubuntu-docker-host-test-01" 8 | vmid: "9999" 9 | node: "pve03" 10 | memory: 4096 11 | cores: 4 12 | 13 | # Networking 14 | net: 15 | net0: "virtio,bridge=vlan199" 16 | ipconfig: 17 | # ipconfig0: "ip=dhcp" 18 | ipconfig0: "ip=10.0.199.10/24,gw=10.0.199.1" 19 | 20 | # Cloud-init configuration 21 | citype: "nocloud" 22 | cicustom: "user=local:snippets/ubuntu-docker-host-test-01.yml" 23 | cloud_init_template: "docker-host.j2" 24 | cloud_init_template_name: "ubuntu-docker-host-test-01.yml" 25 | # Storage 26 | bootdisk: "scsi0" 27 | disk: "scsi0" 28 | size: "32G" 29 | scsihw: "virtio-scsi-pci" 30 | storage: "local-lvm" 31 | scsi: 32 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 33 | ide: 34 | ide2: "local-lvm:cloudinit" 35 | 36 | # Other VM Configuration 37 | agent: "enabled=1" 38 | onboot: true 39 | state: present # Manually set to indicate that the virtual machine already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 40 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/proxmox/vms/home-assistant.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | home-assistant: 3 | vm_image_name: haos_ova-14.1.qcow2.xz 4 | vm_image_url: https://github.com/home-assistant/operating-system/releases/download/14.1/haos_ova-14.1.qcow2.xz 5 | # Compute 6 | name: "home-assistant" 7 | vmid: "100" 8 | node: "pve01" 9 | memory: 4096 10 | cores: 4 11 | tags: 12 | - automation 13 | - home-assistant 14 | - iot 15 | # Networking 16 | net: 17 | net0: "virtio,bridge=vnet50" # the main vlan interface 18 | net1: "virtio,bridge=vnet51" # this interface can be connected to the same IOT/CCTV network, so you need to play with firewall rules and keep the vlan totally closed 19 | ipconfig: 20 | # ipconfig0: "ip=dhcp" 21 | ipconfig0: "ip=10.0.50.10/28,gw=10.0.50.1" 22 | ipconfig1: "ip=10.0.51.10/16,gw=10.0.51.1" # for some reason these values are not used 23 | # Storage 24 | bootdisk: "scsi0" 25 | disk: "scsi0" 26 | size: "32G" 27 | scsihw: "virtio-scsi-pci" 28 | storage: "local-lvm" 29 | scsi: 30 | scsi0: "local-lvm:0,import-from=local:0/haos_ova-14.1.qcow2,format=qcow2" 31 | 32 | bios: ovmf # Use OVMF (EFI) 33 | efidisk0: # EFI disk configuration as the VM uses UEFI 34 | storage: local-lvm 35 | format: raw 36 | efitype: 4m 37 | pre_enrolled_keys: false 38 | 39 | # Other VM Configuration 40 | agent: "enabled=1" 41 | onboot: true 42 | state: new 43 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/proxmox/vms/ubuntu-docker-host-01.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | ubuntu-docker-host-01: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "ubuntu-docker-host-01" 8 | vmid: "10001" 9 | node: "pve01" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - networking 14 | - ubuntu 15 | - docker 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" 19 | net1: "virtio,bridge=vnet20" # Will be ens19 on the vm 20 | net2: "virtio,bridge=vnet3" # ens20 21 | net3: "virtio,bridge=vnet800" # ens21 22 | ipconfig:
23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.0.10/24,gw=10.0.0.1" 25 | # Cloud-init configuration 26 | citype: "nocloud" 27 | cicustom: "user=local:snippets/ubuntu-docker-host-01.yml" 28 | cloud_init_template: "docker-host.j2" 29 | cloud_init_template_name: "ubuntu-docker-host-01.yml" 30 | # Storage 31 | bootdisk: "scsi0" 32 | disk: "scsi0" 33 | size: "32G" 34 | scsihw: "virtio-scsi-pci" 35 | storage: "local-lvm" 36 | scsi: 37 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 38 | ide: 39 | ide2: "local-lvm:cloudinit" 40 | 41 | # Other VM Configuration 42 | agent: "enabled=1" 43 | onboot: true 44 | state: new # Indicates whether the virtual machine still needs to be created or already exists in Proxmox. It needs to be manually defined. -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/proxmox/vms/ubuntu-docker-host-02.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | ubuntu-docker-host-02: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "ubuntu-docker-host-02" 8 | vmid: "10002" 9 | node: "pve02" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - networking 14 | - ubuntu 15 | - docker 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" 19 | ipconfig: 20 | # ipconfig0: "ip=dhcp" 21 | ipconfig0: "ip=10.0.0.20/24,gw=10.0.0.1" 22 | # Cloud-init configuration 23 | citype: "nocloud" 24 | cicustom: "user=local:snippets/ubuntu-docker-host-02.yml" # the file will be saved under this name on Proxmox; ideally use the VM name for easier management 25 | cloud_init_template: "docker-host.j2" 26 | cloud_init_template_name: "ubuntu-docker-host-02.yml" # this value should match the file name used in cicustom 27 | # Storage 28 | bootdisk: "scsi0" 29 | disk: "scsi0" 30 | size: "32G" 31 | scsihw: "virtio-scsi-pci" 32 | storage: "local-lvm" 33 | scsi: 34 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 35 | ide: 36 | ide2: "local-lvm:cloudinit" 37 | 38 | # Other VM Configuration 39 | agent: "enabled=1" 40 | onboot: true 41 | state: new # Indicates whether the virtual machine still needs to be created or already exists in Proxmox. It needs to be manually defined. -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/README.md: -------------------------------------------------------------------------------- 1 | # Variables File for the pve node group 2 | This document outlines the configuration variables used in Ansible roles designed for managing a Proxmox VE environment. These variables provide defaults and customizable options for various aspects of Proxmox management, including user setup, cluster configuration, and virtual machine deployments. 3 | 4 | ## Purpose 5 | 6 | This document serves as a central reference for understanding and customizing the configuration variables used across various Ansible roles for Proxmox VE. It provides a comprehensive overview of the available variables, their purpose, default values, and how they influence the behavior of the Proxmox management roles. By understanding these variables, you can: 7 | 8 | * Customize your Proxmox deployments to match specific environment requirements. 9 | * Easily adjust default settings for user creation, cluster setup, and VM configurations.
10 | * Gain a better understanding of the configuration options available within the Proxmox Ansible roles. 11 | * Use this document as a starting point for creating your own `group_vars/pve/vars.yml` or similar configuration files. 12 | 13 | 14 | ## Variables 15 | 16 | This section details the variables, categorized by functional area. 17 | 18 | ### PROXMOX MANAGEMENT Variables 19 | 20 | * **ansible\_user** *(Optional)*: 21 | * Description: Base username for Ansible related users (SSH and API). This variable serves as a prefix for generating specific usernames for Ansible automation. 22 | * Default: `"{{ pve_ansible_user_ssh }}"` (the SSH user derived from `pve_ansible_user` below) 23 | * Example: `"automation"` 24 | 25 | * **ansible\_ssh\_private\_key\_file** *(Optional)*: 26 | * Description: Path to the private SSH key file used for Ansible to connect to Proxmox hosts using the `ansible_user`. This variable should point to the private key corresponding to the public key deployed for the Ansible SSH user. 27 | * Default: `"{{ pve_ansible_ssh_private_key_file }}"` (the key path defined in the vault) 28 | * Example: `"~/.ssh/pve_ansible_id_rsa"` 29 | 30 | * **pve\_ansible\_user\_api** *(Optional)*: 31 | * Description: Base username for the Proxmox API user. This is used to construct the full API username. 32 | * Default: `"{{ pve_ansible_user }}_api"` (derived from `pve_ansible_user`) 33 | * Example: `"proxmox_automation_api"` 34 | 35 | * **pve\_ansible\_user\_ssh** *(Optional)*: 36 | * Description: Base username for the Proxmox SSH user used by Ansible. This is used to construct the full SSH username. 37 | * Default: `"{{ pve_ansible_user }}_ssh"` (derived from `pve_ansible_user`) 38 | * Example: `"proxmox_automation_ssh"` 39 | 40 | * **pve\_ansible\_user\_api\_realm** *(Optional)*: 41 | * Description: Full username for the Proxmox API user, including the realm. Typically uses the Proxmox PAM realm (`@pam`). 42 | * Default: `"{{ pve_ansible_user_api }}@pam"` (derived from `pve_ansible_user_api`) 43 | * Example: `"proxmox_automation_api@pam"` 44 | 45 | * **pve\_ansible\_token\_id** *(Optional)*: 46 | * Description: The ID for the Proxmox API token to be created for the `pve_ansible_user_api_realm`. 47 | * Default: `"{{ pve_ansible_user_api }}_token"` (derived from `pve_ansible_user_api`) 48 | * Example: `"proxmox_automation_api_token"` 49 | 50 | * **pve\_ansible\_group** *(Optional)*: 51 | * Description: Name of the Proxmox group to which the Ansible API user will be added. 52 | * Default: `"pve_ansible_group"` 53 | * Example: `"ProxmoxAutomation"` 54 | 55 | * **pve\_admin\_group** *(Optional)*: 56 | * Description: Name of the Proxmox group to be created for administrative users. 57 | * Default: `"pve_admin_group"` 58 | * Example: `"ProxmoxAdmins"` 59 | 60 | * **pve\_admin\_group\_role** *(Optional)*: 61 | * Description: Proxmox role assigned to both the Ansible user group and the Admin user group. Determines the level of access granted. 62 | * Default: `"Administrator"` 63 | * Example: `"PVEAdmin"` (If you have a custom role named `PVEAdmin`) 64 | 65 | * **pve\_ansible\_token\_privilege** *(Optional)*: 66 | * Description: *(This variable is defined but not directly used in the provided tasks. It was likely intended to define the privilege level for the API token, but the roles might directly assign the `pve_admin_group_role`.)* Intended privilege level for the Proxmox API token.
67 | * Default: `"Administrator"` 68 | * Example: `"PVEVMAdmin"` (If you wanted to create a more restricted token) 69 | 70 | * **snippets\_storage** *(Optional)*: 71 | * Description: Proxmox storage to be used for storing snippets (Cloud-Init templates etc.). 72 | * Default: `"local"` 73 | * Example: `"local-lvm"` 74 | 75 | * **snippets\_path** *(Optional)*: 76 | * Description: Path on the Proxmox host where snippets are stored. 77 | * Default: `"/var/lib/vz/snippets/"` 78 | 79 | * **image\_storage** *(Optional)*: 80 | * Description: Proxmox storage to be used for storing VM images (ISOs, templates). 81 | * Default: `"local"` 82 | * Example: `"local-lvm"` 83 | 84 | * **vm\_storage** *(Optional)*: 85 | * Description: Proxmox storage to be used for creating virtual machine disks. 86 | * Default: `"local-lvm"` 87 | * Example: `"ssd-pool"` 88 | 89 | * **pve\_image\_path** *(Optional)*: 90 | * Description: Path on the Proxmox host where VM images are downloaded and stored. 91 | * Default: `"/var/lib/vz/images/0"` 92 | 93 | 94 | ### Cluster Config Variables 95 | 96 | * **pve\_cluster\_name** *(Optional)*: 97 | * Description: Name of the Proxmox cluster to be created or joined. 98 | * Example: `"production-cluster"` 99 | 100 | * **pve\_cluster\_join\_timeout** *(Optional)*: 101 | * Description: Timeout in seconds for joining a Proxmox cluster. 102 | * Default: `60` 103 | * Example: `120` 104 | 105 | * **pve\_network\_interface** *(Optional)*: 106 | * Description: *(This variable is defined but not directly used in the provided tasks. It is likely intended to represent the main network interface, but `pve_compute_bridge` is used for bridge creation.)* Intended network interface for Proxmox management. 107 | * Default: `"vmbr1"` 108 | * Example: `"eth0"` 109 | 110 | * **pve\_compute\_bridge** *(Optional)*: 111 | * Description: Name of the bridge interface to be created for compute networking (VMs). 112 | * Default: `"vmbr1"` 113 | * Example: `"vmbr0"` 114 | 115 | * **pve\_primary\_node** *(Optional)*: 116 | * Description: Hostname or IP address of the primary Proxmox node in the cluster. This should match a hostname in your Ansible inventory. 117 | * Default: `"pve1"` 118 | * Example: `"proxmox-master"` 119 | 120 | * **pve\_cluster\_conf** *(Optional)*: 121 | * Description: Path to the Proxmox cluster configuration file (Corosync configuration). 122 | * Default: `/etc/pve/corosync.conf` 123 | 124 | * **pve\_ssh\_ciphers** *(Optional)*: 125 | * Description: Comma-separated list of SSH ciphers to be used for SSH client configuration within the Proxmox cluster for enhanced security. 126 | * Default: `"aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com"` 127 | * Example: `"chacha20-poly1305@openssh.com,aes256-gcm@openssh.com"` (More restrictive cipher list) 128 | 129 | ### VIRTUAL MACHINES Variables 130 | 131 | * **vm\_config** *(Optional)*: 132 | * Description: This variable is dynamically populated by looking up YAML files in the `playbook_dir + '/group_vars/pve/vms/*.yml'` directory. It combines the content of all YAML files found in that directory into a single dictionary. This dictionary is expected to define the configuration for virtual machines to be managed by Ansible. The structure and content of these YAML files would be specific to the VM deployment roles. 
133 | * Default: `"{{ lookup('fileglob', playbook_dir + '/group_vars/pve/vms/*.yml', errors='ignore') | map('from_yaml') | list | combine }}"` (Dynamic lookup) 134 | * Example: This variable's value is dynamically generated; see the example usage section for how to structure VM configuration files. 135 | 136 | ## Example Usage 137 | 138 | These variables are typically defined in a `group_vars/pve/vars.yml` file within your Ansible project to provide default configurations for your Proxmox environment. 139 | You can find an example in the `group_vars/pve/vars.yml` file. -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vars.yml: -------------------------------------------------------------------------------- 1 | ######################## 2 | ## PROXMOX MANAGEMENT ## 3 | ######################## 4 | 5 | # default users 6 | ansible_user: "{{ pve_ansible_user_ssh }}" 7 | ansible_ssh_private_key_file: "{{ pve_ansible_ssh_private_key_file }}" 8 | 9 | # Define Ansible user and API Token variable 10 | pve_ansible_user_api: "{{ pve_ansible_user }}_api" # the pve_ansible_user variable is set in the vault; you can define it in this file if you want 11 | pve_ansible_user_ssh: "{{ pve_ansible_user }}_ssh" 12 | pve_ansible_user_api_realm: "{{ pve_ansible_user_api }}@pam" 13 | pve_ansible_token_id: "{{ pve_ansible_user_api }}_token" 14 | 15 | # Proxmox admin user information 16 | 17 | # Groups for Ansible User and Admin User 18 | pve_ansible_group: "pve_ansible_group" 19 | pve_admin_group: "pve_admin_group" 20 | pve_admin_group_role: "Administrator" 21 | 22 | # API Token 23 | pve_ansible_token_privilege: "Administrator" 24 | 25 | snippets_storage: local 26 | snippets_path: "/var/lib/vz/snippets/" 27 | image_storage: local 28 | vm_storage: local-lvm 29 | pve_image_path: "/var/lib/vz/images/0" 30 | 31 | 32 | 33 | ######################## 34 | ## Cluster Config ## 35 | ######################## 36 | pve_cluster_name: "my-proxmox-cluster" 37 | pve_cluster_join_timeout: 60 38 | 39 | # Proxmox network configuration (adjust for your setup) 40 | pve_network_interface: "vmbr1" 41 | pve_compute_bridge: "vmbr1" 42 | 43 | # Primary node in the cluster 44 | pve_primary_node: pve1 45 | 46 | # SSH-related settings 47 | pve_cluster_conf: /etc/pve/corosync.conf 48 | pve_ssh_ciphers: "aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com" 49 | 50 | ###################### 51 | ## VIRTUAL MACHINES ## 52 | ###################### 53 | vm_config: "{{ lookup('fileglob', playbook_dir + '/group_vars/pve/vms/*.yml', errors='ignore') | map('from_yaml') | list | combine }}" -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/docker-host-01.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | docker-host-01: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "docker-host-01" 8 | vmid: "1001" 9 | node: "pve1" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - ubuntu 14 | - docker 15 | 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" # This is the main interface and it'll have an ip address attached to it as defined below 19 | net1: "virtio,bridge=vnet12" # Other interface if needed.
This will most likely show up as ens19 in the virtual machine 20 | net2: "virtio,bridge=vnet13" # ens20 21 | net3: "virtio,bridge=vnet16" # ens21 22 | ipconfig: 23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.11.11/24,gw=10.0.11.1" # You can manually define the IP address for ipconfig0 = net0. Make sure the interface allows this VLAN 25 | 26 | # Cloud-init configuration 27 | citype: "nocloud" 28 | cicustom: "user=local:snippets/docker-host-01.yml" 29 | cloud_init_template: "docker-host.j2" 30 | cloud_init_template_name: "docker-host-01.yml" 31 | # Storage 32 | bootdisk: "scsi0" 33 | disk: "scsi0" 34 | size: "32G" 35 | scsihw: "virtio-scsi-pci" 36 | storage: "local-lvm" 37 | scsi: 38 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 39 | ide: 40 | ide2: "local-lvm:cloudinit" 41 | 42 | # Other VM Configuration 43 | agent: "enabled=1" 44 | onboot: true 45 | state: new # Manually set: 'new' means the virtual machine still needs to be created; 'present' means it already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 46 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/docker-host-02.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | docker-host-02: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "docker-host-02" 8 | vmid: "1002" 9 | node: "pve1" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - ubuntu 14 | - docker 15 | 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" # This is the main interface and it'll have an ip address attached to it as defined below 19 | net1: "virtio,bridge=vnet12" # Other interface if needed. This will most likely show up as ens19 in the virtual machine 20 | net2: "virtio,bridge=vnet13" # ens20 21 | net3: "virtio,bridge=vnet16" # ens21 22 | ipconfig: 23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.11.12/24,gw=10.0.11.1" # You can manually define the IP address for ipconfig0 = net0.
Make sure the interface allows this VLAN 25 | 26 | # Cloud-init configuration 27 | citype: "nocloud" 28 | cicustom: "user=local:snippets/docker-host-02.yml" 29 | cloud_init_template: "docker-host.j2" 30 | cloud_init_template_name: "docker-host-02.yml" 31 | # Storage 32 | bootdisk: "scsi0" 33 | disk: "scsi0" 34 | size: "32G" 35 | scsihw: "virtio-scsi-pci" 36 | storage: "local-lvm" 37 | scsi: 38 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 39 | ide: 40 | ide2: "local-lvm:cloudinit" 41 | 42 | # Other VM Configuration 43 | agent: "enabled=1" 44 | onboot: true 45 | state: new # Manually set: 'new' means the virtual machine still needs to be created; 'present' means it already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 46 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/docker-host-03.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | docker-host-03: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "docker-host-03" 8 | vmid: "1003" 9 | node: "pve2" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - ubuntu 14 | - docker 15 | 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" # This is the main interface and it'll have an ip address attached to it as defined below 19 | net1: "virtio,bridge=vnet12" # Other interface if needed. This will most likely show up as ens19 in the virtual machine 20 | net2: "virtio,bridge=vnet13" # ens20 21 | net3: "virtio,bridge=vnet16" # ens21 22 | ipconfig: 23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.11.13/24,gw=10.0.11.1" # You can manually define the IP address for ipconfig0 = net0. Make sure the interface allows this VLAN 25 | 26 | # Cloud-init configuration 27 | citype: "nocloud" 28 | cicustom: "user=local:snippets/docker-host-03.yml" 29 | cloud_init_template: "docker-host.j2" 30 | cloud_init_template_name: "docker-host-03.yml" 31 | # Storage 32 | bootdisk: "scsi0" 33 | disk: "scsi0" 34 | size: "32G" 35 | scsihw: "virtio-scsi-pci" 36 | storage: "local-lvm" 37 | scsi: 38 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 39 | ide: 40 | ide2: "local-lvm:cloudinit" 41 | 42 | # Other VM Configuration 43 | agent: "enabled=1" 44 | onboot: true 45 | state: new # Manually set: 'new' means the virtual machine still needs to be created; 'present' means it already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 46 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/docker-host-04.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | docker-host-04: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "docker-host-04" 8 | vmid: "1004" 9 | node: "pve2" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - ubuntu 14 | - docker 15 | 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" # This is the main interface and it'll have an ip address attached to it as defined below 19 | net1: "virtio,bridge=vnet12" # Other interface if needed.
This will most likely show up as ens19 in the virtual machine 20 | net2: "virtio,bridge=vnet13" # ens20 21 | net3: "virtio,bridge=vnet16" # ens21 22 | ipconfig: 23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.11.14/24,gw=10.0.11.1" # You can manually define the IP address for ipconfig0 = net0. Make sure the interface allows this VLAN 25 | 26 | # Cloud-init configuration 27 | citype: "nocloud" 28 | cicustom: "user=local:snippets/docker-host-04.yml" 29 | cloud_init_template: "docker-host.j2" 30 | cloud_init_template_name: "docker-host-04.yml" 31 | # Storage 32 | bootdisk: "scsi0" 33 | disk: "scsi0" 34 | size: "32G" 35 | scsihw: "virtio-scsi-pci" 36 | storage: "local-lvm" 37 | scsi: 38 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 39 | ide: 40 | ide2: "local-lvm:cloudinit" 41 | 42 | # Other VM Configuration 43 | agent: "enabled=1" 44 | onboot: true 45 | state: new # Manually set: 'new' means the virtual machine still needs to be created; 'present' means it already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 46 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/docker-host-05.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | docker-host-05: 3 | vm_image_name: noble-server-cloudimg-amd64.qcow2 4 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 5 | 6 | # Compute 7 | name: "docker-host-05" 8 | vmid: "1005" 9 | node: "pve3" 10 | memory: 4096 11 | cores: 4 12 | tags: 13 | - ubuntu 14 | - docker 15 | 16 | # Networking 17 | net: 18 | net0: "virtio,bridge=vmbr1" # This is the main interface and it'll have an ip address attached to it as defined below 19 | net1: "virtio,bridge=vnet12" # Other interface if needed. This will most likely show up as ens19 in the virtual machine 20 | net2: "virtio,bridge=vnet13" # ens20 21 | net3: "virtio,bridge=vnet16" # ens21 22 | ipconfig: 23 | # ipconfig0: "ip=dhcp" 24 | ipconfig0: "ip=10.0.11.15/24,gw=10.0.11.1" # You can manually define the IP address for ipconfig0 = net0.
Make sure the interface allows this VLAN 25 | 26 | # Cloud-init configuration 27 | citype: "nocloud" 28 | cicustom: "user=local:snippets/docker-host-05.yml" 29 | cloud_init_template: "docker-host.j2" 30 | cloud_init_template_name: "docker-host-05.yml" 31 | # Storage 32 | bootdisk: "scsi0" 33 | disk: "scsi0" 34 | size: "32G" 35 | scsihw: "virtio-scsi-pci" 36 | storage: "local-lvm" 37 | scsi: 38 | scsi0: "local-lvm:0,import-from=local:0/noble-server-cloudimg-amd64.qcow2,format=qcow2" 39 | ide: 40 | ide2: "local-lvm:cloudinit" 41 | 42 | # Other VM Configuration 43 | agent: "enabled=1" 44 | onboot: true 45 | state: new # Manually set: 'new' means the virtual machine still needs to be created; 'present' means it already exists in the Proxmox cluster/node and should not be created/stopped in some playbooks 46 | -------------------------------------------------------------------------------- /ansible_proxmox_management/group_vars/pve/vms/home-assistant.yml: -------------------------------------------------------------------------------- 1 | vm_config: 2 | home-assistant: 3 | vm_image_name: haos_ova-14.1.qcow2.xz 4 | vm_image_url: https://github.com/home-assistant/operating-system/releases/download/14.1/haos_ova-14.1.qcow2.xz 5 | # Compute 6 | name: "home-assistant" 7 | vmid: "1006" 8 | node: "pve3" 9 | memory: 4096 10 | cores: 4 11 | tags: 12 | - automation 13 | - home-assistant 14 | - iot 15 | # Networking 16 | net: 17 | net0: "virtio,bridge=vnet14" # the main vlan interface 18 | net1: "virtio,bridge=vnet17" # this interface can be connected to the same IoT/CCTV network, so you need to tune firewall rules and keep that VLAN fully closed 19 | ipconfig: 20 | # ipconfig0: "ip=dhcp" 21 | ipconfig0: "ip=10.0.14.10/28,gw=10.0.14.1" # for some reason these values are not applied and the VM defaults to DHCP 22 | ipconfig1: "ip=10.0.17.10/16,gw=10.0.17.1" # for some reason these values are not applied and the VM defaults to DHCP 23 | # Storage 24 | bootdisk: "scsi0" 25 | disk: "scsi0" 26 | size: "32G" 27 | scsihw: "virtio-scsi-pci" 28 | storage: "local-lvm" 29 | scsi: 30 | scsi0: "local-lvm:0,import-from=local:0/haos_ova-14.1.qcow2,format=qcow2" 31 | 32 | bios: ovmf # Use OVMF (EFI) 33 | efidisk0: # EFI disk configuration as the VM uses UEFI 34 | storage: local-lvm 35 | format: raw 36 | efitype: 4m 37 | pre_enrolled_keys: false 38 | 39 | # Other VM Configuration 40 | agent: "enabled=1" 41 | onboot: true 42 | state: new 43 | -------------------------------------------------------------------------------- /ansible_proxmox_management/inventory.yml: -------------------------------------------------------------------------------- 1 | pve: 2 | hosts: 3 | pve1: 4 | ansible_host: 10.0.10.11 5 | pve2: 6 | ansible_host: 10.0.10.12 7 | pve3: 8 | ansible_host: 10.0.10.13 9 | pve4: 10 | ansible_host: 10.0.10.14 11 | 12 | proxmox: 13 | hosts: 14 | pve10: 15 | ansible_host: 10.20.2.252 16 | pve11: 17 | ansible_host: 10.20.2.250 18 | pve12: 19 | ansible_host: 10.20.2.249 20 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_configure_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Proxmox Cluster 3 | hosts: pve 4 | become: true 5 | 6 | roles: 7 | - pve_create_cluster 8 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_configure_network_bridge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Proxmox
Bridges 3 | hosts: pve 4 | 5 | roles: 6 | - pve_configure_bridges 7 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_configure_sdn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure SDN and VLAN on Proxmox 3 | hosts: pve 4 | become: true 5 | 6 | roles: 7 | - pve_sdn_vlan_setup 8 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_create_virtual_machines.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create a virtual machine based on values in group vars 3 | hosts: pve 4 | become: true 5 | 6 | roles: 7 | - pve_download_image 8 | - pve_create_vm 9 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_delete_vm.yaml: -------------------------------------------------------------------------------- 1 | - name: Delete virtual machines hosted on Proxmox 2 | hosts: pve1 # Host or group of hosts; I use the primary node for clusters 3 | roles: 4 | - pve_delete_vm 5 | vars: 6 | node: "pve1" # Ensure this matches your actual node name 7 | vmids: 8 | - 1500 9 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_post_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Proxmox post install configuration 3 | hosts: pve 4 | become: true 5 | vars: 6 | ansible_user: "{{ pve_root_user }}" 7 | ansible_password: "{{ pve_root_password }}" 8 | roles: 9 | - pve_post_install 10 | - pve_update_firewall 11 | - pve_add_users 12 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_udpate_hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update the hosts 3 | hosts: pve 4 | become: true 5 | serial: 1 6 | roles: 7 | - pve_update_hosts 8 | -------------------------------------------------------------------------------- /ansible_proxmox_management/pve_vm_post_install_cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove cloud init related files and drives 3 | hosts: pve 4 | become: true 5 | roles: 6 | - pve_vm_post_install_cleanup 7 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_add_users 2 | This Ansible role automates user and access management within a Proxmox VE environment. It configures SSH access, creates Proxmox API users for automation, sets up an administrative user, and optionally integrates with Authentik for OpenID Connect authentication. 3 | 4 | ## Purpose 5 | 6 | This role is designed to streamline and automate the initial user and access control setup for a Proxmox VE server or cluster. It addresses the need for: 7 | 8 | * **Enhanced Security:** Hardening SSH access by disabling password authentication and enforcing key-based login. 9 | * **Automated Access:** Creating dedicated users for Ansible automation with API access and appropriate permissions. 10 | * **Administrative User Management:** Setting up a dedicated administrative user group and user with strong credentials.
11 | * **Centralized Authentication (Optional):** Integrating Proxmox VE with Authentik for centralized user authentication via OpenID Connect. 12 | 13 | 14 | ## Tasks Performed 15 | 16 | 1. **Ansible API User Creation:** 17 | * Creates a dedicated user for Ansible API access within Proxmox. 18 | * Creates a dedicated Proxmox group for Ansible users. 19 | * Adds the Ansible API user to the designated group. 20 | * Assigns a specified role to the Ansible user group for API permissions. 21 | * Generates an API token for the Ansible API user and saves it to a local file on the Ansible controller (delegated to localhost for security). 22 | 23 | 2. **Admin User Creation (to avoid using root):** 24 | * Creates a dedicated Proxmox Admin user. 25 | * Creates a dedicated Proxmox Admin group. 26 | * Adds the Admin user to the Admin group. 27 | * Assigns a specified role to the Admin user group for full Admin permissions. 28 | 29 | 3. **Authentik Realm Integration (Optional):** 30 | * Checks if an Authentik realm already exists in Proxmox. 31 | * Creates an Authentik realm in Proxmox if it doesn't exist, enabling OpenID Connect authentication against an Authentik instance. 32 | * You can generate the Authentik variables before Authentik is installed or configured, since Authentik lets you define the Client ID and Secret manually. 33 | 4. **SSH Configuration:** 34 | * Ensures SSH key-based authentication is enabled for the root user. 35 | * Disables password-based authentication for SSH. 36 | * Restarts the SSH service to apply configuration changes. 37 | * Ensures the SSH service is enabled and running. 38 | ## Variables 39 | 40 | * **`pve_root_user`** (*Required*): The root username for Proxmox. Defined in `group_vars/all/vault`. 41 | * **`pve_root_ssh_public_key_file`** (*Required*): Path to the public SSH key file for the root user. Defined in `group_vars/all/vault`. 42 | * **`pve_ansible_user`** (*Required*): The username for the Ansible user. Defined in `group_vars/all/vault`. 43 | * **`pve_ansible_ssh_private_key_file`** (*Required*): Path to the private SSH key file for the Ansible user. Defined in `group_vars/all/vault`. 44 | * **`pve_ansible_ssh_public_key_file`** (*Required*): Path to the public SSH key file for the Ansible user. Defined in `group_vars/all/vault`. 45 | * **`pve_ssh_port`** (*Optional*): The SSH port for Proxmox. Defaults to 22. Defined in `group_vars/all/vault`. 46 | * **`api_token_file_path`** (*Required*): The path where the generated API token will be stored. Defined in `group_vars/all/vault`. 47 | * **`_pve_admin_user_realm`** (*Required*): Proxmox admin user with realm. Fetched from Vault. Defined in `roles/pve_add_users/tasks/fetch_from_vault.yml`. 48 | * **`_pve_admin_password`** (*Required*): Password for the Proxmox admin user. Fetched from Vault. Defined in `roles/pve_add_users/tasks/fetch_from_vault.yml`. 49 | * **`pve_authentik_client_secret`** (*Required*): Client secret for Authentik integration. Fetched from Vault. Defined in `roles/pve_add_users/tasks/fetch_from_vault.yml`. 50 | * **`pve_authentik_client_id`** (*Required*): Client ID for Authentik integration. Fetched from Vault. Defined in `roles/pve_add_users/tasks/fetch_from_vault.yml`. 51 | * **`authentik_issuer_url`** (*Required*): URL of the Authentik issuer. Defined in `group_vars/all/vars`. 52 | * **`pve_ansible_user_api`**, **`pve_ansible_user_ssh`**, **`pve_ansible_user_api_realm`**, **`pve_ansible_token_id`**: Variables derived from `pve_ansible_user` for managing the Ansible user on Proxmox.
Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 53 | * **`pve_ansible_group`**, **`pve_admin_group`**, **`pve_admin_group_role`**: Variables to manage Proxmox groups and roles. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 54 | * **`pve_ansible_token_privilege`**: Privilege level for the Ansible user's API token. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 55 | 56 | 57 | ## Important Notes 58 | 59 | * This role requires the `community.general` and `community.hashi_vault` collections. Ensure these are installed before running the role. 60 | * The role retrieves secrets from a HashiCorp Vault instance. Make sure Vault is configured and accessible, and that the required secrets are stored in the specified path. 61 | * SSH keys for both root and the Ansible user must exist before running the role. The paths to these keys are configured via variables. This role modifies the `/etc/ssh/sshd_config` file and restarts the SSH service. Ensure no other processes are managing SSH configuration simultaneously. 62 | * Review the firewall rules created by this role and adjust them according to your specific security needs. 63 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/fetch_from_vault.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch secret from Vault 3 | community.hashi_vault.vault_kv2_get: 4 | url: "http://127.0.0.1:8200" 5 | path: proxmox 6 | engine_mount_point: ansible 7 | token_path: "/home/homelab" 8 | register: proxmox_vault 9 | delegate_to: localhost 10 | 11 | - name: Set secret value to variable 12 | ansible.builtin.set_fact: 13 | _pve_admin_user_realm: "{{ proxmox_vault.data.data.pve_admin_user_realm }}" 14 | _pve_admin_password: "{{ proxmox_vault.data.data.pve_admin_password }}" 15 | pve_authentik_client_secret: "{{ proxmox_vault.data.data.pve_authentik_client_secret }}" 16 | pve_authentik_client_id: "{{ proxmox_vault.data.data.pve_authentik_client_id }}" 17 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch from Vault 3 | ansible.builtin.import_tasks: fetch_from_vault.yml 4 | 5 | - name: Create the Proxmox Ansible User 6 | ansible.builtin.import_tasks: pve_add_ansible_user.yml 7 | 8 | - name: Create the Proxmox Admin User 9 | ansible.builtin.import_tasks: pve_add_admin_user.yml 10 | 11 | - name: Create the Authentik Realm 12 | ansible.builtin.import_tasks: pve_create_authentik_realm.yml 13 | 14 | - name: Configure SSH on Proxmox 15 | ansible.builtin.import_tasks: ssh_configuration.yml 16 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/pve_add_admin_user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure the Proxmox admin group exists 3 | ansible.builtin.shell: 4 | cmd: | 5 | set -o pipefail 6 | if !
pveum group list | grep -q "{{ pve_admin_group }}"; then 7 | pveum groupadd {{ pve_admin_group }} -comment 'Admin User Group'; 8 | fi 9 | executable: /bin/bash 10 | register: admin_group_add_result 11 | changed_when: admin_group_add_result.stdout != "" 12 | failed_when: admin_group_add_result.rc != 0 13 | 14 | - name: Ensure the Proxmox admin user exists 15 | ansible.builtin.shell: 16 | cmd: | 17 | set -o pipefail 18 | if ! pveum user list | grep -q "{{ _pve_admin_user_realm }}"; then 19 | pveum useradd {{ _pve_admin_user_realm }} --password {{ _pve_admin_password }} -comment 'Admin User'; 20 | fi 21 | executable: /bin/bash 22 | register: admin_user_add_result 23 | changed_when: admin_user_add_result.stdout != "" 24 | failed_when: admin_user_add_result.rc != 0 25 | 26 | - name: Ensure the admin user is in the admin group 27 | ansible.builtin.shell: 28 | cmd: | 29 | set -o pipefail 30 | if ! pveum user list {{ _pve_admin_user_realm }} | grep -q "{{ pve_admin_group }}"; then 31 | pveum usermod {{ _pve_admin_user_realm }} -group {{ pve_admin_group }}; 32 | fi 33 | executable: /bin/bash 34 | register: admin_usermod_result 35 | changed_when: admin_usermod_result.stdout != "" 36 | failed_when: admin_usermod_result.rc != 0 37 | 38 | - name: Ensure the admin group is assigned to admin role 39 | ansible.builtin.shell: 40 | cmd: | 41 | set -o pipefail 42 | if ! pveum acl list | grep -q "{{ pve_admin_group }}"; then 43 | pveum aclmod / -group {{ pve_admin_group }} -role {{ pve_admin_group_role }}; 44 | fi 45 | executable: /bin/bash 46 | register: admin_aclmod_result 47 | changed_when: admin_aclmod_result.stdout != "" 48 | failed_when: admin_aclmod_result.rc != 0 49 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/pve_add_ansible_user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Section 1: Add Ansible SSH User ### 3 | - name: Ensure Ansible SSH user exists 4 | ansible.builtin.user: 5 | name: "{{ pve_ansible_user_ssh }}" 6 | comment: "Ansible SSH User" 7 | shell: "/bin/bash" 8 | groups: "sudo" 9 | state: present 10 | 11 | - name: Ensure SSH key is present for {{ pve_ansible_user_ssh }} 12 | ansible.posix.authorized_key: 13 | user: "{{ pve_ansible_user_ssh }}" 14 | state: present 15 | key: "{{ lookup('file', pve_ansible_ssh_public_key_file) }}" 16 | 17 | - name: Add to sudoers {{ pve_ansible_user_ssh }} 18 | ansible.builtin.template: 19 | src: "templates/sudoers.j2" 20 | dest: "/etc/sudoers.d/{{ pve_ansible_user_ssh }}" 21 | mode: "0440" 22 | validate: "visudo -cf %s" # Ensure syntax validation before applying 23 | 24 | ### Section 2: Configure Proxmox API User ### 25 | - name: Ensure Proxmox API group exists 26 | ansible.builtin.shell: 27 | cmd: | 28 | set -o pipefail 29 | if ! pveum group list | grep -q "{{ pve_ansible_group }}"; then 30 | pveum groupadd {{ pve_ansible_group }} -comment 'Ansible Group'; 31 | fi 32 | executable: /bin/bash 33 | register: ansible_group_add_result 34 | changed_when: ansible_group_add_result.stdout != "" # Only mark as changed if the group was created 35 | failed_when: ansible_group_add_result.rc != 0 36 | 37 | - name: Ensure Proxmox API user exists 38 | ansible.builtin.shell: 39 | cmd: | 40 | set -o pipefail 41 | if ! 
pveum user list | grep -q "{{ pve_ansible_user_api_realm }}"; then 42 | pveum useradd {{ pve_ansible_user_api_realm }} -comment 'API user for VM deployment'; 43 | fi 44 | executable: /bin/bash 45 | register: ansible_user_add_result 46 | changed_when: ansible_user_add_result.stdout != "" # Only mark as changed if the user was created 47 | failed_when: ansible_user_add_result.rc != 0 48 | 49 | - name: Add Proxmox API user to group 50 | ansible.builtin.shell: 51 | cmd: | 52 | set -o pipefail 53 | if ! pveum user list {{ pve_ansible_user_api_realm }} | grep -q "{{ pve_ansible_group }}"; then 54 | pveum usermod {{ pve_ansible_user_api_realm }} -group {{ pve_ansible_group }}; 55 | fi 56 | executable: /bin/bash 57 | register: ansible_user_mod_result 58 | changed_when: ansible_user_mod_result.stdout != "" # Only mark as changed if the user was modified 59 | failed_when: ansible_user_mod_result.rc != 0 60 | 61 | - name: Assign group to role 62 | ansible.builtin.shell: 63 | cmd: | 64 | set -o pipefail 65 | if ! pveum acl list | grep -q "{{ pve_ansible_group }}"; then 66 | pveum aclmod / -group {{ pve_ansible_group }} -role {{ pve_admin_group_role }}; 67 | fi 68 | executable: /bin/bash 69 | register: ansible_acl_mod_result 70 | changed_when: ansible_acl_mod_result.stdout != "" # Only mark as changed if the acl was modified 71 | failed_when: ansible_acl_mod_result.rc != 0 72 | 73 | ### Section 3: Generate API Key ### 74 | - name: Check if token exists for the Proxmox API user 75 | ansible.builtin.shell: 76 | cmd: | 77 | set -o pipefail 78 | pveum user token list {{ pve_ansible_user_api_realm }} --output-format json | jq -r '.[] | select(.id=="{{ pve_ansible_token_id }}") | .id' 79 | executable: /bin/bash 80 | register: token_check_result 81 | changed_when: false 82 | failed_when: false 83 | 84 | - name: Delete existing token for the Proxmox API user if it exists 85 | ansible.builtin.command: 86 | cmd: "pveum user token delete {{ pve_ansible_user_api_realm }} {{ pve_ansible_token_id }}" 87 | when: token_check_result.stdout != "" 88 | register: token_delete_result 89 | changed_when: "'deleted' in token_delete_result.stderr or token_check_result.stdout != ''" 90 | 91 | - name: Generate API key for the Proxmox API user 92 | ansible.builtin.command: 93 | cmd: "pveum user token add {{ pve_ansible_user_api_realm }} {{ pve_ansible_token_id }} -privsep 0 --comment 'API token for ansible user'" 94 | register: api_token_output 95 | changed_when: "'Token already exists' not in api_token_output.stdout" 96 | failed_when: "'error' in api_token_output.stdout" 97 | 98 | - name: Save API token value to localhost file 99 | ansible.builtin.copy: 100 | content: "{{ api_token_output.stdout | regex_findall('[a-f0-9\\-]{36}') | first }}" 101 | dest: "/home/homelab/.ansible/{{ inventory_hostname }}-{{ pve_ansible_token_id }}" 102 | mode: "0644" 103 | force: true 104 | delegate_to: localhost 105 | when: api_token_output.stdout is not none and 106 | (api_token_output.stdout | regex_findall('[a-f0-9\\-]{36}') | length > 0) 107 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/pve_create_authentik_realm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # roles/pve_add_users/tasks/pve_create_authentik_realm.yml 3 | - name: Check if Authentik realm exists 4 | ansible.builtin.command: 5 | cmd: "pveum realm list" 6 | register: realm_list 7 | changed_when: false 8 | failed_when: realm_list.rc != 0 9 |
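# The realm-creation task below runs only when 'authentik' is absent from the realm list gathered above, which keeps repeated runs of this role idempotent.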
10 | - name: Ensure Authentik realm exists in Proxmox 11 | ansible.builtin.command: 12 | cmd: > 13 | pveum realm add authentik 14 | --type openid 15 | --client-id "{{ pve_authentik_client_id }}" 16 | --client-key "{{ pve_authentik_client_secret }}" 17 | --issuer-url "{{ authentik_issuer_url }}" 18 | --username-claim username 19 | --autocreate 1 20 | when: "'authentik' not in realm_list.stdout" 21 | register: realm_add_result 22 | changed_when: "'already exists' not in realm_add_result.stderr" 23 | failed_when: realm_add_result.rc != 0 and 'already exists' not in realm_add_result.stderr 24 | 25 | - name: Confirm Authentik realm creation 26 | ansible.builtin.debug: 27 | msg: "Authentik realm created successfully or already exists." 28 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/tasks/ssh_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure SSH key is present for {{ pve_root_user }} 3 | ansible.posix.authorized_key: 4 | user: "{{ pve_root_user }}" 5 | state: present 6 | key: "{{ lookup('file', pve_root_ssh_public_key_file) }}" 7 | 8 | - name: Deactivate PasswordAuthentication 9 | ansible.builtin.lineinfile: 10 | path: "/etc/ssh/sshd_config" 11 | line: "PasswordAuthentication yes" 12 | state: absent 13 | 14 | - name: Ensure PasswordAuthentication is not activated 15 | ansible.builtin.lineinfile: 16 | path: "/etc/ssh/sshd_config" 17 | line: "PasswordAuthentication no" 18 | state: present 19 | 20 | - name: Restart SSH 21 | ansible.builtin.service: 22 | name: sshd 23 | state: restarted 24 | 25 | - name: Ensure SSH service is enabled and running 26 | ansible.builtin.service: 27 | name: sshd 28 | state: started 29 | enabled: true 30 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_add_users/templates/sudoers.j2: -------------------------------------------------------------------------------- 1 | {{ pve_ansible_user_ssh }} ALL=(ALL) NOPASSWD:ALL -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_configure_bridges/tasks/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_configure_bridges 2 | 3 | This Ansible role automates the creation of a network bridge on a Proxmox VE node using `pvesh`. It intelligently selects a suitable physical interface to bridge, prioritizing interfaces that are currently down, and ensures the bridge is created with the desired name. 4 | 5 | ## Purpose 6 | 7 | The purpose of this role is to simplify and automate the creation of network bridges in a Proxmox VE environment. It addresses the need for a consistent and repeatable way to configure bridges, particularly when setting up virtualized networks or preparing Proxmox hosts for specific networking configurations. By using this role, you can: 8 | 9 | * Automate the creation of network bridges, reducing manual command execution and potential errors. 10 | * Dynamically select a suitable physical interface for bridging, adapting to different server configurations. 11 | * Ensure bridge creation is idempotent, preventing unintended changes on subsequent role executions. 12 | * Quickly provision Proxmox nodes with necessary bridge interfaces for virtual machine networking or SDN setups. 
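As an illustration, a minimal play applying this role could look like the sketch below (the `pve` host group and the `pve_compute_bridge` value follow this repository's conventions; adjust them to your inventory):

```yaml
- name: Create the compute bridge on all Proxmox nodes
  hosts: pve
  become: true
  roles:
    - pve_configure_bridges
  vars:
    pve_compute_bridge: vmbr1  # name of the bridge the role will create
```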
13 | 14 | This role is beneficial in scenarios where you need to programmatically create bridges as part of your Proxmox infrastructure provisioning or when you want a more robust and automated approach to bridge configuration than manual steps. 15 | 16 | I use this role to create a bridge `vmbr1` for the traffic coming from the virtual machines, separating it from the Proxmox management traffic. The role checks for interfaces that are not already in use, which is perfect if you have multiple network interfaces. If you do not have a second interface, I manually create a `vmbr1` interface on Proxmox based on the default interface (in many cases `eno1` or `enp0s18`) and use `eno1.1` or `enp0s18.1` as the bridge port. While not ideal or perfect, it kinda works. 17 | 18 | ## Variables 19 | 20 | * **`pve_compute_bridge`** (*Required*): The name of the bridge interface to be created. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 21 | * **`down_interface`**: The first available network interface that is currently down. Automatically determined. Defined in `roles/pve_configure_bridges/vars/main.yml`. 22 | * **`up_interface`**: The first available network interface that is currently up (used as a fallback if no down interface is found). Automatically determined. Defined in `roles/pve_configure_bridges/vars/main.yml`. 23 | 24 | ## Important Notes 25 | 26 | * This role assumes that you have a network interface available other than loopback, existing bridges, and wireless devices. 27 | * The role will fail if no suitable interface is found for creating the bridge. 28 | * Bridge configuration is done using `pvesh`. Ensure your Proxmox environment is set up correctly for using pvesh. 29 | 30 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_configure_bridges/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set bridge interface name {{ pve_compute_bridge }} 3 | ansible.builtin.set_fact: 4 | iface_name: "{{ pve_compute_bridge }}" 5 | 6 | - name: Get existing network interfaces (excluding loopback, existing bridges, and wireless devices) 7 | ansible.builtin.shell: ip -br link show | awk '$1 != "lo" && $1 !~ /@/ && $1 !~ /^vmbr/ && $1 !~ /^wl/ {print $1,$2}' 8 | register: interfaces_with_state 9 | changed_when: false 10 | 11 | - name: Find a suitable interface (preferably DOWN) 12 | ansible.builtin.set_fact: 13 | # bridge_interface: "{{ (interfaces_with_state.stdout_lines | select('search', ' DOWN$') | map('split', ' ') | map(attribute=0) | list | first) | default((interfaces_with_state.stdout_lines | select('search', ' UP$') | map('split', ' ') | map(attribute=0) | list | first)) }}" # select down first and if none select up 14 | bridge_interface: "{{ down_interface | default(up_interface) }}" 15 | when: interfaces_with_state.stdout_lines | length > 0 16 | 17 | - name: Fail if no suitable interface is found 18 | ansible.builtin.fail: 19 | msg: "No suitable network interface found to create the bridge. Ensure an interface other than loopback, existing bridges and wireless is available."
20 | when: bridge_interface is not defined 21 | 22 | - name: Check if bridge already exists for {{ iface_name }} 23 | ansible.builtin.command: pvesh get /nodes/{{ inventory_hostname }}/network/{{ iface_name }} 24 | register: bridge_exists 25 | changed_when: false 26 | ignore_errors: true 27 | 28 | - name: Create bridge using pvesh for {{ iface_name }} 29 | ansible.builtin.shell: > 30 | pvesh create /nodes/{{ inventory_hostname }}/network 31 | --iface {{ iface_name }} 32 | --type bridge --autostart 1 33 | --bridge_ports {{ bridge_interface }} 34 | become: true 35 | changed_when: true 36 | when: bridge_exists.rc != 0 37 | register: pvesh_result 38 | ignore_errors: true 39 | 40 | - name: Apply the pending network configuration 41 | ansible.builtin.command: pvesh set /nodes/{{ inventory_hostname }}/network/ 42 | register: bridge_config 43 | become: true 44 | changed_when: false 45 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_configure_bridges/vars/main.yml: -------------------------------------------------------------------------------- 1 | down_interface: "{{ interfaces_with_state.stdout_lines | select('search', ' DOWN$') | map('split', ' ') | map(attribute=0) | list | first }}" 2 | up_interface: "{{ interfaces_with_state.stdout_lines | select('search', ' UP$') | map('split', ' ') | map(attribute=0) | list | first }}" -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_cluster/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_create_cluster 2 | 3 | This Ansible role automates the setup of a Proxmox VE cluster. It handles cluster initialization on the primary node, joining nodes to the cluster, and configuring SSH access for cluster communication. 4 | 5 | ## Purpose 6 | 7 | This role simplifies the process of creating and expanding a Proxmox VE cluster. It automates the steps necessary to form a functional cluster, including: 8 | 9 | * **Cluster Initialization:** Automatically initializes a new Proxmox cluster on the designated primary node. 10 | * **Node Joining:** Streamlines the process of adding additional Proxmox nodes to the existing cluster. 11 | * **SSH Configuration:** Configures SSH keys and settings to facilitate secure and automated communication within the cluster. 12 | * **Consistency and Repeatability:** Ensures a consistent cluster setup across all nodes, reducing manual errors and simplifying management. 13 | 14 | This creates a cluster using the primary_node variable; you can run it again to have a new node join the existing cluster with the same primary node. 15 | 16 | ## Tasks Performed 17 | 18 | 1. **Cluster Pre-checks:** Verifies if a Corosync configuration exists and checks for existing cluster membership. 19 | 2. **Cluster Identification:** Identifies if the host is already part of a cluster and retrieves cluster information. 20 | 3. **Cluster Name Validation:** Ensures that if a cluster is found, its name matches the expected cluster name. 21 | 4. **Cluster Initialization (Primary Node):** Initializes a new Proxmox cluster on the designated primary node if no cluster exists. 22 | 5. **Quorum Verification (Primary Node):** Waits for quorum to be established on the primary node after cluster initialization. 23 | 6.
**SSH Key Generation and Distribution:** Creates SSH key pairs for the root user on each node and distributes public keys to authorized keys for passwordless SSH access within the cluster. 24 | 7. **SSH Client Configuration:** Configures SSH client settings for seamless communication between cluster nodes. 25 | 8. **SSH Server Configuration:** Configures SSH server settings to allow root logins from cluster hosts (passwordless, key-based). 26 | 9. **Node Joining (Non-Primary Nodes):** Adds non-primary nodes to the Proxmox cluster, leveraging SSH for secure joining. 27 | 10. **Temporary SSH Host Key Handling (Node Joining):** Temporarily adds the primary node's SSH host key to the `known_hosts` file during the join process and removes it afterwards. 28 | 29 | ## Variables 30 | 31 | * **pve\_cluster\_name** *(Required)*: 32 | * Description: The desired name for the Proxmox cluster. This name will be used when creating a new cluster or validating against an existing one. 33 | * Example: `"my-proxmox-cluster"` 34 | 35 | * **pve\_primary\_node** *(Required)*: 36 | * Description: The hostname or IP address of the designated primary Proxmox node. This node will be used for cluster initialization and as the join point for other nodes. This should correspond to a host defined in your Ansible inventory. 37 | * Example: `"proxmox01"` 38 | 39 | * **pve\_cluster\_conf** *(Required)*: 40 | * Description: The path to the Proxmox cluster configuration file. This is used to check if a cluster already exists. 41 | * Default: `"/etc/pve/corosync.conf"` 42 | 43 | * **pve\_ssh\_port** *(Optional)*: 44 | * Description: The SSH port used for Proxmox nodes. Defaults to standard SSH port 22. 45 | * Default: `"22"` 46 | * Example: `"2222"` 47 | 48 | * **pve\_ssh\_ciphers** *(Optional)*: 49 | * Description: A string defining the SSH ciphers to be used in the SSH client configuration for better security. Defaults to a set of recommended ciphers. 50 | * Default: `"chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes128-ctr"` 51 | 52 | ## Important Notes 53 | 54 | * This role requires root access to the PVE nodes. 55 | * Ensure that all PVE nodes have network connectivity to each other before running this role. 56 | * The role modifies SSH configuration files and restarts the SSH service. Be cautious if you are manually managing SSH configurations. This role uses the `pvecm` command to manage the cluster. Ensure this command is available on your Proxmox nodes. 
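As a usage sketch, the role can be applied cluster-wide with a play like the following (host group, cluster name, and primary node are illustrative and mirror the defaults described above):

```yaml
- name: Create or expand the Proxmox VE cluster
  hosts: pve
  become: true
  roles:
    - pve_create_cluster
  vars:
    pve_cluster_name: my-proxmox-cluster
    pve_primary_node: pve1  # must match a hostname in the Ansible inventory
```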
-------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_cluster/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reload ssh server configuration 3 | ansible.builtin.systemd: 4 | name: ssh.service 5 | state: reloaded 6 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_cluster/tasks/configure_ssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create SSH directory for root 3 | ansible.builtin.file: 4 | path: /root/.ssh/ 5 | state: directory 6 | mode: "0700" 7 | 8 | - name: Create root SSH key pair for PVE 9 | ansible.builtin.user: 10 | name: root 11 | generate_ssh_key: true 12 | ssh_key_bits: 521 # ignored for ed25519 keys, which have a fixed size 13 | ssh_key_file: /root/.ssh/id_ed25519 14 | ssh_key_type: ed25519 15 | ssh_key_comment: "root@{{ inventory_hostname_short }}" 16 | 17 | - name: Fetch root SSH public key 18 | ansible.builtin.slurp: 19 | src: /root/.ssh/id_ed25519.pub 20 | register: _pve_root_ssh_pubkey 21 | 22 | - name: Authorize all hosts' root SSH public keys 23 | ansible.posix.authorized_key: 24 | user: root 25 | key: "{{ hostvars[item]._pve_root_ssh_pubkey.content | b64decode }}" 26 | with_items: "{{ ansible_play_hosts }}" 27 | 28 | - name: Configure SSH clients for connecting to PVE cluster hosts 29 | ansible.builtin.blockinfile: 30 | dest: /etc/ssh/ssh_config 31 | create: true 32 | mode: "0644" 33 | marker: "# {mark}: PVE host configuration options (managed by ansible)." 34 | content: | 35 | {% for host in ansible_play_hosts %} 36 | Host {{ hostvars[host].ansible_host }} 37 | IdentityFile /root/.ssh/id_ed25519 38 | Port {{ pve_ssh_port }} 39 | {% endfor %} 40 | 41 | - name: Allow root logins from PVE cluster hosts 42 | ansible.builtin.blockinfile: 43 | dest: /etc/ssh/sshd_config 44 | marker: "# {mark}: Allow root logins from PVE hosts (managed by ansible)."
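# One "Match Address" block is rendered per cluster host below; prohibit-password allows root logins from those addresses with SSH keys only.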
45 | content: | 46 | {% for host in ansible_play_hosts %} 47 | Match Address {{ hostvars[host].ansible_host }} 48 | PermitRootLogin prohibit-password 49 | {% endfor %} 50 | validate: "/usr/sbin/sshd -t -f %s" 51 | notify: 52 | - Reload ssh server configuration 53 | 54 | - name: Enable and start SSH server 55 | ansible.builtin.systemd: 56 | name: ssh.service 57 | enabled: true 58 | state: started 59 | 60 | - name: Fetch an SSH public key to use for cluster joins 61 | ansible.builtin.slurp: 62 | src: "/etc/ssh/ssh_host_ed25519_key.pub" 63 | register: _pve_ssh_public_key 64 | 65 | - name: Add PVE-provided ciphers to SSH client config 66 | ansible.builtin.lineinfile: 67 | line: "Ciphers {{ pve_ssh_ciphers }}" 68 | regexp: "^Ciphers .*" 69 | insertbefore: BOF 70 | create: true 71 | mode: "0644" 72 | dest: /root/.ssh/config 73 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_cluster/tasks/join_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Join_cluster_init 4 | when: inventory_hostname != pve_primary_node 5 | block: 6 | - name: Identify the SSH public key and SSH addresses of initial cluster host 7 | ansible.builtin.set_fact: 8 | _pve_cluster_host_key: "{{ ' '.join((hostvars[pve_primary_node]._pve_ssh_public_key.content | b64decode).split()[:-1]) }}" 9 | _pve_cluster_host_addresses: "{{ hostvars[pve_primary_node].ansible_host }}" 10 | 11 | - name: Temporarily mark that cluster host as known in root user's known_hosts 12 | ansible.builtin.blockinfile: 13 | dest: /root/.ssh/known_hosts 14 | create: true 15 | mode: "0600" 16 | marker: "# {mark}: cluster host key for joining" 17 | content: "{{ _pve_cluster_host_addresses }} {{ _pve_cluster_host_key }}" 18 | 19 | # Add the node to the Proxmox cluster over SSH 20 | - name: Add node to Proxmox cluster 21 | ansible.builtin.command: pvecm add {{ hostvars[pve_primary_node].ansible_host }} -use_ssh 22 | when: inventory_hostname != pve_primary_node 23 | throttle: 1 24 | args: 25 | creates: "{{ pve_cluster_conf }}" 26 | 27 | - name: Remove the cluster host's public key from root user's known_hosts 28 | ansible.builtin.blockinfile: 29 | dest: /root/.ssh/known_hosts 30 | state: absent 31 | mode: "0600" 32 | marker: "# {mark}: cluster host key for joining" 33 | when: inventory_hostname != pve_primary_node 34 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if Corosync config exists 3 | ansible.builtin.stat: 4 | path: /etc/pve/corosync.conf 5 | register: corosync_stat 6 | 7 | - name: Lookup cluster information 8 | ansible.builtin.command: pvecm status 9 | register: _pvecm_status 10 | when: corosync_stat.stat.exists 11 | ignore_errors: true 12 | changed_when: false 13 | 14 | - name: Identify if the host is already part of a cluster 15 | ansible.builtin.set_fact: 16 | _pve_active_cluster: "{{ _pvecm_status.stdout | regex_search('^Name:\\s*(.*)$', '\\1', multiline=True) | first }}" 17 | when: "_pvecm_status.rc | default('') == 0" 18 | 19 | - name: Identify all clusters that the hosts may be in 20 | ansible.builtin.set_fact: 21 | _pve_found_clusters: "{{ _pve_found_clusters | default([]) + [_pve_active_cluster] }}" 22 | when: _pve_active_cluster is defined 23 | 24 | - name: Ensure that hosts found are not in multiple existing clusters
25 | ansible.builtin.assert: 26 | that: 27 | - "_pve_found_clusters | default([]) | length <= 1" 28 | msg: "Some or all of the hosts are part of multiple clusters. Please ensure hosts are only part of one cluster." 29 | 30 | - name: Ensure the found cluster matches the specified cluster name 31 | ansible.builtin.assert: 32 | that: 33 | - "_pve_found_clusters | length > 0" 34 | - "_pve_found_clusters[0] == pve_cluster_name" 35 | msg: "Some or all of the hosts in the inventory, specifically '{{ inventory_hostname }}', appear to be in a cluster named \ 36 | '{{ _pve_found_clusters | default([]) | first }}', which differs from the specified cluster name of \ 37 | '{{ pve_cluster_name }}'. Please ensure the cluster name is correct. An existing cluster's name cannot be modified." 38 | when: "_pve_found_clusters is defined and (_pve_found_clusters | length > 0)" 39 | 40 | - name: Default initialization node to the primary node 41 | ansible.builtin.set_fact: 42 | _init_node: "{{ _init_node | default(groups['all'][0]) }}" 43 | when: inventory_hostname == pve_primary_node 44 | 45 | 46 | - name: Initialize the Proxmox cluster if no cluster is found 47 | ansible.builtin.command: > 48 | pvecm create {{ pve_cluster_name }} 49 | args: 50 | creates: "{{ pve_cluster_conf }}" 51 | when: 52 | - "_pve_found_clusters is not defined" 53 | - "inventory_hostname == pve_primary_node" 54 | 55 | - name: Wait for quorum on primary node 56 | ansible.builtin.command: pvecm status 57 | register: _pvecm_quorum_status 58 | when: 59 | - "_pve_found_clusters is not defined" 60 | - "inventory_hostname == pve_primary_node" 61 | retries: 5 62 | delay: 10 63 | until: "_pvecm_quorum_status.stdout | regex_search('Quorate\\s*:\\s*Yes')" 64 | changed_when: false 65 | 66 | - name: Configure SSH 67 | ansible.builtin.include_tasks: configure_ssh.yml 68 | 69 | - name: Join Cluster 70 | ansible.builtin.include_tasks: join_cluster.yml 71 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_create_vm 2 | 3 | This Ansible role automates the creation and initial configuration of virtual machines within a Proxmox VE environment. It handles uploading Cloud-Init user-data, creating the VM, resizing disks, and starting the newly created virtual machines. 4 | 5 | ## Purpose 6 | 7 | This role simplifies and automates the deployment of virtual machines on Proxmox VE. It is designed to be used in scenarios where you need to programmatically create and configure multiple VMs based on a defined configuration. The role aims to: 8 | 9 | * Streamline VM creation by handling Cloud-Init configuration and VM provisioning in a single Ansible role. 10 | * Provide a consistent and repeatable method for deploying VMs, reducing manual configuration errors. 11 | * Automate common post-creation tasks such as disk resizing and VM startup. 12 | * Integrate with HashiCorp Vault for secure secret management (optional, for fetching API tokens and admin passwords). 13 | 14 | 15 | ## Tasks Performed 16 | 17 | 1. Fetch secrets from HashiCorp Vault (if configured): the VM admin username and hashed password used in the Cloud-Init user-data. 18 | 2. Upload Cloud-Init user-data templates to Proxmox snippets, preparing customization for the VMs. 19 | 3.
Create the virtual machines in Proxmox VE based on the provided configuration. 20 | 4. Resize the virtual machine disks after creation to the size defined in each VM's configuration. 21 | 5. Start the newly created virtual machines. 22 | 23 | ## Variables 24 | 25 | * **`api_token_file_path`** (*Required*): Path to the Ansible user's API token for Proxmox. Defined in `group_vars/all/vault`. 26 | * **`pve_primary_node`** (*Required*): The hostname or IP address of the primary Proxmox node. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 27 | * **`vm_config`** (*Required*): A dictionary defining the configuration for each VM. This is loaded from YAML files in `group_vars/pve/vms/` and/or `group_vars/proxmox/vms/`. 28 | * **Example VM Configuration:** Defined in the respective `group_vars` files. The following are common parameters: 29 | * **`name`**: Name of the VM. 30 | * **`vmid`**: Proxmox VM ID. 31 | * **`node`**: Proxmox node to create the VM on. 32 | * **`memory`**: Memory allocated to the VM. 33 | * **`cores`**: Number of cores allocated to the VM. 34 | * **`net`**: Network configuration for the VM. 35 | * **`ipconfig`**: IP configuration for the VM. 36 | * **`citype`**: Cloud-init configuration type. 37 | * **`cicustom`**: Custom cloud-init parameters. 38 | * **`cloud_init_template`**: The cloud-init template file. 39 | * **`cloud_init_template_name`**: Name of the cloud-init file in proxmox snippets. 40 | * **`state`**: Desired state of the VM. Either `new` or `present`. 41 | * **`vm_storage`** (*Required*): Storage location for VMs. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 42 | * **`dns_servers`** (*Required*): DNS servers for VMs. Defined in `group_vars/all/vars`. 43 | * **`_vm_admin_user`** (*Required*): Admin username for the VMs. Fetched from Vault. 44 | * **`_vm_admin_password_hashed`** (*Required*): Hashed password for the VM admin user. Fetched from Vault. 45 | * **`domain`** (*Required*): Domain name used in VM configuration. Defined in `group_vars/all/vars`. 46 | * **`vm_admin_ssh_public_key_file`** (*Required*): Path to the VM admin's public SSH key. Defined in `group_vars/all/vault`. 47 | * **`vm_ansible_user`** (*Required*): Ansible user for the VMs. Defined in `group_vars/all/vault`. 48 | * **`vm_ansible_ssh_public_key_file`** (*Required*): Path to the Ansible user's public SSH key for the VMs. Defined in `group_vars/all/vault`. 49 | 50 | 51 | ## Important Notes 52 | 53 | * This role depends on the `community.general` Ansible collection and HashiCorp Vault. 54 | * Ensure the required cloud-init template file exists in the `templates` directory. 55 | * The role assumes that the specified Proxmox node and storage are available. 56 | * Review and adjust the VM configuration parameters in the `group_vars` files to match your requirements. 57 | * The variable `state` controls whether to create a new VM or modify an existing VM. When set to `new`, a new VM is created. When set to `present`, the role ensures the VM exists. This is defined in your `vm_config`.
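58 | 59 | ## Example Usage 60 | 61 | As a sketch, a single entry in `vm_config` might look like the following; every value here is illustrative (the VM name, IDs, node, bridge, storage names, and paths are assumptions, not taken from this repository) and should be adapted to your environment: 62 | 63 | ```yaml 64 | vm_config: 65 | docker-host-06: 66 | vmid: 106 67 | node: "pve01" 68 | memory: 4096 69 | cores: 2 70 | net: 71 | net0: "virtio,bridge=vmbr1,tag=100" 72 | ipconfig: 73 | ipconfig0: "ip=192.168.100.16/24,gw=192.168.100.1" 74 | scsihw: virtio-scsi-pci 75 | bootdisk: scsi0 76 | scsi: 77 | scsi0: "local-lvm:0,import-from=/var/lib/vz/images/0/noble-server-cloudimg-amd64.qcow2" 78 | ide: 79 | ide2: "local-lvm:cloudinit" 80 | disk: scsi0 81 | size: 32G 82 | citype: nocloud 83 | cicustom: "user=local:snippets/docker-host-06.yml" 84 | cloud_init_template: docker-host.j2 85 | cloud_init_template_name: docker-host-06.yml 86 | state: new 87 | ```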
88 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Sleep 2 | ansible.builtin.wait_for: 3 | timeout: 5 # Wait for 5 seconds (you can adjust this as needed) 4 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/create_vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create virtual machine in Proxmox 3 | community.general.proxmox_kvm: 4 | # Proxmox API configuration 5 | api_user: "{{ pve_ansible_user_api_realm }}" 6 | api_token_id: "{{ pve_ansible_token_id }}" 7 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 8 | api_host: "{{ inventory_hostname }}" 9 | 10 | # Virtual machine configuration 11 | autostart: "{{ item.value.autostart | default(true) }}" 12 | vmid: "{{ item.value.vmid }}" 13 | name: "{{ item.key }}" 14 | node: "{{ item.value.node }}" 15 | memory: "{{ item.value.memory }}" 16 | cores: "{{ item.value.cores }}" 17 | tags: "{{ item.value.tags | default(omit) }}" 18 | bootdisk: "{{ item.value.bootdisk }}" 19 | scsihw: "{{ item.value.scsihw }}" 20 | storage: "{{ vm_storage }}" 21 | searchdomains: "{{ dns_servers }}" 22 | nameservers: "{{ dns_servers }}" 23 | net: "{{ item.value.net }}" 24 | ipconfig: "{{ item.value.ipconfig }}" 25 | 26 | # Cloud-init configuration 27 | citype: "{{ item.value.citype | default(omit) }}" 28 | cicustom: "{{ item.value.cicustom | default(omit) }}" 29 | # (cloud_init_template is consumed by upload_cloud_init_file.yml, not by proxmox_kvm) 30 | 31 | efidisk0: "{{ item.value.efidisk0 | default(omit) }}" 32 | bios: "{{ item.value.bios | default('seabios') }}" 33 | ostype: "{{ item.value.ostype | default('l26') }}" 34 | scsi: "{{ item.value.scsi }}" 35 | ide: "{{ item.value.ide | default(omit) }}" 36 | agent: "enabled=1" 37 | onboot: "{{ item.value.onboot | default('true') }}" 38 | state: present 39 | timeout: 30 # Timeout in seconds 40 | loop: "{{ vm_config | dict2items }}" 41 | loop_control: 42 | pause: 5 43 | label: "{{ item.key }}" 44 | register: created_pve_vm 45 | delegate_to: "{{ ansible_play_batch[0] }}" 46 | when: inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 47 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/fetch_from_vault.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch secret from Vault 3 | community.hashi_vault.vault_kv2_get: 4 | url: "http://127.0.0.1:8200" 5 | path: proxmox 6 | engine_mount_point: ansible 7 | token_path: "/home/homelab" 8 | register: proxmox_vault 9 | delegate_to: localhost 10 | 11 | - name: Set secret value to variable 12 | ansible.builtin.set_fact: 13 | _vm_admin_user: "{{ proxmox_vault.data.data.vm_admin_user }}" 14 | _vm_admin_password_hashed: "{{ proxmox_vault.data.data.vm_admin_password_hashed }}" 15 | 16 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch from Vault 3 | ansible.builtin.import_tasks: fetch_from_vault.yml 4 | 5 | - name: Upload Cloud Init File to Proxmox snippet 6 |
ansible.builtin.import_tasks: upload_cloud_init_file.yml 7 | 8 | - name: Create Virtual machine based on playbook variables 9 | ansible.builtin.import_tasks: create_vm.yml 10 | 11 | - name: Modify disk size following cloud init install 12 | ansible.builtin.import_tasks: update_disk_size.yml 13 | 14 | - name: Start virtual machine 15 | ansible.builtin.import_tasks: start_vm.yml 16 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/start_vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Start VMs 3 | - name: Start VMs 4 | community.general.proxmox_kvm: 5 | api_user: "{{ pve_ansible_user_api_realm }}" 6 | api_token_id: "{{ pve_ansible_token_id }}" 7 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 8 | api_host: "{{ inventory_hostname }}" 9 | 10 | # node: '{{item.node}}' 11 | vmid: "{{ item.value.vmid }}" 12 | name: "{{ item.key }}" 13 | state: "started" 14 | loop: "{{ vm_config | dict2items }}" 15 | loop_control: 16 | pause: 5 17 | label: "{{ item.key }}" 18 | delegate_to: "{{ ansible_play_batch[0] }}" 19 | when: created_pve_vm is succeeded and inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 20 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/update_disk_size.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update existing disk 3 | community.general.proxmox_disk: 4 | # ansible proxmox api variables 5 | api_user: "{{ pve_ansible_user_api_realm }}" 6 | api_token_id: "{{ pve_ansible_token_id }}" 7 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 8 | api_host: "{{ inventory_hostname }}" 9 | 10 | vmid: "{{ item.value.vmid }}" 11 | disk: "{{ item.value.disk }}" 12 | size: "{{ item.value.size }}" 13 | 14 | state: "resized" 15 | timeout: 60 16 | loop: "{{ vm_config | dict2items }}" 17 | loop_control: 18 | pause: 5 19 | label: "{{ item.key }}" 20 | delegate_to: "{{ ansible_play_batch[0] }}" 21 | when: created_pve_vm is succeeded and inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 22 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/tasks/upload_cloud_init_file.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Upload Cloud-Init user-data 3 | become: true 4 | ansible.builtin.template: 5 | src: "{{ item.value.cloud_init_template }}" 6 | dest: "{{ snippets_path }}{{ item.value.cloud_init_template_name }}" 7 | mode: "0644" 8 | loop: "{{ vm_config | dict2items }}" 9 | loop_control: 10 | label: "{{ item.key }}" 11 | check_mode: false 12 | register: template_result 13 | when: 14 | - item.value.cloud_init_template is defined 15 | - item.value.cloud_init_template_name is defined 16 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_create_vm/templates/docker-host.j2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | hostname: {{ item.value.name }} 3 | manage_etc_hosts: true 4 | fqdn: {{ item.value.name }}.{{ domain }} 5 | 6 | users: 7 | - name: "{{ _vm_admin_user }}" 8 | groups: sudo 9 | passwd: "{{ _vm_admin_password_hashed }}" 10 | lock_passwd: false 11 | shell: /bin/bash 12 | #sudo: ALL=(ALL) 
NOPASSWD:ALL 13 | ssh_authorized_keys: 14 | - "{{ lookup("file", vm_admin_ssh_public_key_file ) }}" 15 | 16 | - name: "{{ vm_ansible_user }}" 17 | gecos: "{{ vm_ansible_user }}" 18 | primary_group: "{{ vm_ansible_user }}" 19 | groups: sudo 20 | shell: /bin/bash 21 | sudo: ALL=(ALL) NOPASSWD:ALL 22 | ssh_authorized_keys: 23 | - "{{ lookup("file", vm_ansible_ssh_public_key_file ) }}" 24 | 25 | package_update: true 26 | package_upgrade: true 27 | 28 | packages: 29 | - qemu-guest-agent 30 | 31 | write_files: 32 | - path: /etc/sysctl.d/99-disable-ipv6.conf 33 | content: | 34 | net.ipv6.conf.all.disable_ipv6 = 1 35 | net.ipv6.conf.default.disable_ipv6 = 1 36 | net.ipv6.conf.lo.disable_ipv6 = 1 37 | runcmd: 38 | - sysctl --system 39 | - systemctl enable qemu-guest-agent 40 | - systemctl start qemu-guest-agent 41 | 42 | - reboot 1 43 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_delete_vm/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_delete_vm 2 | 3 | This Ansible role automates the deletion of virtual machines within a Proxmox VE environment. It stops the specified VMs, then proceeds to delete them, and optionally cleans up associated user-data files. 4 | 5 | ## Purpose 6 | 7 | This role is designed to streamline the process of removing virtual machines from a Proxmox VE cluster. It provides an automated and repeatable way to delete VMs, which is useful in scenarios such as: 8 | 9 | * Cleaning up test or development environments after use. 10 | * Decommissioning VMs as part of infrastructure lifecycle management. 11 | * Automating VM deletion based on specific triggers or schedules. 12 | 13 | By using this role, you can ensure VMs are properly stopped and deleted, freeing up resources and maintaining a clean Proxmox environment. 14 | 15 | ## Tasks Performed 16 | 17 | 1. Stop the specified Proxmox virtual machines gracefully. 18 | 2. Delete the specified Proxmox virtual machines. 19 | 3. Display the results of the VM deletion process for logging and verification. 20 | 4. Ensure the removal of associated user-data snippet files (optional cleanup). 21 | 22 | ## Variables 23 | 24 | * **`pve_ansible_user_api_realm`** (*Required*): Proxmox API user with realm. Defined in your `group_vars`. 25 | * **`pve_ansible_token_id`** (*Required*): API token ID for the Ansible user. Defined in your `group_vars`. 26 | * **`api_token_file_path`** (*Required*): Path to the file containing the API token secret. Defined in `group_vars/all/vault`. 27 | * **`vmids`** (*Required*): A list of VM IDs to be deleted. Defined in the playbook that uses this role (`pve_delete_vm.yaml`). 28 | * **`node`** (*Required*): The Proxmox node where the VMs are located. Defined in the playbook that uses this role (`pve_delete_vm.yaml`). 29 | 30 | ## Important Notes 31 | 32 | * This role interacts with the Proxmox API. Ensure that the API user and token are correctly configured. 33 | * The `vmids` variable is mandatory and must contain a list of existing VM IDs identifying the VMs to delete. Provide the node name via the variable `node`. 34 | * Exercise caution when using this role as VM deletion is irreversible.
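35 | 36 | ## Example Usage 37 | 38 | A minimal sketch of a playbook using this role (the host group, node name, and VM IDs are illustrative assumptions): 39 | 40 | ```yaml 41 | --- 42 | - name: Delete virtual machines 43 | hosts: pve 44 | become: true 45 | roles: 46 | - pve_delete_vm 47 | vars: 48 | node: "pve01" 49 | vmids: 50 | - 106 51 | - 107 52 | ```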
53 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_delete_vm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Sleep 2 | ansible.builtin.wait_for: 3 | timeout: 5 # Wait for 5 seconds (you can adjust this as needed) 4 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_delete_vm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Stop Proxmox Virtual Machine 3 | community.general.proxmox_kvm: 4 | api_user: "{{ pve_ansible_user_api_realm }}" 5 | api_token_id: "{{ pve_ansible_token_id }}" 6 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 7 | api_host: "{{ inventory_hostname }}" 8 | vmid: "{{ item }}" 9 | node: "{{ node }}" 10 | state: "stopped" # Stop the VM before deleting it 11 | force: true 12 | delegate_to: localhost 13 | with_items: "{{ vmids }}" 14 | register: pve_vm_stop_result 15 | notify: 16 | - Sleep 17 | 18 | - name: Delete Proxmox Virtual Machine 19 | community.general.proxmox_kvm: 20 | api_user: "{{ pve_ansible_user_api_realm }}" 21 | api_token_id: "{{ pve_ansible_token_id }}" 22 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 23 | api_host: "{{ inventory_hostname }}" 24 | vmid: "{{ item }}" 25 | node: "{{ node }}" 26 | state: "absent" # Setting the state to 'absent' will delete the VM 27 | delegate_to: localhost 28 | with_items: "{{ vmids }}" 29 | register: pve_vm_delete_result 30 | 31 | - name: Display result of VM deletion 32 | ansible.builtin.debug: 33 | var: pve_vm_delete_result 34 | 35 | - name: Ensure the user-data file is removed 36 | become: true 37 | ansible.builtin.file: 38 | path: /var/lib/vz/snippets/docker-host.yml # Specify the correct path to the snippet folder 39 | state: absent 40 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_download_image/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_download_image 2 | 3 | This Ansible role automates the download and preparation of virtual machine images for use in a Proxmox VE environment. It ensures a designated download directory exists, checks for the presence of specified VM images, downloads them if missing, and extracts `.xz` compressed images (for Home Assistant, for example). 4 | 5 | ## Purpose 6 | 7 | This role is designed to simplify the process of obtaining and preparing VM images for deployment on Proxmox VE. It addresses the need to pre-download necessary VM images onto a Proxmox host, ensuring they are readily available for creating virtual machines. This is beneficial in scenarios where: 8 | 9 | * You want to automate the image download process as part of your Proxmox infrastructure provisioning. 10 | * You need to ensure specific VM images are present on your Proxmox hosts before creating VMs. 11 | * You want to handle image downloads and decompression in a consistent and repeatable manner. 12 | 13 | By using this role, you can automate the initial step of VM creation, saving time and reducing manual effort in managing VM images. 14 | 15 | Use this role before running the one to create a virtual machine. 16 | Note that the downloaded image files will not be visible from the Proxmox UI. 17 | 18 | ## Tasks Performed 19 | 20 | 1.
Create a designated directory for storing VM images if it does not already exist. 21 | 2. Check if each specified VM image file already exists in the download directory. 22 | 3. Download VM image files from provided URLs if they are not already present. 23 | 4. Extract `.xz` compressed VM image files if they were downloaded and are in `.xz` format. 24 | 5. Verify that the extracted image file exists after decompression (for `.xz` images). 25 | 26 | ## Variables 27 | 28 | * **pve\_image\_path** *(Required)*: 29 | * Description: The absolute path to the directory on the Proxmox host where VM images will be downloaded and stored. This directory will be created if it does not exist. 30 | * Example: `/var/lib/vz/images/0` 31 | * In this set of playbooks, the variable is specified in `group_vars/pve/vars.yml` 32 | * **vm\_config** *(Required)*: 33 | * Description: A dictionary that defines the VM images to be downloaded. Each key in this dictionary can be an arbitrary identifier for the VM image (e.g., "ubuntu", "centos"). The value associated with each key must be a dictionary containing the details for that specific VM image. 34 | * Each VM image definition within `vm_config` requires the following keys: 35 | * **vm\_image\_name** *(Required)*: The desired filename for the VM image (including the `.xz` extension if applicable). This is the name used to store the image in the `pve_image_path` directory. 36 | * **vm\_image\_url** *(Required)*: The URL from which the VM image should be downloaded. 37 | * In this set of playbooks, the virtual machine variables are defined in `group_vars/pve/vms/vm_name.yml` 38 | * Example: 39 | ```yaml 40 | vm_config: 41 | ubuntu: 42 | vm_image_name: noble-server-cloudimg-amd64.qcow2 43 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 44 | 45 | home_assistant: 46 | vm_image_name: haos_ova-14.1.qcow2.xz 47 | vm_image_url: https://github.com/home-assistant/operating-system/releases/download/14.1/haos_ova-14.1.qcow2.xz 48 | ``` 49 | 50 | ## Example Usage 51 | 52 | Here's an example of how to use this role in a standalone playbook: 53 | 54 | ```yaml 55 | --- 56 | - name: Download virtual machine images based on values defined in the playbook 57 | hosts: pve 58 | become: true 59 | 60 | roles: 61 | - pve_download_image 62 | vars: 63 | pve_image_path: "/var/lib/vz/images/0" 64 | vm_config: 65 | ubuntu: 66 | vm_image_name: noble-server-cloudimg-amd64.qcow2 67 | vm_image_url: https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 68 | 69 | home_assistant: 70 | vm_image_name: haos_ova-14.1.qcow2.xz 71 | vm_image_url: https://github.com/home-assistant/operating-system/releases/download/14.1/haos_ova-14.1.qcow2.xz 72 | ``` -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_download_image/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Create a download folder if it doesn't exist 3 | - name: Create download folder 4 | ansible.builtin.file: 5 | path: "{{ pve_image_path }}" 6 | mode: "0755" 7 | state: directory 8 | 9 | # Check if each image already exists, then download any that are missing 10 | 11 | - name: Check if images exist 12 | ansible.builtin.stat: 13 | path: "{{ pve_image_path }}/{{ item.value.vm_image_name }}" 14 | register: image_status_all 15 | loop: "{{ vm_config | dict2items }}" 16 | loop_control: 17 | label: "{{ item.key }}" 18 | 19 | - name: Set
image status facts 20 | ansible.builtin.set_fact: 21 | image_exists: "{{ image_exists | default({}) | combine({item.item.key: item.stat.exists}) }}" 22 | loop: "{{ image_status_all.results }}" 23 | loop_control: 24 | label: "{{ item.item.key }}" 25 | 26 | - name: Download Virtual Machine image 27 | ansible.builtin.get_url: 28 | url: "{{ item.value.vm_image_url }}" 29 | dest: "{{ pve_image_path }}/{{ item.value.vm_image_name }}" 30 | mode: "0644" 31 | when: not image_exists[item.key] 32 | loop: "{{ vm_config | dict2items }}" 33 | loop_control: 34 | pause: 5 35 | label: "{{ item.key }}" 36 | become: true 37 | 38 | # Extract .xz image if present (the 'removes' guard skips images that were already extracted) 39 | - name: Extract .xz image if present 40 | ansible.builtin.command: 41 | cmd: "unxz -f {{ pve_image_path }}/{{ item.value.vm_image_name }}" 42 | chdir: "{{ pve_image_path }}" 43 | removes: "{{ pve_image_path }}/{{ item.value.vm_image_name }}" 44 | when: item.value.vm_image_name.endswith('.xz') 45 | loop: "{{ vm_config | dict2items }}" 46 | loop_control: 47 | label: "{{ item.key }}" 48 | become: true 49 | ignore_errors: false 50 | 51 | # Verify extracted image exists 52 | - name: Verify extracted image exists 53 | ansible.builtin.stat: 54 | path: "{{ pve_image_path }}/{{ item.value.vm_image_name | regex_replace('\\.xz$', '') }}" 55 | register: extracted_image_status 56 | loop: "{{ vm_config | dict2items }}" 57 | loop_control: 58 | label: "{{ item.key }}" 59 | when: item.value.vm_image_name.endswith('.xz') 60 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_post_install 2 | 3 | This Ansible role automates common post-installation tasks for a Proxmox VE server. It configures the system by setting up the no-subscription repository, performing system upgrades, removing the no-subscription warning, installing useful packages, enabling the snippets content type, disabling IPv6, configuring DNS, and setting up basic Fail2ban for SSH. 4 | 5 | ## Purpose 6 | 7 | The purpose of this Ansible role is to streamline and automate the initial configuration of a fresh Proxmox VE installation. It addresses the need for a quick and consistent setup by performing essential post-install steps. This role helps users to: 8 | 9 | * Easily switch from the Proxmox Enterprise repository to the No-Subscription repository. 10 | * Ensure the system is up-to-date with the latest packages. 11 | * Remove the nag screen associated with the No-Subscription repository. 12 | * Install commonly used management and utility packages. 13 | * Harden the system by disabling IPv6 and setting up Fail2ban for SSH protection. 14 | * Configure DNS settings for proper name resolution. 15 | 16 | 17 | ## Tasks Performed 18 | 19 | 1. Remove Proxmox Enterprise subscription repositories. 20 | 2. Add the Proxmox PVE No-Subscription repository. 21 | 3. Perform a full system upgrade (distribution upgrade). 22 | 4. Remove the Proxmox No-Subscription subscription warning prompt from the web interface. [ CONSIDER BUYING A PROXMOX LICENCE ] 23 | 5. Install essential packages for Proxmox API access, management, and security (including `python3-proxmoxer`, `sudo`, `fail2ban`, and `python3-hvac`). 24 | 6. Enable the 'snippets' content type for Proxmox local storage to allow uploading snippets through the web interface. 25 | 7. Disable IPv6 system-wide. 26 | 8.
Configure DNS servers in `/etc/resolv.conf`. 27 | 9. Configure Fail2ban for SSH with a default jail configuration. 28 | 29 | ## Variables 30 | 31 | * **`ansible_distribution_release`** (*Automatically Detected*): The release of the Debian/Ubuntu distribution used by Proxmox. 32 | * **`dns_servers`** (*Required*): List of DNS servers. Defined in `group_vars/all/vars`. 33 | * **`inventory_hostname`** (*Automatically Detected*): The hostname of the Proxmox node. 34 | * **`domain`** (*Required*): Domain name used in configurations. Defined in `group_vars/all/vars`. 35 | * **`pve_root_user`** (*Required*): Root user for Proxmox. Defined in `group_vars/all/vault`. 36 | * **`pve_root_password`** (*Required*): Password for the Proxmox root user. Defined in `group_vars/all/vault`. 37 | 38 | 39 | ## Important Notes 40 | 41 | * This role requires root privileges on the Proxmox VE nodes. 42 | * Internet connectivity is required for repository updates and package installations. 43 | * This role removes the enterprise repository, switching to the no-subscription repository. If you have a Proxmox subscription, adjust the repository configuration accordingly. 44 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart Fail2ban 3 | ansible.builtin.service: 4 | name: fail2ban 5 | state: restarted 6 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/tasks/dns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure resolv.conf for regular DNS 3 | ansible.builtin.lineinfile: 4 | path: /etc/resolv.conf 5 | line: "nameserver {{ item }}" 6 | state: present 7 | loop: "{{ dns_servers }}" 8 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/tasks/fail2ban.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure fail2ban for SSH 3 | ansible.builtin.template: 4 | src: jail.local 5 | dest: /etc/fail2ban/jail.local 6 | mode: "0644" 7 | notify: Restart Fail2ban 8 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # ansible_proxmox_management/roles/pve_post_install/tasks/main.yml 2 | --- 3 | - name: Post install configuration 4 | ansible.builtin.include_tasks: post_install_configuration.yml 5 | 6 | - name: Disable IPV6 7 | ansible.builtin.include_tasks: network.yml 8 | 9 | - name: Configure DNS 10 | ansible.builtin.include_tasks: dns.yml 11 | 12 | - name: Configure Fail2Ban 13 | ansible.builtin.include_tasks: fail2ban.yml 14 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Disable IPv6 3 | ansible.posix.sysctl: 4 | name: "{{ item }}" 5 | value: "1" 6 | state: "present" 7 | reload: true 8 | with_items: 9 | - net.ipv6.conf.all.disable_ipv6 10 | - net.ipv6.conf.default.disable_ipv6 11 | - net.ipv6.conf.lo.disable_ipv6 12 |
-------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/tasks/post_install_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove Proxmox Enterprise subscription repository 3 | ansible.builtin.file: 4 | path: "/etc/apt/sources.list.d/{{ item }}" 5 | state: absent 6 | loop: 7 | - pve-enterprise.list.dpkg-dist 8 | - pve-enterprise.list 9 | - ceph.list 10 | 11 | - name: Add Proxmox PVE No-Subscription repository 12 | ansible.builtin.apt_repository: 13 | repo: deb http://download.proxmox.com/debian/pve {{ ansible_distribution_release }} pve-no-subscription 14 | state: present 15 | update_cache: true 16 | filename: pve-no-subscription 17 | 18 | - name: Perform a full system upgrade (dist-upgrade) 19 | ansible.builtin.apt: 20 | update_cache: true 21 | cache_valid_time: 3600 22 | upgrade: dist 23 | 24 | # CONSIDER BUYING A PROXMOX LICENCE # 25 | - name: Remove Proxmox No-Subscription subscription warning prompt - Consider buying a proxmox licence 26 | ansible.builtin.replace: 27 | path: /usr/share/javascript/proxmox-widget-toolkit/proxmoxlib.js 28 | regexp: "data\\.status\\.toLowerCase\\(\\) !== 'active'" 29 | replace: "false" 30 | backup: true 31 | 32 | - name: Install packages for Proxmox API access and management 33 | ansible.builtin.package: 34 | name: 35 | - python3-proxmoxer 36 | - sudo 37 | - fail2ban 38 | - python3-hvac 39 | state: present 40 | 41 | - name: Enable 'snippets' content type on Proxmox local storage 42 | ansible.builtin.command: 43 | cmd: "pvesm set local --content iso,backup,vztmpl,snippets" 44 | register: pvesm_output 45 | changed_when: "'snippets' not in pvesm_output.stdout" 46 | failed_when: "'error' in pvesm_output.stderr" 47 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_post_install/templates/jail.local: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | # Ban hosts for one hour 3 | bantime = 3600 4 | 5 | # Find three failed attempts within ten minutes 6 | findtime = 600 7 | maxretry = 3 8 | 9 | # Action to take: block with iptables 10 | banaction = iptables-multiport 11 | banaction_allports = iptables-allports 12 | 13 | # Email notifications for bans 14 | destemail = root@localhost 15 | sender = {{ inventory_hostname }}@{{ domain }} 16 | mta = sendmail 17 | 18 | # Log level 19 | loglevel = INFO 20 | 21 | [sshd] 22 | enabled = true 23 | port = ssh 24 | logpath = /var/log/auth.log 25 | maxretry = 3 -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_sdn_vlan_setup/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_sdn_vlan_setup 2 | 3 | This Ansible role automates the setup of Software Defined Networking (SDN) VLAN zones and Virtual Networks (VNets) in Proxmox VE. It ensures VLAN zones and VNets are created based on a provided configuration, simplifying VLAN management in your Proxmox cluster. 4 | 5 | ## Purpose 6 | 7 | This role is designed to automate the configuration of VLAN-based SDN within a Proxmox VE cluster. It addresses the need to easily define and deploy VLANs for network segmentation and management within your Proxmox environment. By using this role, you can: 8 | 9 | * Centralize VLAN definitions and apply them consistently across your Proxmox cluster. 
10 | * Automate the creation of SDN VLAN zones and VNets, reducing manual configuration and potential errors. 11 | * Simplify network management by defining VLANs as code, making it easier to track and modify your network setup. 12 | * Enable self-service VLAN provisioning for virtual machines within your Proxmox infrastructure. 13 | 14 | ## Tasks Performed 15 | 16 | 1. Check if SDN VLAN zones already exist for each defined VLAN ID. 17 | 2. Create SDN VLAN zones if they do not exist on the designated primary Proxmox node. 18 | 3. Check if VNets already exist for each defined VLAN ID. 19 | 4. Create VNets if they do not exist on the designated primary Proxmox node. 20 | 5. Reload the Proxmox SDN configuration on the designated primary Proxmox node to apply changes. 21 | 22 | ## Variables 23 | 24 | * **vlan\_definitions** *(Required)*: 25 | * Description: A list of dictionaries defining the VLANs to be configured. Defined in `group_vars/all/vars`. Each dictionary represents a VLAN and must contain the following keys: 26 | * **id** *(Required)*: The VLAN ID (numeric). This ID will be used for both the VLAN tag and to name the SDN zone and VNet (e.g., `vlan` and `vnet`). 27 | * **name** *(Required)*: A descriptive name for the VLAN. This name will be used as the alias for the VNet in Proxmox. 28 | * Example: 29 | ```yaml 30 | vlan_definitions: 31 | - id: 100 32 | name: "Development VLAN" 33 | - id: 200 34 | name: "Production VLAN" 35 | ``` 36 | 37 | * **pve\_primary\_node** *(Required)*: 38 | * Description: The hostname or IP address of the designated primary Proxmox node where SDN configuration changes will be applied. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. This should correspond to a host defined in your Ansible inventory. 39 | * Example: `"proxmox01"` 40 | 41 | * **pve\_compute\_bridge** *(Required)*: 42 | * Description: The name of the Proxmox bridge that will be associated with the SDN VLAN zones. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. This bridge should be configured to handle VLAN-tagged traffic in your Proxmox environment. 43 | * Example: `"vmbr1"` 44 | 45 | ## Important Notes 46 | 47 | * This role requires that the Proxmox cluster is already set up. 48 | * It uses `pvesh` for interacting with the Proxmox API; make sure `pvesh` is properly configured. 49 | * Changes to the SDN configuration are applied cluster-wide, but the reload command only needs to be run on the primary node.
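50 | 51 | ## Example Usage 52 | 53 | A minimal sketch of a playbook applying this role (the host group and all values are illustrative assumptions): 54 | 55 | ```yaml 56 | --- 57 | - name: Configure SDN VLAN zones and VNets 58 | hosts: pve 59 | become: true 60 | roles: 61 | - pve_sdn_vlan_setup 62 | vars: 63 | pve_primary_node: "proxmox01" 64 | pve_compute_bridge: "vmbr1" 65 | vlan_definitions: 66 | - id: 100 67 | name: "Development" 68 | ```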
-------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_sdn_vlan_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check SDN zone status 3 | ansible.builtin.command: pvesh get /cluster/sdn/zones/vlan{{ item.id }} 4 | register: zone_check_result 5 | changed_when: false 6 | failed_when: false 7 | loop: "{{ vlan_definitions }}" 8 | loop_control: 9 | label: "Checking SDN zone for VLAN {{ item.id }}" 10 | 11 | - name: Create SDN zone if it doesn't exist 12 | ansible.builtin.shell: | 13 | pvesh create /cluster/sdn/zones \ 14 | --type vlan \ 15 | --zone vlan{{ item.item.id }} \ 16 | --bridge {{ pve_compute_bridge }} 17 | when: item.rc != 0 and inventory_hostname == pve_primary_node 18 | loop: "{{ zone_check_result.results }}" 19 | loop_control: 20 | label: "Creating SDN zone for VLAN {{ item.item.id }}" 21 | register: sdn_zone_result 22 | changed_when: sdn_zone_result.rc == 0 23 | 24 | - name: Check VNet status 25 | ansible.builtin.command: pvesh get /cluster/sdn/vnets/vnet{{ item.id }} 26 | register: vnet_check_result 27 | changed_when: false 28 | failed_when: false 29 | loop: "{{ vlan_definitions }}" 30 | loop_control: 31 | label: "Checking VNet for VLAN {{ item.id }}" 32 | 33 | - name: Create VNet if it doesn't exist 34 | ansible.builtin.shell: | 35 | pvesh create /cluster/sdn/vnets \ 36 | --vnet vnet{{ item.item.id }} \ 37 | --alias "{{ item.item.name }} VLAN" \ 38 | --zone vlan{{ item.item.id }} \ 39 | --tag {{ item.item.id }} 40 | when: item.rc != 0 and inventory_hostname == pve_primary_node 41 | loop: "{{ vnet_check_result.results }}" 42 | loop_control: 43 | label: "Creating VNet for VLAN {{ item.item.id }}" 44 | register: sdn_vnet_result 45 | changed_when: sdn_vnet_result.rc == 0 46 | 47 | - name: Reload SDN configuration 48 | ansible.builtin.command: 49 | cmd: pvesh set /cluster/sdn 50 | changed_when: false 51 | when: inventory_hostname == pve_primary_node 52 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_update_firewall/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Role: pve_update_firewall 2 | 3 | This Ansible role configures the cluster-wide firewall on Proxmox VE nodes. It ensures the firewall directory and configuration file exist, and deploys firewall rules defined in a template to the Proxmox cluster. 4 | 5 | ## Purpose 6 | 7 | This role simplifies and automates the process of managing the Proxmox cluster-wide firewall. It provides a consistent and repeatable way to define and deploy firewall rules across all nodes in your Proxmox cluster. By using this role, you can centrally manage your firewall policies, ensuring that all nodes adhere to the same security standards and reducing the risk of manual configuration errors. This is particularly useful in larger Proxmox environments where maintaining consistent firewall configurations manually becomes complex and time-consuming. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Ensure the firewall configuration directory `/etc/pve/firewall` exists. 12 | 2. Create the firewall configuration file `/etc/pve/firewall/cluster.fw` if it doesn't exist. 13 | 3. Deploy firewall rules to the `/etc/pve/firewall/cluster.fw` file using a Jinja2 template. 14 | 4. Verify the content of the deployed firewall rules on the target node.
15 | 16 | ## Variables 17 | 18 | * **firewall_rules_template** *(Required)*: 19 | * Description: Path to the Jinja2 template file containing the firewall rules. This template will be rendered and deployed to `/etc/pve/firewall/cluster.fw`. You should define your firewall rules within this template, using variables as needed to customize the configuration. 20 | * Example: `"templates/cluster_fw.j2"` 21 | 22 | * **firewall_rules_content** *(Optional)*: 23 | * Description: This variable allows you to directly define the firewall rules content within your playbook, instead of using a separate template file. If provided, this content will be written to `/etc/pve/firewall/cluster.fw`. If `firewall_rules_template` is also defined, `firewall_rules_template` takes precedence. This is useful for simpler firewall configurations or when you want to dynamically generate rules within your playbook. 24 | 25 | ## Important Notes 26 | 27 | * This role overwrites the existing cluster firewall configuration. Ensure the template file (`cluster_fw.j2`) contains the correct rules for your environment. -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_update_firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensuring the firewall directory exists on the nodes 3 | ansible.builtin.file: 4 | path: "/etc/pve/firewall" 5 | mode: "0640" 6 | state: directory 7 | 8 | - name: Ensuring the existence of the firewall configuration file 9 | ansible.builtin.file: 10 | path: "/etc/pve/firewall/cluster.fw" 11 | state: touch 12 | mode: '0640' 13 | modification_time: preserve 14 | access_time: preserve 15 | 16 | - name: Copy the firewall template to the proxmox cluster 17 | ansible.builtin.template: 18 | src: cluster_fw.j2 19 | dest: "/etc/pve/firewall/cluster.fw" 20 | mode: "0640" 21 | force: true 22 | 23 | - name: Verify Proxmox cluster-wide firewall rules 24 | ansible.builtin.command: 25 | cmd: "cat /etc/pve/firewall/cluster.fw" 26 | register: firewall_rules_check 27 | changed_when: false 28 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_update_firewall/templates/cluster_fw.j2: -------------------------------------------------------------------------------- 1 | # Cluster-wide Firewall Rules for Proxmox 2 | [RULES] 3 | IN ACCEPT -p tcp -dport 22 -source {{ management_vlan }} # Allow SSH from Management VLAN 4 | IN ACCEPT -p tcp -dport 8006 -source {{ management_vlan }} # Allow WebUI from Management VLAN 5 | IN ACCEPT -p tcp -dport 22 -source {{ trusted_vlan }} # Allow SSH from Trusted VLAN 6 | IN ACCEPT -p tcp -dport 8006 -source {{ trusted_vlan }} # Allow WebUI from Trusted VLAN 7 | IN ACCEPT -p udp -dport 5405:5412 -source {{ management_vlan }} # Allow Corosync for clustering 8 | IN DROP # Deny all other incoming traffic -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_update_hosts/README.md: -------------------------------------------------------------------------------- 1 | # pve_update_hosts 2 | 3 | This role updates all packages on the target hosts. 4 | 5 | ## Purpose 6 | 7 | This role ensures that your hosts are up-to-date, enhancing security and providing the latest software features. 8 | 9 | ## Tasks Performed 10 | 11 | 1. Upgrade all installed packages to their latest versions. 12 | 2.
Reboot the host(s) if the system indicates that a reboot is required. 13 | 14 | ## Variables 15 | This role uses no specific variables. However, it depends on the system's package manager. 16 | 17 | ## Important Notes 18 | 19 | * This role requires root access (`become: true`). 20 | * An active internet connection is required to download package updates. 21 | * This role can take a considerable amount of time, depending on the number of packages that need updating. 22 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_update_hosts/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update apt cache 3 | ansible.builtin.apt: 4 | update_cache: true 5 | cache_valid_time: 3600 6 | 7 | - name: Dist Upgrade packages 8 | ansible.builtin.apt: 9 | upgrade: dist 10 | 11 | - name: Check if reboot is required 12 | ansible.builtin.stat: 13 | path: /var/run/reboot-required 14 | register: reboot_required_file 15 | ignore_errors: true 16 | 17 | - name: Reboot host if required 18 | ansible.builtin.reboot: 19 | msg: "Reboot initiated after package updates" 20 | connect_timeout: 5 21 | reboot_timeout: 300 22 | pre_reboot_delay: 10 23 | post_reboot_delay: 30 24 | when: reboot_required_file.stat.exists -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_vm_post_install_cleanup/README.md: -------------------------------------------------------------------------------- 1 | # Proxmox VM Cloud-Init Cleanup and Startup Role 2 | 3 | This Ansible role is designed to automate the cleanup of cloud-init configurations and the startup of newly provisioned Virtual Machines (VMs) within a Proxmox Virtual Environment (PVE). 4 | 5 | **Purpose:** 6 | 7 | When provisioning VMs using cloud-init templates in Proxmox, the cloud-init configuration often becomes embedded within the VM's configuration and associated files (like snippet templates and cloud-init drives). This role addresses a scenario where you might want to remove these cloud-init artifacts after the initial VM provisioning. This could be for various reasons, such as: 8 | 9 | * **Re-provisioning or Template Updates:** Preparing VMs for re-provisioning with potentially different cloud-init settings. 10 | * **Cleanup of Temporary Configurations:** Removing temporary cloud-init configurations that are no longer needed after the VM's initial setup. 11 | * **Standardization:** Ensuring a consistent state for VMs after their initial deployment, without lingering cloud-init configurations. 12 | 13 | **Tasks Performed:** 14 | 15 | This role performs the following high-level actions: 16 | 17 | 1. **Stops Target VMs:** Ensures the VMs targeted for cloud-init cleanup are in a stopped state before proceeding with modifications. 18 | 2. **Removes Cloud-Init Snippet Template Files:** Deletes the cloud-init snippet template files associated with the VMs. 19 | 3. **Removes Cloud-Init IDE Drives:** Detaches and removes the cloud-init IDE drives from the VM configurations. 20 | 4. **Waits for Cleanup:** Introduces a pause to allow for the cleanup operations to complete within Proxmox. 21 | 5. **Starts VMs:** Initiates the startup of the cleaned VMs, allowing them to boot without the previously configured cloud-init settings.
22 | 23 | **Variables:** 24 | 25 | The role relies on the following variables, which **must be defined** in your inventory, group\_vars, or host\_vars files: 26 | 27 | * **`pve_ansible_user_api_realm`**: *(Required)* The Proxmox API username, including the realm (e.g., `user@pve`). This user needs to have sufficient permissions to manage VMs in Proxmox. Defined in your `group_vars`. 28 | * **`pve_ansible_token_id`**: *(Required)* The Proxmox API token ID. This is part of the API token authentication mechanism. Defined in your `group_vars`. 29 | * **`api_token_file_path`**: *(Required)* The path to a file on the Ansible control node that contains the Proxmox API token secret. **Important:** Ensure this file is securely stored and has restricted permissions. Defined in `group_vars/all/vault`. 30 | * **`pve_primary_node`**: *(Required)* The hostname or IP address of your primary Proxmox node. This is used for API communication and delegation. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 31 | * **`vm_config`**: *(Required)* A dictionary variable defining the VMs to be processed. This is loaded from YAML files in `group_vars/pve/vms/` and/or `group_vars/proxmox/vms/`. 32 | * **`snippets_path`**: *(Required)* The path on the Proxmox host where snippet files are stored. This is typically `/var/lib/vz/snippets/`. Defined in `group_vars/proxmox/vars.yml` and `group_vars/pve/vars.yml`. 33 | 34 | 35 | ## Important Notes 36 | 37 | * This role should be run after the `pve_create_vm` role. 38 | * Ensure the Proxmox API user and token are configured correctly. 39 | * The role requires the `community.general` Ansible collection. 40 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_vm_post_install_cleanup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Sleep 2 | ansible.builtin.wait_for: 3 | timeout: 5 # Wait for 5 seconds (you can adjust this as needed) 4 | -------------------------------------------------------------------------------- /ansible_proxmox_management/roles/pve_vm_post_install_cleanup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Stop the VMs before removing their cloud-init artifacts 3 | - name: Stop Proxmox Virtual Machine 4 | community.general.proxmox_kvm: 5 | api_user: "{{ pve_ansible_user_api_realm }}" 6 | api_token_id: "{{ pve_ansible_token_id }}" 7 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 8 | api_host: "{{ inventory_hostname }}" 9 | 10 | vmid: "{{ item.value.vmid }}" 11 | state: "stopped" 12 | force: true 13 | loop: "{{ vm_config | dict2items }}" 14 | loop_control: 15 | label: "{{ item.key }}" 16 | delegate_to: "{{ ansible_play_batch[0] }}" 17 | when: inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 18 | notify: 19 | - Sleep 20 | 21 | - name: Remove cloud-init file 22 | ansible.builtin.file: 23 | path: "{{ snippets_path }}{{ item.value.cloud_init_template_name }}" 24 | state: absent 25 | loop: "{{ vm_config | dict2items }}" 26 | loop_control: 27 | pause: 5 28 | label: "{{ item.key }}" 29 | 30 | # Remove cloud-init drive 31 | - name: Remove cloud-init drive 32 | community.general.proxmox_disk: 33 | api_user: "{{ pve_ansible_user_api_realm }}" 34 | api_token_id: "{{ pve_ansible_token_id }}" 35 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 36 | api_host: "{{ inventory_hostname }}" 37 | 38 | vmid: "{{ item.value.vmid }}" 39 | disk: "ide2" 40 |
state: absent 41 | loop: "{{ vm_config | dict2items }}" 42 | loop_control: 43 | label: "{{ item.key }}" 44 | delegate_to: "{{ ansible_play_batch[0] }}" 45 | when: inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 46 | 47 | - name: Pause 48 | ansible.builtin.pause: 49 | seconds: 30 50 | 51 | - name: Start VMs 52 | community.general.proxmox_kvm: 53 | api_user: "{{ pve_ansible_user_api_realm }}" 54 | api_token_id: "{{ pve_ansible_token_id }}" 55 | api_token_secret: "{{ lookup('file', api_token_file_path) }}" 56 | api_host: "{{ inventory_hostname }}" 57 | 58 | vmid: "{{ item.value.vmid }}" 59 | name: "{{ item.key }}" 60 | state: "started" 61 | loop: "{{ vm_config | dict2items }}" 62 | loop_control: 63 | pause: 5 64 | label: "{{ item.key }}" 65 | delegate_to: "{{ ansible_play_batch[0] }}" 66 | when: inventory_hostname == ansible_play_batch[0] and item.value.state == "new" 67 | --------------------------------------------------------------------------------