├── collection ├── roles │ ├── resources_clean │ │ ├── README.md │ │ └── tasks │ │ │ └── main.yml │ ├── resources_prepare │ │ ├── README.md │ │ └── tasks │ │ │ └── main.yml │ ├── cluster_node │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── swapoff.service │ │ ├── tasks │ │ │ ├── hold-packages.yml │ │ │ ├── configure-hostname.yml │ │ │ ├── install-dependencies.yml │ │ │ ├── install-swapoff.yml │ │ │ ├── main.yml │ │ │ └── activate-iptables-legacy.yml │ │ └── meta │ │ │ └── main.yml │ ├── k3s_traefik │ │ ├── README.md │ │ ├── meta │ │ │ └── main.yml │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── traefik.yaml │ │ │ └── traefik-dashboard-ingress.yml │ ├── k3s_dnas │ │ ├── README.md │ │ ├── templates │ │ │ ├── dnas-share.kustomize.secret.env │ │ │ ├── mnt-dnas.mount │ │ │ ├── dnas-share.kustomization.yaml │ │ │ └── dnas-share.kustomize.yml │ │ ├── vars │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── host_install-mount.yml │ │ │ ├── host_install-syncthing.yml │ │ │ ├── host_add-user.yml │ │ │ ├── main.yml │ │ │ ├── k3s_deploy.yml │ │ │ └── host_configure-syncthing.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── k3s_longhorn │ │ ├── README.md │ │ ├── vars │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── create_longhorn_loop.sh.jinja2 │ │ │ ├── longhorn_loop.mount │ │ │ └── longhorn_loop.service │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── configure-block.yml │ │ │ ├── main.yml │ │ │ ├── configure-node.yml │ │ │ ├── deploy.yml │ │ │ └── configure-image.yml │ │ └── defaults │ │ │ └── main.yml │ ├── service_keepalived │ │ ├── templates │ │ │ ├── ip_vs.conf │ │ │ └── keepalived.conf │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── vars │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── k3s_csi_driver_smb │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── csi-driver-smb.yaml │ │ 
├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── k3s_dashboard │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── templates │ │ │ └── customization.yml │ │ └── tasks │ │ │ └── main.yml │ ├── service_k3s │ │ ├── README.md │ │ ├── tasks │ │ │ ├── get-server-token.yml │ │ │ ├── set-node-labels.yml │ │ │ ├── k3s-server-primary.yml │ │ │ ├── wait-for-nodes.yml │ │ │ ├── set-node-taints.yml │ │ │ ├── k3s-agent.yml │ │ │ ├── k3s-server-secondary.yml │ │ │ ├── configure_nodes.yml │ │ │ ├── configure-local-kubectl.yml │ │ │ ├── k3s-server.yml │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── defaults │ │ │ └── main.yml │ ├── image_aosc │ │ ├── README.md │ │ ├── vars │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── main.yml │ │ │ ├── release-host-image.yml │ │ │ ├── prepare-workdir.yml │ │ │ ├── get-aosc-image.yml │ │ │ └── patch-host-image.yml │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── armbian_first_run.txt │ │ └── files │ │ │ ├── customizer.sh │ │ │ └── mounter.sh │ ├── image_armbian │ │ ├── README.md │ │ ├── vars │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── main.yml │ │ │ ├── release-host-image.yml │ │ │ ├── prepare-workdir.yml │ │ │ ├── get-armbian-image.yml │ │ │ └── patch-host-image.yml │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── customizer.sh │ │ │ └── mounter.sh │ │ └── templates │ │ │ └── armbian_first_run.txt │ └── image_ubuntu_raspi │ │ ├── README.md │ │ ├── vars │ │ └── main.yml │ │ ├── meta │ │ └── main.yml │ │ ├── tasks │ │ ├── main.yml │ │ ├── release-host-image.yml │ │ ├── prepare-workdir.yml │ │ ├── fetch-image.yml │ │ └── patch-host-image.yml │ │ ├── templates │ │ └── network-config │ │ ├── defaults │ │ └── main.yml │ │ └── files │ │ └── mounter.sh ├── meta │ └── runtime.yml ├── galaxy.yml └── README.md ├── .editorconfig ├── molecule ├── ubuntu_raspi │ ├── inventory │ │ ├── hosts.yml │ │ ├── 
host_vars │ │ │ └── ubuntu-n1.yml │ │ └── group_vars │ │ │ └── all.yml │ ├── converge.yml │ ├── verify.yml │ └── molecule.yml ├── aosc │ ├── inventory │ │ ├── hosts.yml │ │ ├── host_vars │ │ │ ├── aosc-n1.yml │ │ │ └── aosc-n2.yml │ │ └── group_vars │ │ │ └── all.yml │ ├── converge.yml │ ├── verify.yml │ └── molecule.yml ├── k2ha │ ├── inventory │ │ ├── host_vars │ │ │ ├── k2ha-n1.yml │ │ │ └── k2ha-n2.yml │ │ ├── hosts.yml │ │ └── group_vars │ │ │ ├── k3s.yml │ │ │ └── all.yml │ ├── prepare.yml │ ├── converge.yml │ ├── side_effect.yml │ ├── verify.yml │ └── molecule.yml ├── k1 │ ├── prepare.yml │ ├── side_effect.yml │ ├── converge.yml │ ├── inventory │ │ ├── hosts.yml │ │ ├── host_vars │ │ │ └── k1-n1.yml │ │ └── group_vars │ │ │ ├── k3s.yml │ │ │ └── all.yml │ ├── verify.yml │ └── molecule.yml ├── k1ha │ ├── prepare.yml │ ├── converge.yml │ ├── side_effect.yml │ ├── inventory │ │ ├── host_vars │ │ │ └── k1ha-n1.yml │ │ ├── hosts.yml │ │ └── group_vars │ │ │ ├── k3s.yml │ │ │ └── all.yml │ ├── verify.yml │ └── molecule.yml ├── k1lo │ ├── prepare.yml │ ├── converge.yml │ ├── side_effect.yml │ ├── inventory │ │ ├── host_vars │ │ │ └── k1lo-n1.yml │ │ ├── hosts.yml │ │ └── group_vars │ │ │ ├── k3s.yml │ │ │ └── all.yml │ ├── verify.yml │ └── molecule.yml ├── k2 │ ├── prepare.yml │ ├── side_effect.yml │ ├── converge.yml │ ├── inventory │ │ ├── host_vars │ │ │ ├── k2-n1.yml │ │ │ └── k2-n2.yml │ │ ├── hosts.yml │ │ └── group_vars │ │ │ ├── k3s.yml │ │ │ └── all.yml │ ├── verify.yml │ └── molecule.yml ├── armbian │ ├── converge.yml │ ├── inventory │ │ ├── hosts.yml │ │ ├── host_vars │ │ │ ├── armbian-n2.yml │ │ │ ├── armbian-n1.yml │ │ │ ├── armbian-n4.yml │ │ │ └── armbian-n3.yml │ │ └── group_vars │ │ │ └── all.yml │ ├── molecule.yml │ └── verify.yml └── resources │ ├── collections.yml │ ├── playbooks │ ├── verify-service-keepalived.yml │ ├── verify-k3s-deployments.yml │ ├── verify-service-k3s.yml │ ├── k3s-deploy.yml │ ├── prepare.yml │ ├── images-build.yml │ └── 
cluster-bootstrap.yml │ ├── tasks │ ├── verify-k3s_dashboard.yml │ ├── verify-k8s-http-endpoint.yml │ ├── verify-k3s_dnas.yml │ ├── verify-k3s_traefik.yml │ ├── verify-k3s_csi_driver_smb.yml │ └── verify-k3s_longhorn.yml │ └── deployments │ ├── test-csi-smb-dnas-step2.yml │ ├── test-longhorn-rwo-step2.yml │ ├── test-longhorn-rwx-step3.yml │ ├── test-longhorn-rwx-step2.yml │ ├── test-longhorn-rwo-step1.yml │ ├── test-longhorn-rwx-step1.yml │ ├── test-traefik.yml │ └── test-csi-smb-dnas-step1.yml ├── paper ├── dnas_context.png ├── dnas_software.png ├── vision_context.png ├── services_context.png ├── dnas_infrastructure.png ├── services_software.png ├── vison_work_packages.png ├── illustrations_stacks.odg ├── services_infrastructure.png ├── bs-files_sharing-usecases.png ├── illustrations_dnas_example.odg ├── bs-files_sharing-value_stream.png ├── services_deployment_nextcloud.png ├── bs-files_synchronization-usecases.png ├── bs-photos_synchronization-usecases.png ├── bs-files_synchronization-value_stream.png ├── bs-photos_synchronization-value_stream.png ├── bs-contacts_calendars_management-usecases.png ├── bs-contacts_calendars_management.puml ├── bs-files_synchronization.puml ├── bs-photos_synchronization.puml ├── bs-files_sharing.puml ├── nord.puml └── vision.puml ├── requirements.txt ├── .gitignore ├── lint ├── scripts ├── publish-alpha.sh ├── vm-prepare.sh ├── vm-test-scenario.sh ├── postbump.js ├── ci-install.sh ├── customize-armbian.sh └── customize-aosc.sh ├── .github ├── FUNDING.yml └── workflows │ ├── ci-test.yml │ └── ci-build.yml ├── .yamllint ├── package.json ├── LICENSE ├── .travis.yml └── Vagrantfile /collection/roles/resources_clean/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /collection/roles/resources_prepare/README.md: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /collection/roles/cluster_node/README.md: -------------------------------------------------------------------------------- 1 | # Cluster: Node -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/README.md: -------------------------------------------------------------------------------- 1 | # k3s: Traefik 2 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/README.md: -------------------------------------------------------------------------------- 1 | # k3s: Decentralized NAS -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/README.md: -------------------------------------------------------------------------------- 1 | # k3s: Longhorn 2 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/templates/ip_vs.conf: -------------------------------------------------------------------------------- 1 | ip_vs -------------------------------------------------------------------------------- /collection/roles/service_keepalived/README.md: -------------------------------------------------------------------------------- 1 | # Service: Keepalived -------------------------------------------------------------------------------- /collection/roles/k3s_csi_driver_smb/README.md: -------------------------------------------------------------------------------- 1 | # k3s: CSI driver SMB 2 | -------------------------------------------------------------------------------- /collection/roles/k3s_dashboard/README.md: -------------------------------------------------------------------------------- 1 | # k3s: Kubernetes Dashboard 2 | -------------------------------------------------------------------------------- /collection/meta/runtime.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | requires_ansible: ">=2.10,<3.0" 4 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_size = 2 5 | indent_style = space 6 | -------------------------------------------------------------------------------- /collection/roles/k3s_dashboard/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_dashboard_release: latest 4 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | hosts: 5 | ubuntu-n1: 6 | -------------------------------------------------------------------------------- /collection/roles/k3s_csi_driver_smb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_csi_driver_smb_release: "" 4 | -------------------------------------------------------------------------------- /molecule/aosc/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | hosts: 5 | aosc-n1: 6 | # aosc-n2: 7 | -------------------------------------------------------------------------------- /molecule/k2ha/inventory/host_vars/k2ha-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | -------------------------------------------------------------------------------- /paper/dnas_context.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/dnas_context.png 
-------------------------------------------------------------------------------- /paper/dnas_software.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/dnas_software.png -------------------------------------------------------------------------------- /paper/vision_context.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/vision_context.png -------------------------------------------------------------------------------- /molecule/k1/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/prepare.yml 4 | -------------------------------------------------------------------------------- /molecule/k1ha/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/prepare.yml 4 | -------------------------------------------------------------------------------- /molecule/k1lo/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/prepare.yml 4 | -------------------------------------------------------------------------------- /molecule/k2/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/prepare.yml 4 | -------------------------------------------------------------------------------- /molecule/k2ha/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/prepare.yml 4 | 
-------------------------------------------------------------------------------- /paper/services_context.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/services_context.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible~=4.7 2 | ansible-lint[yamllint]~=5.0 3 | molecule~=3.5 4 | molecule-vagrant!=0.6.3 5 | -------------------------------------------------------------------------------- /molecule/aosc/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/images-build.yml 4 | -------------------------------------------------------------------------------- /molecule/k1/side_effect.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/k3s-deploy.yml 4 | -------------------------------------------------------------------------------- /molecule/k2/side_effect.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/k3s-deploy.yml 4 | -------------------------------------------------------------------------------- /paper/dnas_infrastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/dnas_infrastructure.png -------------------------------------------------------------------------------- /paper/services_software.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/services_software.png 
-------------------------------------------------------------------------------- /paper/vison_work_packages.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/vison_work_packages.png -------------------------------------------------------------------------------- /collection/roles/service_k3s/README.md: -------------------------------------------------------------------------------- 1 | # Service: K3S 2 | 3 | > Bootstrap a Kubernetes server based on https://k3s.io 4 | -------------------------------------------------------------------------------- /molecule/armbian/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/images-build.yml 4 | -------------------------------------------------------------------------------- /molecule/k1/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/cluster-bootstrap.yml 4 | -------------------------------------------------------------------------------- /molecule/k1ha/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/cluster-bootstrap.yml 4 | -------------------------------------------------------------------------------- /molecule/k1ha/side_effect.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/k3s-deploy.yml 4 | -------------------------------------------------------------------------------- /molecule/k1lo/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: 
../resources/playbooks/cluster-bootstrap.yml 4 | -------------------------------------------------------------------------------- /molecule/k1lo/side_effect.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/k3s-deploy.yml 4 | -------------------------------------------------------------------------------- /molecule/k2/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/cluster-bootstrap.yml 4 | -------------------------------------------------------------------------------- /molecule/k2ha/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/cluster-bootstrap.yml 4 | -------------------------------------------------------------------------------- /molecule/k2ha/side_effect.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/k3s-deploy.yml 4 | -------------------------------------------------------------------------------- /paper/illustrations_stacks.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/illustrations_stacks.odg -------------------------------------------------------------------------------- /molecule/resources/collections.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | collections: 4 | - name: ansible.posix 5 | - name: community.general 6 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - 
ansible.builtin.import_playbook: ../resources/playbooks/images-build.yml 4 | -------------------------------------------------------------------------------- /paper/services_infrastructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/services_infrastructure.png -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/templates/dnas-share.kustomize.secret.env: -------------------------------------------------------------------------------- 1 | username={{ dnas_username }} 2 | password={{ dnas_password }} 3 | -------------------------------------------------------------------------------- /paper/bs-files_sharing-usecases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-files_sharing-usecases.png -------------------------------------------------------------------------------- /paper/illustrations_dnas_example.odg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/illustrations_dnas_example.odg -------------------------------------------------------------------------------- /collection/roles/cluster_node/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | cluster_node_hold_packages: [ ] 4 | 5 | cluster_node_skip_reboot: false 6 | -------------------------------------------------------------------------------- /paper/bs-files_sharing-value_stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-files_sharing-value_stream.png -------------------------------------------------------------------------------- 
/paper/services_deployment_nextcloud.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/services_deployment_nextcloud.png -------------------------------------------------------------------------------- /paper/bs-files_synchronization-usecases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-files_synchronization-usecases.png -------------------------------------------------------------------------------- /paper/bs-photos_synchronization-usecases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-photos_synchronization-usecases.png -------------------------------------------------------------------------------- /molecule/armbian/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | hosts: 5 | armbian-n1: 6 | armbian-n2: 7 | armbian-n3: 8 | armbian-n4: 9 | -------------------------------------------------------------------------------- /molecule/k1lo/inventory/host_vars/k1lo-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | k3s_longhorn_image_device: "/dev/loop100" 6 | -------------------------------------------------------------------------------- /molecule/k2ha/inventory/host_vars/k2ha-n2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | k3s_longhorn_block_device: "/dev/vdb" 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .gdiag 2 | .idea/ 3 | 
.tmp/ 4 | .vagrant/ 5 | *.log 6 | *.swp 7 | *.tar.gz 8 | dist/ 9 | node_modules/ 10 | venv/ 11 | venv-vagrant/ 12 | -------------------------------------------------------------------------------- /paper/bs-files_synchronization-value_stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-files_synchronization-value_stream.png -------------------------------------------------------------------------------- /paper/bs-photos_synchronization-value_stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-photos_synchronization-value_stream.png -------------------------------------------------------------------------------- /paper/bs-contacts_calendars_management-usecases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tmorin/homecloud-ansible/HEAD/paper/bs-contacts_calendars_management-usecases.png -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_longhorn_disk_name: "{{ k3s_longhorn_disk_path | regex_replace('^/', '', 1) | replace('/', '-') }}.mount" 4 | -------------------------------------------------------------------------------- /molecule/k1ha/inventory/host_vars/k1ha-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | k3s_longhorn_block_device: "/dev/vdb" 6 | 7 | k3s_dnas_mount_what: "/dev/vdc" 8 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_virtual_ip: '' 4 | homecloud_node_ip: '' 5 | homecloud_node_interface: '' 6 | 7 | service_keepalived_router_id: 100 8 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | service_keepalived_peers: "{{ groups.k3s | map('extract', hostvars, 'homecloud_node_ip') | reject('search', homecloud_node_ip) }}" 4 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/README.md: -------------------------------------------------------------------------------- 1 | # Image: aosc 2 | 3 | ## Dependencies 4 | 5 | ### ubuntu (focal) 6 | 7 | ```shell script 8 | sudo apt install p7zip-full jq xz-utils qemu-user-static 9 | ``` 10 | -------------------------------------------------------------------------------- /molecule/k1lo/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-k3s.yml 4 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-k3s-deployments.yml 5 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/templates/swapoff.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 3 | [Service] 4 | Type=oneshot 5 | RemainAfterExit=yes 6 | ExecStart=/sbin/swapoff -a 7 | 8 | [Install] 9 | WantedBy=default.target 10 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/README.md: -------------------------------------------------------------------------------- 1 | # Image: Armbian 2 | 3 | ## Dependencies 4 | 5 | ### ubuntu (focal) 6 | 7 | 
```shell script 8 | sudo apt install p7zip-full jq xz-utils qemu-user-static 9 | ``` 10 | -------------------------------------------------------------------------------- /molecule/k2/inventory/host_vars/k2-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | service_k3s_node_labels: 6 | key0: value0 7 | key1: value1 8 | 9 | #service_k3s_node_taints: { } 10 | -------------------------------------------------------------------------------- /molecule/k2/inventory/host_vars/k2-n2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | service_k3s_node_labels: 6 | key1: value1 7 | key2: value2 8 | 9 | #service_k3s_node_taints: { } 10 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/README.md: -------------------------------------------------------------------------------- 1 | # Image: Ubuntu Raspberry Pi 2 | 3 | ## Dependencies 4 | 5 | ### ubuntu (focal) 6 | 7 | ```shell script 8 | sudo apt install p7zip-full jq xz-utils qemu-user-static 9 | ``` 10 | -------------------------------------------------------------------------------- /collection/roles/resources_prepare/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'create the resources directory' 4 | ansible.builtin.file: 5 | path: /tmp/resources 6 | state: directory 7 | mode: 0755 8 | changed_when: false 9 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/hold-packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Hold packages 4 | become: true 5 | ansible.builtin.command: "apt-mark hold {{ item }}" 6 | loop: "{{ cluster_node_hold_packages | default([]) }}" 
7 | changed_when: false 8 | -------------------------------------------------------------------------------- /lint: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | export ANSIBLE_COLLECTIONS_PATH=~/.ansible/collections:/usr/share/ansible/collections:/etc/ansible/collections 3 | export ANSIBLE_ROLES_PATH=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:collection/roles 4 | ansible-lint 5 | -------------------------------------------------------------------------------- /molecule/k1/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | # K3S 6 | k3s_srv: 7 | hosts: 8 | k1-n1: 9 | k3s_agt: 10 | hosts: { } 11 | k3s: 12 | children: 13 | k3s_srv: { } 14 | k3s_agt: { } 15 | -------------------------------------------------------------------------------- /molecule/k2/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | k3s_srv: 6 | hosts: 7 | k2-n1: 8 | k3s_agt: 9 | hosts: 10 | k2-n2: 11 | k3s: 12 | children: 13 | k3s_srv: { } 14 | k3s_agt: { } 15 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/verify-service-keepalived.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | tasks: 5 | - name: Check the virtual IP is available 6 | ansible.builtin.command: ping -c 2 -i 1 -W 2 {{ homecloud_virtual_ip }} 7 | changed_when: false 8 | -------------------------------------------------------------------------------- /molecule/k1ha/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | # K3S 6 | k3s_srv: 7 | hosts: 8 | k1ha-n1: 9 | k3s_agt: 10 | hosts: { } 11 | k3s: 12 | children: 13 | k3s_srv: { } 14 | k3s_agt: { } 15 | 
-------------------------------------------------------------------------------- /molecule/k1lo/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | # K3S 6 | k3s_srv: 7 | hosts: 8 | k1lo-n1: 9 | k3s_agt: 10 | hosts: { } 11 | k3s: 12 | children: 13 | k3s_srv: { } 14 | k3s_agt: { } 15 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_dnas_password_sha512: "{{ k3s_dnas_password | password_hash('sha512', 'dnas') }}" 4 | 5 | k3s_dnas_nodes: "{{ (groups.k3s | default([])) | map('extract', hostvars) | list | selectattr('k3s_dnas_mount_what', 'defined') }}" 6 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/get-server-token.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Fetch server token 4 | become: true 5 | ansible.builtin.fetch: 6 | src: /var/lib/rancher/k3s/server/node-token 7 | dest: "{{ homecloud_vault_path }}/k3s-sever-token.txt" 8 | flat: true 9 | -------------------------------------------------------------------------------- /molecule/k2ha/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | all: 4 | children: 5 | k3s_srv: 6 | hosts: 7 | k2ha-n1: 8 | k2ha-n2: 9 | k3s_agt: 10 | hosts: { } 11 | k3s: 12 | children: 13 | k3s_srv: { } 14 | k3s_agt: { } 15 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/templates/create_longhorn_loop.sh.jinja2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | looDetail=$(losetup | grep "{{ k3s_longhorn_image_device }}") 4 | if [ -z "$looDetail" ]; then 5 
| losetup "{{ k3s_longhorn_image_device }}" "{{ k3s_longhorn_image_file }}" 6 | fi 7 | -------------------------------------------------------------------------------- /molecule/k1ha/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-k3s.yml 4 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-keepalived.yml 5 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-k3s-deployments.yml 6 | -------------------------------------------------------------------------------- /molecule/k2ha/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-k3s.yml 4 | 5 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-keepalived.yml 6 | 7 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-k3s-deployments.yml 8 | -------------------------------------------------------------------------------- /molecule/k1/inventory/host_vars/k1-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: "{{ ansible_host }}" 4 | 5 | service_k3s_node_labels: 6 | key0: value0 7 | key1: value1 8 | 9 | service_k3s_node_taints: 10 | keyA: valueA:NoSchedule- 11 | keyB: valueB:NoSchedule- 12 | 13 | k3s_dnas_mount_what: "/dev/vdb" 14 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # the path of the aosc archive 4 | image_aosc_base_archive_path: "{{ image_aosc_working_directory }}/{{ image_aosc_archive_name }}" 5 | 6 | # the path of the aosc image 7 | image_aosc_base_image_path: "{{ image_aosc_working_directory }}/{{ image_aosc_image_name }}" 8 | 
-------------------------------------------------------------------------------- /molecule/k1lo/inventory/group_vars/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_services: 4 | - service_k3s 5 | 6 | k3s_deployments: 7 | - k3s_longhorn 8 | 9 | service_keepalived_router_id: 113 10 | 11 | k3s_longhorn_settings: 12 | default-replica-count: 1 13 | 14 | service_k3s_local_kubectl_config_file: "~/.kube/k1lo" 15 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # the path of the armbian archive 4 | image_armbian_base_archive_path: "{{ image_armbian_working_directory }}/{{ image_armbian_archive_name }}" 5 | 6 | # the path of the armbian image 7 | image_armbian_base_image_path: "{{ image_armbian_working_directory }}/{{ image_armbian_image_name }}" 8 | -------------------------------------------------------------------------------- /scripts/publish-alpha.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | npm run release:alpha 6 | 7 | rm -f collection/tmorin-homecloud-*.tar.gz 8 | 9 | cd collection \ 10 | && ansible-galaxy collection build --force \ 11 | && ansible-galaxy collection publish --token "${ANSIBLE_GALAXY_API_KEY}" tmorin-homecloud-*.tar.gz 12 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # the path of the ubuntu archive 4 | image_ubuntu_raspi_base_archive_path: "{{ image_ubuntu_raspi_working_directory }}/{{ image_ubuntu_raspi_archive_name }}" 5 | 6 | # the path of the ubuntu image 7 | image_ubuntu_raspi_base_image_path: "{{ 
image_ubuntu_raspi_working_directory }}/{{ image_ubuntu_raspi_image_name }}" 8 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | tasks: 5 | - name: Stat ubuntu-n1 6 | ansible.builtin.stat: 7 | path: "{{ image_ubuntu_raspi_release_directory }}/ubuntu-n1.img" 8 | register: stat_ubuntu_n1_img 9 | - name: Check ubuntu-n1 10 | ansible.builtin.assert: 11 | that: stat_ubuntu_n1_img is success 12 | -------------------------------------------------------------------------------- /scripts/vm-prepare.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | sudo apt-get install -y vagrant virtualbox virtualbox-ext-pack 6 | 7 | vagrant plugin install vagrant-vbguest 8 | 9 | vagrant up || true 10 | 11 | vagrant ssh -- " 12 | set -ex 13 | cd /vagrant 14 | virtualenv venv-vagrant 15 | source venv-vagrant/bin/activate 16 | pip install -r requirements.txt 17 | " 18 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: create a dedicated .img for each inventory's host 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - focal 11 | - hirsute 12 | 13 | galaxy_tags: 14 | - ubuntu 15 | 16 | dependencies: [ ] 17 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/templates/longhorn_loop.mount: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Mount the Longhorn disk 3 | After=longhorn_loop.service 4 | Before=k3s.service 5 | 6 | 
[Mount] 7 | What={{ k3s_longhorn_image_device }} 8 | Where={{ k3s_longhorn_disk_path }} 9 | Type={{ k3s_longhorn_disk_type }} 10 | Options={{ k3s_longhorn_disk_options }} 11 | 12 | [Install] 13 | WantedBy=local-fs.target 14 | -------------------------------------------------------------------------------- /collection/roles/k3s_csi_driver_smb/templates/csi-driver-smb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.cattle.io/v1 3 | kind: HelmChart 4 | metadata: 5 | name: csi-driver-smb 6 | namespace: kube-system 7 | spec: 8 | repo: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/charts 9 | chart: csi-driver-smb 10 | version: {{ k3s_csi_driver_smb_release | default("latest") }} 11 | --- 12 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/templates/mnt-dnas.mount: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Mount the decentralized NAS drive 3 | Before=k3s.service syncthing@{{ k3s_dnas_username }}.service smbd.service 4 | 5 | [Mount] 6 | What={{ k3s_dnas_mount_what }} 7 | Where={{ k3s_dnas_mount_where }} 8 | Type={{ k3s_dnas_mount_type }} 9 | Options={{ k3s_dnas_mount_options }} 10 | 11 | [Install] 12 | WantedBy=local-fs.target 13 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/verify-k3s-deployments.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: k3s 4 | tasks: 5 | - name: Verify k3s deployments 6 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 7 | ansible.builtin.include_tasks: ../tasks/verify-{{ k3s_deployment }}.yml 8 | loop: "{{ k3s_deployments | default([]) }}" 9 | loop_control: 10 | loop_var: k3s_deployment 11 | -------------------------------------------------------------------------------- 
/molecule/k1ha/inventory/group_vars/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_services: 4 | - service_keepalived 5 | - service_k3s 6 | 7 | k3s_deployments: 8 | - k3s_csi_driver_smb 9 | - k3s_longhorn 10 | - k3s_traefik 11 | - k3s_dnas 12 | 13 | service_keepalived_router_id: 112 14 | 15 | k3s_longhorn_settings: 16 | default-replica-count: 1 17 | 18 | service_k3s_local_kubectl_config_file: "~/.kube/k1ha" 19 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/verify-service-k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: k3s 4 | tasks: 5 | - name: Get K3S info 6 | ansible.builtin.command: which k3s 7 | failed_when: false 8 | register: k3s_info 9 | changed_when: false 10 | - name: Verify K3S installation 11 | ansible.builtin.assert: 12 | quiet: true 13 | that: 14 | - not k3s_info.failed 15 | -------------------------------------------------------------------------------- /scripts/vm-test-scenario.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | vagrant up || true 6 | 7 | SCENARIO="$1" 8 | 9 | if [[ -z "$SCENARIO" ]]; then 10 | echo "usage: ./scripts/vm-test-scenario.sh SCENARIO" 11 | return 1 12 | fi 13 | 14 | vagrant ssh -- " 15 | set -ex 16 | cd /vagrant 17 | virtualenv venv-vagrant 18 | source venv-vagrant/bin/activate 19 | molecule test -s $1 20 | " 21 | -------------------------------------------------------------------------------- /molecule/armbian/inventory/host_vars/armbian-n2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.72 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | 6 | # https://www.armbian.com/sopine-a64/ 7 | # https://redirect.armbian.com/region/EU/pine64so/Focal_current 8 | 
image_armbian_image_name: "armbian_pine64so_focal_current.img" 9 | image_armbian_image_url: "https://redirect.armbian.com/region/EU/pine64so/Focal_current" 10 | -------------------------------------------------------------------------------- /molecule/armbian/inventory/host_vars/armbian-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.71 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | 6 | # https://www.armbian.com/sopine-a64/ 7 | # https://redirect.armbian.com/region/EU/pine64so/Buster_current 8 | image_armbian_image_name: "armbian_pine64so_buster_current.img" 9 | image_armbian_image_url: "https://redirect.armbian.com/region/EU/pine64so/Buster_current" 10 | -------------------------------------------------------------------------------- /molecule/armbian/inventory/host_vars/armbian-n4.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.74 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | homecloud_node_interface: "eth0" 6 | homecloud_node_mac: "b6:09:a4:06:01:2d" 7 | 8 | # https://www.armbian.com/rock64 9 | image_armbian_image_name: "armbian_rock64_focal_current.img" 10 | image_armbian_image_url: "https://redirect.armbian.com/region/EU/rock64/Focal_current" 11 | -------------------------------------------------------------------------------- /collection/roles/resources_clean/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'find resources' 4 | ansible.builtin.find: 5 | paths: /tmp/resources 6 | patterns: "*" 7 | register: files_to_delete 8 | changed_when: false 9 | 10 | - name: 'delete resources' 11 | ansible.builtin.file: 12 | path: "{{ item.path }}" 13 | state: absent 14 | with_items: "{{ files_to_delete.files }}" 15 | changed_when: false 16 | -------------------------------------------------------------------------------- 
/molecule/k2ha/inventory/group_vars/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_services: 4 | - service_keepalived 5 | - service_k3s 6 | 7 | k3s_deployments: 8 | - k3s_longhorn 9 | - k3s_traefik 10 | 11 | service_keepalived_router_id: 122 12 | 13 | k3s_longhorn_settings: 14 | default-replica-count: 1 15 | 16 | service_k3s_local_kubectl_config_file: "~/.kube/k2ha" 17 | 18 | k3s_traefik_persistence_storage_class: "longhorn" 19 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/templates/dnas-share.kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | commonLabels: 4 | dnas.morin.io/smb: "{{ item.ansible_hostname }}" 5 | nameSuffix: "-{{ item.ansible_hostname }}" 6 | namespace: dnas 7 | resources: 8 | - dnas-share.kustomize.yml 9 | secretGenerator: 10 | - name: dnas-share 11 | envs: 12 | - dnas-share.kustomize.secret.env 13 | -------------------------------------------------------------------------------- /molecule/k1/inventory/group_vars/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_services: 4 | - service_k3s 5 | 6 | k3s_deployments: 7 | - k3s_csi_driver_smb 8 | - k3s_traefik 9 | - k3s_dnas 10 | - k3s_dashboard 11 | 12 | service_k3s_local_kubectl_config_file: "~/.kube/k1" 13 | 14 | # htpasswd -nb admin admin_password | openssl base64 15 | k3s_traefik_dashboard_users: |- 16 | YWRtaW46JGFwcjEkNFgyVzVLcWwkZEh0SUJFUC54Q2E3Z0hyV2lwNG5zMQoK 17 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/configure-hostname.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set hostname 4 | become: true 5 | ansible.builtin.hostname: 6 | name: "{{ 
inventory_hostname }}" 7 | 8 | - name: Build hosts file 9 | become: true 10 | ansible.builtin.lineinfile: 11 | dest: /etc/hosts 12 | regexp: '.*{{ item }}$' 13 | line: "{{ hostvars[item].homecloud_node_ip }} {{ item }}" 14 | state: present 15 | loop: "{{ groups.all }}" 16 | -------------------------------------------------------------------------------- /molecule/k2/inventory/group_vars/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_services: 4 | - service_keepalived 5 | - service_k3s 6 | 7 | k3s_deployments: 8 | - k3s_longhorn 9 | - k3s_traefik 10 | 11 | service_keepalived_router_id: 121 12 | 13 | k3s_longhorn_settings: 14 | default-replica-count: 2 15 | 16 | service_k3s_local_kubectl_config_file: "~/.kube/k2" 17 | 18 | k3s_longhorn_block_device: "/dev/vdb" 19 | 20 | k3s_traefik_persistence_storage_class: "longhorn" 21 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/set-node-labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set node labels 4 | become: true 5 | ansible.builtin.command: kubectl label node --overwrite {{ node_name }} {{ node_label[0] }}={{ node_label[1] | default('') }} 6 | loop: "{{ (hostvars[node_name].service_k3s_node_labels | default({})).items() }}" 7 | loop_control: 8 | loop_var: node_label 9 | register: set_node_label 10 | changed_when: not 'not labeled' in set_node_label.stdout 11 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/install-dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install packages 4 | become: true 5 | ansible.builtin.package: 6 | name: ntp,python3,python3-dev,python3-pip,python3-setuptools,python3-wheel 7 | state: present 8 | force_apt_get: true 9 | 10 | - name: Install dependencies 
from pip 11 | become: true 12 | ansible.builtin.pip: 13 | name: 14 | - wheel 15 | - setuptools 16 | - pyaml 17 | extra_args: --user 18 | -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: deploy Traefik (https://traefik.io) 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - kubernetes 19 | - traefik 20 | 21 | dependencies: [ ] 22 | -------------------------------------------------------------------------------- /molecule/aosc/inventory/host_vars/aosc-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.61 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | 6 | # https://aosc.io/downloads/alternative/#base 7 | # https://releases.aosc.io/os-arm64/rockchip64/base/rock64/rk3328-rock64_rk64-base_2020-06-08_emmc.img.lz4 8 | image_aosc_image_name: "rk3328-rock64_rk64-base_2020-06-08_emmc.img" 9 | image_aosc_image_url: "https://releases.aosc.io/os-arm64/rockchip64/base/rock64/{{ image_aosc_archive_name }}" 10 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: create a dedicated .img for each inventory's host 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | 21 | dependencies: [ ] 22 | 
-------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: deploy Longhorn (https://longhorn.io) 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - kubernetes 19 | - longhorn 20 | 21 | dependencies: [ ] 22 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/install-swapoff.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy swapoff.service 4 | become: true 5 | ansible.builtin.template: 6 | src: swapoff.service 7 | dest: /etc/systemd/system/swapoff.service 8 | owner: root 9 | group: root 10 | mode: 0644 11 | 12 | - name: Enable and start swapoff.service 13 | become: true 14 | ansible.builtin.systemd: 15 | daemon_reload: true 16 | name: swapoff.service 17 | enabled: true 18 | state: started 19 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: create a dedicated .img for each inventory's host 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | 21 | dependencies: [ ] 22 | -------------------------------------------------------------------------------- /scripts/postbump.js: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env node 2 | const path = require('path'); 3 | const fs = require('fs'); 4 | const YAML = require('yaml') 5 | const pkg = require('../package.json'); 6 | const dstPath = path.join(__dirname, '..', 'collection', 'galaxy.yml') 7 | const dstContent = YAML.parse(fs.readFileSync(dstPath, 'utf8')); 8 | console.log(dstPath, 'from', dstContent.version, 'to', pkg.version); 9 | dstContent.version = pkg.version; 10 | fs.writeFileSync(dstPath, YAML.stringify(dstContent)); 11 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Include task 4 | ansible.builtin.import_tasks: configure-hostname.yml 5 | 6 | - name: Include task 7 | ansible.builtin.import_tasks: hold-packages.yml 8 | 9 | - name: Include task 10 | ansible.builtin.import_tasks: install-dependencies.yml 11 | 12 | - name: Include task 13 | ansible.builtin.import_tasks: install-swapoff.yml 14 | 15 | - name: Include task 16 | ansible.builtin.import_tasks: activate-iptables-legacy.yml 17 | -------------------------------------------------------------------------------- /collection/roles/cluster_node/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: Apply basic configurations (hostname, deactivate swap ...). 
4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | 21 | dependencies: [ ] 22 | -------------------------------------------------------------------------------- /collection/roles/k3s_dashboard/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: deploy the Kubernetes Dashboard (https://github.com/kubernetes/dashboard) 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - kubernetes 19 | - dashboard 20 | 21 | dependencies: [ ] 22 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: install and configure Keepalived with Docker container 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | - keepalived 21 | 22 | dependencies: [ ] 23 | -------------------------------------------------------------------------------- /collection/roles/k3s_dashboard/templates/customization.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: admin-user 6 | namespace: kubernetes-dashboard 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRoleBinding 10 | metadata: 11 | name: admin-user 12 | 
roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: cluster-admin 16 | subjects: 17 | - kind: ServiceAccount 18 | name: admin-user 19 | namespace: kubernetes-dashboard 20 | --- 21 | -------------------------------------------------------------------------------- /molecule/aosc/inventory/host_vars/aosc-n2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.62 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | 6 | # https://aosc.io/downloads/alternative/#base 7 | # https://releases.aosc.io/os-arm64/sunxi64/base/sopine-baseboard/sun50i-a64-sopine-baseboard_sunxi64-base_2020-06-07_emmc.img.lz4 8 | image_aosc_image_name: "sun50i-a64-sopine-baseboard_sunxi64-base_2020-06-07_emmc.img" 9 | image_aosc_image_url: "https://releases.aosc.io/os-arm64/sunxi64/base/sopine-baseboard/{{ image_aosc_archive_name }}" 10 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k3s_dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Verify Kubernetes dashboard 4 | block: 5 | - name: Configure endpoint verifier 6 | ansible.builtin.set_fact: 7 | k8s_namespace: kubernetes-dashboard 8 | k8s_deploy_name: kubernetes-dashboard 9 | k8s_service_name: kubernetes-dashboard-external 10 | k8s_url_protocol: https 11 | k8s_expected_status: HTTP/2 200 12 | - name: Include task 13 | ansible.builtin.include_tasks: verify-k8s-http-endpoint.yml 14 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/k3s-server-primary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install primary server 4 | become: true 5 | ansible.builtin.shell: | 6 | set -o pipefail 7 | curl -sfL https://get.k3s.io | \ 8 | INSTALL_K3S_VERSION="{{ service_k3s_version }}" \ 9 | sh -s - 
server \ 10 | --cluster-init \ 11 | --node-name {{ inventory_hostname }} \ 12 | --disable traefik \ 13 | --disable metrics-server 14 | args: 15 | executable: /bin/bash 16 | creates: /usr/local/bin/k3s 17 | warn: false 18 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/wait-for-nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Wait for nodes availability 4 | become: true 5 | ansible.builtin.command: kubectl get node -o jsonpath='{.items[?(@.status.conditions[-1].type=="Ready")].metadata.name}' 6 | changed_when: false 7 | register: kubectl_result 8 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(3) }}" 9 | retries: "{{ homecloud_k8s_deploy_timeout_retries | default(40) }}" 10 | until: kubectl_result.stdout.split(' ') | unique | length == groups.k3s | length 11 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/inventory/host_vars/ubuntu-n1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.81 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | 6 | # https://ubuntu.com/download/raspberry-pi 7 | # https://cdimage.ubuntu.com/releases/20.04.3/release/ubuntu-20.04.3-preinstalled-server-arm64+raspi.img.xz 8 | image_ubuntu_raspi_image_name: "ubuntu-20.04.3-preinstalled-server-arm64+raspi.img" 9 | image_ubuntu_raspi_image_url: "https://cdimage.ubuntu.com/releases/20.04.3/release/ubuntu-20.04.3-preinstalled-server-arm64+raspi.img.xz" 10 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/set-node-taints.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set node taints 4 | become: true 5 | ansible.builtin.shell: | 6 | set -o pipefail 7 | kubectl taint node --overwrite {{ node_name
}} {{ node_taint[0] }}={{ node_taint[1] | default('') }} || true 8 | args: 9 | executable: /bin/bash 10 | warn: false 11 | loop: "{{ (hostvars[node_name].service_k3s_node_taints | default({})).items() }}" 12 | loop_control: 13 | loop_var: node_taint 14 | register: set_node_taint 15 | changed_when: false 16 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/host_install-mount.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Transfer the mount configuration 4 | become: true 5 | ansible.builtin.template: 6 | src: mnt-dnas.mount 7 | dest: /etc/systemd/system/{{ k3s_dnas_mount_name }}.mount 8 | owner: root 9 | group: root 10 | mode: 0644 11 | 12 | - name: Enable and start the mount configuration 13 | become: true 14 | ansible.builtin.systemd: 15 | daemon_reload: true 16 | name: "{{ k3s_dnas_mount_name }}.mount" 17 | enabled: true 18 | state: started 19 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/templates/longhorn_loop.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Setup a loop device for Longhorn 3 | DefaultDependencies=false 4 | ConditionFileIsExecutable={{ k3s_longhorn_script_create_loop }} 5 | Before=local-fs.target 6 | After=systemd-udev-trigger.service 7 | Requires=systemd-udev-trigger.service 8 | 9 | [Service] 10 | Type=oneshot 11 | ExecStart={{ k3s_longhorn_script_create_loop }} 12 | TimeoutSec=60 13 | RemainAfterExit=yes 14 | 15 | [Install] 16 | WantedBy=local-fs.target 17 | Also=systemd-udev-trigger.service 18 | 19 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: bootstrap a 
Kubernetes server based on https://k3s.io 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | - kubernetes 21 | - k3s 22 | - k8s 23 | 24 | dependencies: [ ] 25 | -------------------------------------------------------------------------------- /scripts/ci-install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | id 6 | 7 | echo "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | tee -a /etc/apt/sources.list.d/hashicorp.list 8 | curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - 9 | apt-get update 10 | apt-get install -y vagrant 11 | apt-get install -y bridge-utils qemu-kvm virtinst libvirt-daemon-system 12 | 13 | kvm-ok || true 14 | 15 | apt-get install -y ruby-dev libvirt-dev libssl-dev 16 | vagrant plugin install vagrant-libvirt 17 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/k3s-deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | pre_tasks: 5 | - name: prepare 6 | ansible.builtin.include_role: 7 | name: resources_prepare 8 | tasks: 9 | - name: Apply k3s deployments 10 | ansible.builtin.include_role: 11 | name: "{{ k3s_deployment }}" 12 | loop: "{{ k3s_deployments | default([]) }}" 13 | loop_control: 14 | loop_var: "k3s_deployment" 15 | post_tasks: 16 | - name: clean 17 | ansible.builtin.include_role: 18 | name: resources_clean 19 | -------------------------------------------------------------------------------- /collection/roles/k3s_csi_driver_smb/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | 
description: deploy csi-driver-smb (https://github.com/kubernetes-csi/csi-driver-smb) 4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - stack 19 | - kubernetes 20 | - cifs 21 | - samba 22 | 23 | dependencies: [ ] 24 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/tasks/configure-block.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Format the block device of Longhorn" 4 | become: true 5 | community.general.filesystem: 6 | dev: "{{ k3s_longhorn_block_device }}" 7 | force: false 8 | fstype: "{{ k3s_longhorn_disk_type }}" 9 | 10 | - name: "Mount the block device on boot" 11 | become: true 12 | ansible.posix.mount: 13 | boot: true 14 | state: mounted 15 | src: "{{ k3s_longhorn_block_device }}" 16 | path: "{{ k3s_longhorn_disk_path }}" 17 | fstype: "{{ k3s_longhorn_disk_type }}" 18 | -------------------------------------------------------------------------------- /molecule/k2/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault-k2" 4 | 5 | homecloud_virtual_ip: 192.168.200.10 6 | 7 | homecloud_node_interface: eth1 8 | 9 | homecloud_k8s_deploy_timeout_delay: 3 10 | homecloud_k8s_deploy_timeout_retries: 80 11 | 12 | # dev-sec.ssh-hardening 13 | ssh_allow_users: "{{ ansible_user }}" 14 | ssh_use_pam: true 15 | ssh_max_auth_retries: 10 16 | sftp_enabled: true 17 | # dev-sec.os-hardening 18 | os_auditd_enabled: false 19 | ufw_manage_defaults: false 20 | sysctl_overwrite: 21 | net.ipv4.ip_forward: 1 22 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_longhorn_release: "v1.2.5" 4 | 5 | k3s_longhorn_script_create_loop: "/usr/local/share/homecloud/scripts/create_longhorn_loop.sh" 6 | 7 | k3s_longhorn_image_dd_bs: "10M" 8 | k3s_longhorn_image_dd_cnt: "100" 9 | k3s_longhorn_image_device: "" 10 | k3s_longhorn_image_file: "/var/lib/homecloud/longhorn.img" 11 | 12 | k3s_longhorn_block_device: "" 13 | 14 | k3s_longhorn_disk_path: "/var/lib/longhorn" 15 | k3s_longhorn_disk_type: "ext4" 16 | k3s_longhorn_disk_options: "defaults" 17 | 18 | k3s_longhorn_settings: { } 19 | -------------------------------------------------------------------------------- /molecule/k1/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault-k1" 4 | 5 | homecloud_virtual_ip: "{{ ansible_host }}" 6 | 7 | homecloud_node_interface: eth1 8 | 9 | homecloud_k8s_deploy_timeout_delay: 3 10 | homecloud_k8s_deploy_timeout_retries: 80 11 | 12 | # dev-sec.ssh-hardening 13 | ssh_allow_users: '{{ ansible_user }}' 14 | ssh_use_pam: true 15 | ssh_max_auth_retries: 10 16 | sftp_enabled: true 17 | # dev-sec.os-hardening 18 | os_auditd_enabled: false 19 | ufw_manage_defaults: false 20 | sysctl_overwrite: 21 | net.ipv4.ip_forward: 1 22 | -------------------------------------------------------------------------------- /molecule/k1ha/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault-k1ha" 4 | 5 | homecloud_virtual_ip: 192.168.100.20 6 | 7 | homecloud_node_interface: eth1 8 | 9 | homecloud_k8s_deploy_timeout_delay: 3 10 | homecloud_k8s_deploy_timeout_retries: 80 11 | 12 | # dev-sec.ssh-hardening 13 | ssh_allow_users: '{{ ansible_user }}' 14 | ssh_use_pam: true 15 | ssh_max_auth_retries: 10 16 | sftp_enabled: true 17 | # dev-sec.os-hardening 
18 | os_auditd_enabled: false 19 | ufw_manage_defaults: false 20 | sysctl_overwrite: 21 | net.ipv4.ip_forward: 1 22 | -------------------------------------------------------------------------------- /molecule/k1lo/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault-k1lo" 4 | 5 | homecloud_virtual_ip: 192.168.100.31 6 | 7 | homecloud_node_interface: eth1 8 | 9 | homecloud_k8s_deploy_timeout_delay: 3 10 | homecloud_k8s_deploy_timeout_retries: 80 11 | 12 | # dev-sec.ssh-hardening 13 | ssh_allow_users: '{{ ansible_user }}' 14 | ssh_use_pam: true 15 | ssh_max_auth_retries: 10 16 | sftp_enabled: true 17 | # dev-sec.os-hardening 18 | os_auditd_enabled: false 19 | ufw_manage_defaults: false 20 | sysctl_overwrite: 21 | net.ipv4.ip_forward: 1 22 | -------------------------------------------------------------------------------- /molecule/k2ha/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault-k2ha" 4 | 5 | homecloud_virtual_ip: 192.168.200.20 6 | 7 | homecloud_node_interface: eth1 8 | 9 | homecloud_k8s_deploy_timeout_delay: 3 10 | homecloud_k8s_deploy_timeout_retries: 80 11 | 12 | # dev-sec.ssh-hardening 13 | ssh_allow_users: "{{ ansible_user }}" 14 | ssh_use_pam: true 15 | ssh_max_auth_retries: 10 16 | sftp_enabled: true 17 | # dev-sec.os-hardening 18 | os_auditd_enabled: false 19 | ufw_manage_defaults: false 20 | sysctl_overwrite: 21 | net.ipv4.ip_forward: 1 22 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Thibault Morin 3 | description: Install and configure a decentralized NAS, that means an external drive, Samba and Syncthing. 
4 | license: MIT 5 | min_ansible_version: "3.1" 6 | 7 | platforms: 8 | - name: Debian 9 | versions: 10 | - buster 11 | - bullseye 12 | - name: Ubuntu 13 | versions: 14 | - bionic 15 | - focal 16 | 17 | galaxy_tags: 18 | - debian 19 | - ubuntu 20 | - samba 21 | - syncthing 22 | - nfs 23 | 24 | dependencies: [ ] 25 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/k3s-agent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install agent 4 | become: true 5 | ansible.builtin.shell: | 6 | set -o pipefail 7 | curl -sfL https://get.k3s.io | \ 8 | INSTALL_K3S_VERSION="{{ service_k3s_version }}" \ 9 | K3S_URL="https://{{ service_k3s_server_primary_ip }}:6443" \ 10 | K3S_TOKEN="{{ lookup('file', homecloud_vault_path~'/k3s-sever-token.txt') }}" \ 11 | sh -s - agent \ 12 | --node-name {{ inventory_hostname }} 13 | args: 14 | executable: /bin/bash 15 | creates: /usr/local/bin/k3s 16 | warn: false 17 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-csi-smb-dnas-step2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: test-csi-smb-dnas-step2 6 | namespace: default 7 | spec: 8 | containers: 9 | - name: step2 10 | image: busybox 11 | imagePullPolicy: IfNotPresent 12 | command: 13 | - "sh" 14 | - "-c" 15 | - "cat /data/file" 16 | volumeMounts: 17 | - name: data 18 | mountPath: /data 19 | restartPolicy: Never 20 | volumes: 21 | - name: data 22 | persistentVolumeClaim: 23 | claimName: test-csi-smb-dnas 24 | --- 25 | -------------------------------------------------------------------------------- /molecule/aosc/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault" 4 | 
image_aosc_working_directory: "/tmp/molecule/image_aosc/working_directory" 5 | image_aosc_release_directory: "/tmp/molecule/image_aosc/release_directory" 6 | 7 | homecloud_virtual_ip: '192.168.0.60' 8 | homecloud_network_netmask: '255.255.255.0' 9 | homecloud_network_gateway: '192.168.0.1' 10 | homecloud_network_nameservers: [ '1.1.1.1', '1.0.0.1' ] 11 | homecloud_is_https: false 12 | 13 | image_aosc_ssh_pub_key_path: "/tmp/dummy_pub_rsa" 14 | 15 | image_aosc_archive_name: "{{ image_aosc_image_name }}.lz4" 16 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-longhorn-rwo-step2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: test-longhorn-rwo-step2 6 | namespace: default 7 | spec: 8 | containers: 9 | - name: step2 10 | image: busybox 11 | imagePullPolicy: IfNotPresent 12 | command: 13 | - "sh" 14 | - "-c" 15 | - "cat /data/file" 16 | volumeMounts: 17 | - name: data 18 | mountPath: /data 19 | restartPolicy: Never 20 | volumes: 21 | - name: data 22 | persistentVolumeClaim: 23 | claimName: test-longhorn-rwo-pvc 24 | --- 25 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-longhorn-rwx-step3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: test-longhorn-rwx-step3 6 | namespace: default 7 | spec: 8 | containers: 9 | - name: step3 10 | image: busybox 11 | imagePullPolicy: IfNotPresent 12 | command: 13 | - "sh" 14 | - "-c" 15 | - "cat /data/file" 16 | volumeMounts: 17 | - name: data 18 | mountPath: /data 19 | restartPolicy: Never 20 | volumes: 21 | - name: data 22 | persistentVolumeClaim: 23 | claimName: test-longhorn-rwx-pvc 24 | --- 25 | -------------------------------------------------------------------------------- 
/molecule/armbian/inventory/host_vars/armbian-n3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ansible_host: 192.168.100.73 4 | homecloud_node_ip: "{{ ansible_host }}" 5 | homecloud_node_interface: "eth0" 6 | homecloud_node_mac: "b6:09:a4:06:01:2c" 7 | 8 | # https://www.armbian.com/rock64 9 | # https://redirect.armbian.com/region/EU/rock64/Buster_current 10 | # https://archive.armbian.com/rock64/archive/Armbian_20.08.1_Rock64_buster_legacy_4.4.213.img.xz 11 | image_armbian_image_name: "Armbian_20.08.1_Rock64_buster_legacy_4.4.213.img" 12 | image_armbian_image_url: "https://archive.armbian.com/rock64/archive/Armbian_20.08.1_Rock64_buster_legacy_4.4.213.img.xz" 13 | -------------------------------------------------------------------------------- /molecule/armbian/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault" 4 | image_armbian_working_directory: "/tmp/molecule/image_armbian/working_directory" 5 | image_armbian_release_directory: "/tmp/molecule/image_armbian/release_directory" 6 | 7 | homecloud_virtual_ip: '192.168.0.70' 8 | homecloud_network_netmask: '255.255.255.0' 9 | homecloud_network_gateway: '192.168.0.1' 10 | homecloud_network_nameservers: [ '1.1.1.1', '1.0.0.1' ] 11 | homecloud_is_https: false 12 | 13 | image_armbian_ssh_pub_key_path: "/tmp/dummy_pub_rsa" 14 | 15 | image_armbian_archive_name: "{{ image_armbian_image_name }}.xz" 16 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-longhorn-rwx-step2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: test-longhorn-rwx-step2 6 | namespace: default 7 | spec: 8 | containers: 9 | - name: step2 10 | image: busybox 11 | imagePullPolicy: IfNotPresent 12 | command: 13 | - "sh" 
14 | - "-c" 15 | - "echo 'hello from step2' >> /data/file" 16 | volumeMounts: 17 | - name: data 18 | mountPath: /data 19 | restartPolicy: Never 20 | volumes: 21 | - name: data 22 | persistentVolumeClaim: 23 | claimName: test-longhorn-rwx-pvc 24 | --- 25 | -------------------------------------------------------------------------------- /molecule/aosc/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | tasks: 5 | 6 | - name: Stat aosc-n1 7 | ansible.builtin.stat: 8 | path: "{{ image_aosc_release_directory }}/aosc-n1.img" 9 | register: stat_aosc_n1_img 10 | - name: Check aosc-n1 11 | ansible.builtin.assert: 12 | that: stat_aosc_n1_img is success 13 | 14 | # - name: Stat aosc-n2 15 | # ansible.builtin.stat: 16 | # path: "{{ image_aosc_release_directory }}/aosc-n2.img" 17 | # register: stat_aosc_n2_img 18 | # - name: Check aosc-n2 19 | # ansible.builtin.assert: 20 | # that: stat_aosc_n2_img is success 21 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/k3s-server-secondary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install secondary server 4 | become: true 5 | ansible.builtin.shell: | 6 | set -o pipefail 7 | curl -sfL https://get.k3s.io | \ 8 | INSTALL_K3S_VERSION="{{ service_k3s_version }}" \ 9 | K3S_TOKEN="{{ lookup('file', homecloud_vault_path~'/k3s-sever-token.txt') }}" \ 10 | sh -s - server \ 11 | --node-name {{ inventory_hostname }} \ 12 | --server https://{{ service_k3s_server_primary_ip }}:6443 \ 13 | --disable traefik \ 14 | --disable metrics-server 15 | args: 16 | executable: /bin/bash 17 | creates: /usr/local/bin/k3s 18 | warn: false 19 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/inventory/group_vars/all.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: "/tmp/molecule/vault" 4 | image_ubuntu_raspi_working_directory: "/tmp/molecule/ubuntu_raspi/working_directory" 5 | image_ubuntu_raspi_release_directory: "/tmp/molecule/ubuntu_raspi/release_directory" 6 | 7 | homecloud_virtual_ip: '192.168.100.80' 8 | homecloud_network_netmask: '255.255.255.0' 9 | homecloud_network_gateway: '192.168.0.1' 10 | homecloud_network_nameservers: [ '1.1.1.1', '1.0.0.1' ] 11 | homecloud_is_https: false 12 | 13 | image_ubuntu_raspi_ssh_pub_key_path: "/tmp/dummy_pub_rsa" 14 | 15 | image_ubuntu_raspi_archive_name: "{{ image_ubuntu_raspi_image_name }}.xz" 16 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: tmorin 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: thibault.morin 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /molecule/ubuntu_raspi/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | driver: 4 | name: delegated 5 | options: 6 | managed: false 7 | ansible_connection_options: 8 | ansible_connection: local 9 | 10 | platforms: 11 | - name: ubuntu-n1 12 | 13 | provisioner: 14 | name: ansible 15 | env: 16 | 
ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 17 | inventory: 18 | links: 19 | hosts: inventory/hosts.yml 20 | group_vars: inventory/group_vars/ 21 | host_vars: inventory/host_vars/ 22 | 23 | verifier: 24 | name: ansible 25 | -------------------------------------------------------------------------------- /molecule/aosc/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | driver: 4 | name: delegated 5 | options: 6 | managed: false 7 | ansible_connection_options: 8 | ansible_connection: local 9 | 10 | platforms: 11 | - name: aosc-n1 12 | # - name: aosc-n2 13 | 14 | provisioner: 15 | name: ansible 16 | env: 17 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 18 | inventory: 19 | links: 20 | hosts: inventory/hosts.yml 21 | group_vars: inventory/group_vars/ 22 | host_vars: inventory/host_vars/ 23 | 24 | verifier: 25 | name: ansible 26 | -------------------------------------------------------------------------------- /scripts/customize-armbian.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | hd=".tmp/armbian" 6 | rf=".tmp/armbian/rootfs" 7 | bi=".tmp/Armbian_20.08.1_Rock64_buster_legacy_4.4.213.img" 8 | 9 | if [ ! 
-f "$bi" ]; then 10 | curl -o "$bi.xz" https://archive.armbian.com/rock64/archive/Armbian_20.08.1_Rock64_buster_legacy_4.4.213.img.xz 11 | unxz "$bi.xz" 12 | fi 13 | 14 | sudo collection/roles/image_armbian/files/mounter.sh -hd "$hd" -bi "$bi" -c 15 | 16 | sudo collection/roles/image_armbian/files/mounter.sh -hd "$hd" -bi "$bi" -p 17 | 18 | sudo collection/roles/image_armbian/files/customizer.sh -rf "$rf" -dlr 19 | 20 | sudo collection/roles/image_armbian/files/mounter.sh -hd "$hd" -bi "$bi" -u 21 | -------------------------------------------------------------------------------- /collection/galaxy.yml: -------------------------------------------------------------------------------- 1 | namespace: tmorin 2 | name: homecloud 3 | version: 2.9.0 4 | readme: README.md 5 | authors: 6 | - Thibault Morin (https://tmorin.github.io) 7 | description: The collection provides a ready-to-use set of resources to 8 | bootstrap a cloud at home based on Kubernetes and Syncthing. 9 | license: 10 | - MIT 11 | tags: 12 | - syncthing 13 | - samba 14 | - kubernetes 15 | - k3s 16 | - k8s 17 | - longhorn 18 | - traefik 19 | repository: https://github.com/tmorin/homecloud-ansible 20 | homepage: https://github.com/tmorin/homecloud-ansible#readme 21 | issues: https://github.com/tmorin/homecloud-ansible/issues 22 | dependencies: 23 | ansible.posix: "*" 24 | community.general: "*" 25 | -------------------------------------------------------------------------------- /scripts/customize-aosc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -xeo pipefail 4 | 5 | hd=".tmp/rk64" 6 | rf=".tmp/rk64/rootfs" 7 | bi=".tmp/rk3328-rock64_rk64-base_2020-06-08_emmc.img" 8 | 9 | if [ ! 
-f "$bi" ]; then 10 | curl -o "$bi.lz4" https://releases.aosc.io/os-arm64/rockchip64/xfce/rock64/rk3328-rock64_rk64-xfce_2020-06-09_emmc.img.lz4 11 | unlz4 "$bi.lz4" 12 | rm "$bi.lz4" 13 | fi 14 | 15 | sudo collection/roles/image_aosc/files/mounter.sh -hd "$hd" -bi "$bi" -c 16 | 17 | sudo collection/roles/image_aosc/files/mounter.sh -hd "$hd" -bi "$bi" -p 18 | 19 | sudo collection/roles/image_aosc/files/customizer.sh -rf "$rf" 20 | 21 | sudo collection/roles/image_aosc/files/mounter.sh -hd "$hd" -bi "$bi" -u 22 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_vault_path: /tmp/homecloud/vault 4 | homecloud_k8s_deploy_timeout_delay: 3 5 | homecloud_k8s_deploy_timeout_retries: 40 6 | 7 | service_k3s_server_primary_ip: "{{ hostvars[groups['k3s_srv'][0]].homecloud_node_ip }}" 8 | 9 | # see `INSTALL_K3S_VERSION` on https://rancher.com/docs/k3s/latest/en/installation/install-options/ 10 | service_k3s_version: "" 11 | 12 | # The value should be overridden from a host variable. 13 | service_k3s_node_labels: { } 14 | 15 | # The value should be overridden from a host variable. 
16 | service_k3s_node_taints: { } 17 | 18 | service_k3s_local_kubectl_config_file: "~/.kube/homecloud" 19 | 20 | service_k3s_kustomize_release: v4.5.5 21 | -------------------------------------------------------------------------------- /molecule/armbian/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | driver: 4 | name: delegated 5 | options: 6 | managed: false 7 | ansible_connection_options: 8 | ansible_connection: local 9 | 10 | platforms: 11 | - name: armbian-n1 12 | - name: armbian-n2 13 | - name: armbian-n3 14 | - name: armbian-n4 15 | 16 | provisioner: 17 | name: ansible 18 | env: 19 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 20 | inventory: 21 | links: 22 | hosts: inventory/hosts.yml 23 | group_vars: inventory/group_vars/ 24 | host_vars: inventory/host_vars/ 25 | 26 | verifier: 27 | name: ansible 28 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | tasks: 5 | 6 | - name: Update packages 7 | become: true 8 | ansible.builtin.apt: 9 | update_cache: true 10 | autoclean: true 11 | autoremove: true 12 | force_apt_get: true 13 | 14 | - name: Force a DNS server 15 | become: true 16 | ansible.builtin.copy: 17 | content: "nameserver 1.1.1.1" 18 | dest: /etc/resolv.conf 19 | mode: "0644" 20 | 21 | - name: Format the block device of DNAS 22 | when: k3s_dnas_mount_what is defined 23 | become: true 24 | community.general.filesystem: 25 | dev: "{{ k3s_dnas_mount_what }}" 26 | force: false 27 | fstype: ext4 28 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/host_install-syncthing.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies 4 | become: true 5 | ansible.builtin.package: 6 | name: apt-transport-https 7 | state: present 8 | force_apt_get: true 9 | 10 | - name: Import syncthing GPG key to apt 11 | become: true 12 | ansible.builtin.apt_key: 13 | url: 'https://syncthing.net/release-key.txt' 14 | state: present 15 | 16 | - name: Add syncthing repository 17 | become: true 18 | ansible.builtin.apt_repository: 19 | repo: 'deb https://apt.syncthing.net/ syncthing stable' 20 | state: present 21 | 22 | - name: Install syncthing 23 | become: true 24 | ansible.builtin.package: 25 | name: syncthing 26 | state: present 27 | force_apt_get: true 28 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_dnas_username: "dnas" 4 | k3s_dnas_password: "dnas" 5 | k3s_dnas_uid: "1010" 6 | k3s_dnas_gid: "1010" 7 | 8 | k3s_dnas_mount_what: "" 9 | k3s_dnas_mount_where: "/mnt/dnas" 10 | k3s_dnas_mount_type: "ext4" 11 | k3s_dnas_mount_options: "" 12 | k3s_dnas_mount_name: '{{ k3s_dnas_mount_where | regex_replace("^/") | regex_replace("/", "-") }}' 13 | 14 | k3s_dnas_syncthing_gui_address: "0.0.0.0:8384" 15 | k3s_dnas_syncthing_gui_username: "dnas" 16 | # https://https://bcrypthashgenerator.tool-kit.dev 17 | k3s_dnas_syncthing_gui_password: "$2a$10$dvemZCjuv8CDNfMVexU1t.v0or0Q1PKlSVHl7tPThuEkmBq7B3fbi" # == dnas 18 | # k3s_dnas_syncthing_gui_password: "{{ 'dnas' | password_hash('bcrypt', 'dnas') }}" # :( it doesn't work 19 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/configure_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Set role for agents 4 | become: true 5 | 
ansible.builtin.command: kubectl label node --overwrite {{ node_name }} node-role.kubernetes.io/worker=true 6 | loop: "{{ groups.k3s_agt | default([]) }}" 7 | loop_control: 8 | loop_var: node_name 9 | register: set_role_agents 10 | changed_when: not 'not labeled' in set_role_agents.stdout 11 | 12 | - name: Set node labels 13 | ansible.builtin.include_tasks: set-node-labels.yml 14 | loop: "{{ groups.k3s | default([]) }}" 15 | loop_control: 16 | loop_var: node_name 17 | 18 | - name: Set node taints 19 | ansible.builtin.include_tasks: set-node-taints.yml 20 | loop: "{{ groups.k3s | default([]) }}" 21 | loop_control: 22 | loop_var: node_name 23 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'check parameters' 4 | delegate_to: localhost 5 | ansible.builtin.assert: 6 | that: 7 | - homecloud_node_ip | length > 0 8 | - homecloud_network_netmask | length > 0 9 | - homecloud_network_gateway | length > 0 10 | - image_aosc_image_url | length > 0 11 | - image_aosc_archive_name | length > 0 12 | - image_aosc_image_name | length > 0 13 | 14 | - name: Include task 15 | ansible.builtin.import_tasks: prepare-workdir.yml 16 | 17 | - name: Include task 18 | ansible.builtin.import_tasks: get-aosc-image.yml 19 | 20 | #- name: Include task 21 | # ansible.builtin.import_tasks: patch-host-image.yml 22 | # 23 | #- name: Include task 24 | # ansible.builtin.import_tasks: release-host-image.yml 25 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/templates/keepalived.conf: -------------------------------------------------------------------------------- 1 | global_defs { 2 | default_interface {{ homecloud_node_interface }} 3 | } 4 | 5 | vrrp_instance homecloud { 6 | 7 | state {{ (groups['k3s_srv'][0] == inventory_hostname) | ternary('MASTER', 
'BACKUP') }} 8 | 9 | priority {{ (groups['k3s_srv'][0] == inventory_hostname) | ternary('255', '100') }} 10 | 11 | interface {{ homecloud_node_interface }} 12 | 13 | unicast_peer { 14 | {% for peer in service_keepalived_peers -%} 15 | {{ peer }} 16 | {% endfor %} 17 | } 18 | 19 | virtual_router_id {{ service_keepalived_router_id }} 20 | 21 | virtual_ipaddress { 22 | {{ homecloud_virtual_ip }} 23 | } 24 | 25 | {% if groups['k3s_srv'][0] != inventory_hostname -%} 26 | nopreempt 27 | {% endif %} 28 | 29 | } 30 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'check parameters' 4 | delegate_to: localhost 5 | ansible.builtin.assert: 6 | that: 7 | - homecloud_node_ip | length > 0 8 | - homecloud_network_netmask | length > 0 9 | - homecloud_network_gateway | length > 0 10 | - image_armbian_image_url | length > 0 11 | - image_armbian_archive_name | length > 0 12 | - image_armbian_image_name | length > 0 13 | 14 | - name: Include task 15 | ansible.builtin.import_tasks: prepare-workdir.yml 16 | 17 | - name: Include task 18 | ansible.builtin.import_tasks: get-armbian-image.yml 19 | 20 | - name: Include task 21 | ansible.builtin.import_tasks: patch-host-image.yml 22 | 23 | - name: Include task 24 | ansible.builtin.import_tasks: release-host-image.yml 25 | -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | k3s_traefik_dashboard_ingress_host: "traefik.home.cloud" 4 | 5 | k3s_traefik_dashboard_users: "" 6 | 7 | # htpasswd -nb admin admin_password | openssl base64 8 | #k3s_traefik_dashboard_users: |- 9 | # YWRtaW46JGFwcjEkNFgyVzVLcWwkZEh0SUJFUC54Q2E3Z0hyV2lwNG5zMQoK 10 | 11 | k3s_traefik_acme_email: "admin@homecloud.local" 
12 | k3s_traefik_acme_server: "https://acme-v02.api.letsencrypt.org/directory" 13 | k3s_traefik_persistence_storage_class: "local-path" 14 | 15 | # echo $(htpasswd -nb admin admin_password) | sed -e s/\\$/\\$\\$/g 16 | k3s_traefik_auth_admin: "admin:$$apr1$$dAp9Vuir$$wxnIUy1XNHHSawNfWRSyg0" 17 | 18 | k3s_traefik_resources: { } 19 | #k3s_traefik_resources: 20 | # requests: 21 | # memory: 50Mi 22 | # limits: 23 | # memory: 100Mi 24 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Based on ansible-lint config 4 | extends: default 5 | 6 | rules: 7 | braces: 8 | max-spaces-inside: 1 9 | level: error 10 | brackets: 11 | max-spaces-inside: 1 12 | level: error 13 | colons: 14 | max-spaces-after: -1 15 | level: error 16 | commas: 17 | max-spaces-after: -1 18 | level: error 19 | comments: disable 20 | comments-indentation: disable 21 | document-start: disable 22 | empty-lines: 23 | max: 3 24 | level: error 25 | hyphens: 26 | level: error 27 | indentation: disable 28 | key-duplicates: enable 29 | line-length: disable 30 | new-line-at-end-of-file: disable 31 | new-lines: 32 | type: unix 33 | trailing-spaces: disable 34 | truthy: disable 35 | 36 | ignore: | 37 | .* 38 | env 39 | node_modules 40 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "private": true, 3 | "version": "2.9.0", 4 | "scripts": { 5 | "gdiag": "gdiag --wd=paper", 6 | "gdiag:clean": "gdiag --wd=paper -c", 7 | "release:alpha": "standard-version --commit-all --prerelease alpha --skip.changelog --skip.tag", 8 | "release:beta": "standard-version --commit-all --prerelease beta --skip.changelog", 9 | "release:publish": "git push --tags && git push --all", 10 | "release:standard": "standard-version --commit-all" 11 | }, 12 | 
"standard-version": { 13 | "scripts": { 14 | "postbump": "node ./scripts/postbump.js > postbump.log", 15 | "precommit": "git add -A" 16 | } 17 | }, 18 | "dependencies": { 19 | "@tmorin/plantuml-libs": "^11.1.0", 20 | "standard-version": "^9.1.1", 21 | "yaml": "^2.2.2" 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/tasks/release-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Chown {{ image_aosc_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | ansible.builtin.file: 7 | path: "{{ image_aosc_host_directory }}" 8 | mode: u=rwX,g=rX,o=rX 9 | owner: "1000" 10 | group: "1000" 11 | recurse: true 12 | changed_when: false 13 | 14 | - name: Create the image release directory 15 | delegate_to: localhost 16 | ansible.builtin.file: 17 | path: "{{ image_aosc_release_directory }}" 18 | state: directory 19 | mode: 0755 20 | 21 | - name: Move host images 22 | delegate_to: localhost 23 | become: true 24 | ansible.builtin.raw: "mv -f {{ image_aosc_host_directory }}/image.img {{ image_aosc_release_directory }}/{{ inventory_hostname }}.img" 25 | changed_when: false 26 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 'check parameters' 4 | delegate_to: localhost 5 | ansible.builtin.assert: 6 | that: 7 | - homecloud_node_ip | length > 0 8 | - homecloud_network_netmask | length > 0 9 | - homecloud_network_gateway | length > 0 10 | - image_ubuntu_raspi_image_url | length > 0 11 | - image_ubuntu_raspi_archive_name | length > 0 12 | - image_ubuntu_raspi_image_name | length > 0 13 | 14 | - name: Include task 15 | ansible.builtin.import_tasks: prepare-workdir.yml 16 | 17 | - name: Include task 18 | ansible.builtin.import_tasks: 
fetch-image.yml 19 | 20 | - name: Include task 21 | ansible.builtin.import_tasks: patch-host-image.yml 22 | 23 | - name: Include task 24 | ansible.builtin.import_tasks: release-host-image.yml 25 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/tasks/release-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Chown {{ image_armbian_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | ansible.builtin.file: 7 | path: "{{ image_armbian_host_directory }}" 8 | mode: u=rwX,g=rX,o=rX 9 | owner: "1000" 10 | group: "1000" 11 | recurse: true 12 | changed_when: false 13 | 14 | - name: Create the image release directory 15 | delegate_to: localhost 16 | ansible.builtin.file: 17 | path: "{{ image_armbian_release_directory }}" 18 | state: directory 19 | mode: 0755 20 | 21 | - name: Move host images 22 | delegate_to: localhost 23 | become: true 24 | ansible.builtin.raw: "mv -f {{ image_armbian_host_directory }}/image.img {{ image_armbian_release_directory }}/{{ inventory_hostname }}.img" 25 | changed_when: false 26 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/images-build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | gather_facts: false 5 | become: false 6 | serial: 1 7 | tasks: 8 | - name: Create the dummy rsa key 9 | ansible.builtin.copy: 10 | dest: /tmp/dummy_pub_rsa 11 | content: dummy key 12 | mode: 0644 13 | - name: Build Armbian image 14 | when: image_armbian_image_url | default("") | length > 0 15 | ansible.builtin.include_role: 16 | name: image_armbian 17 | - name: Build Ubuntu image 18 | when: image_ubuntu_raspi_image_url | default("") | length > 0 19 | ansible.builtin.include_role: 20 | name: image_ubuntu_raspi 21 | - name: Build AOSC image 22 | when: image_aosc_image_url | default("") 
| length > 0 23 | ansible.builtin.include_role: 24 | name: image_aosc 25 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/host_add-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add the group 4 | become: true 5 | ansible.builtin.group: 6 | name: "{{ k3s_dnas_username }}" 7 | gid: "{{ k3s_dnas_gid }}" 8 | state: present 9 | 10 | - name: Add the user 11 | become: true 12 | ansible.builtin.user: 13 | name: "{{ k3s_dnas_username }}" 14 | group: "{{ k3s_dnas_username }}" 15 | uid: "{{ k3s_dnas_uid }}" 16 | home: "{{ k3s_dnas_mount_where }}" 17 | create_home: false 18 | password: "{{ k3s_dnas_password_sha512 }}" 19 | state: present 20 | 21 | - name: Chown the home directory 22 | become: true 23 | ansible.builtin.file: 24 | path: "{{ k3s_dnas_mount_where }}" 25 | state: directory 26 | recurse: true 27 | owner: "{{ k3s_dnas_username }}" 28 | group: "{{ k3s_dnas_username }}" 29 | mode: "0755" 30 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/tasks/release-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Chown {{ image_ubuntu_raspi_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | ansible.builtin.file: 7 | path: "{{ image_ubuntu_raspi_host_directory }}" 8 | mode: u=rwX,g=rX,o=rX 9 | owner: "1000" 10 | group: "1000" 11 | recurse: true 12 | changed_when: false 13 | 14 | - name: "Create the image release directory" 15 | delegate_to: localhost 16 | ansible.builtin.file: 17 | path: "{{ image_ubuntu_raspi_release_directory }}" 18 | state: directory 19 | mode: 0755 20 | 21 | - name: "Move host images" 22 | delegate_to: localhost 23 | become: true 24 | ansible.builtin.raw: "mv -f {{ image_ubuntu_raspi_host_directory }}/image.img {{ image_ubuntu_raspi_release_directory }}/{{ 
inventory_hostname }}.img" 25 | changed_when: false 26 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-longhorn-rwo-step1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: test-longhorn-rwo-pvc 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 10Mi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: test-longhorn-rwo-step1 18 | namespace: default 19 | spec: 20 | containers: 21 | - name: step1 22 | image: busybox 23 | imagePullPolicy: IfNotPresent 24 | command: 25 | - "sh" 26 | - "-c" 27 | - "echo 'hello from step1' > /data/file" 28 | volumeMounts: 29 | - name: data 30 | mountPath: /data 31 | restartPolicy: Never 32 | volumes: 33 | - name: data 34 | persistentVolumeClaim: 35 | claimName: test-longhorn-rwo-pvc 36 | --- 37 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-longhorn-rwx-step1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: test-longhorn-rwx-pvc 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 10Mi 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: test-longhorn-rwx-step1 18 | namespace: default 19 | spec: 20 | containers: 21 | - name: step1 22 | image: busybox 23 | imagePullPolicy: IfNotPresent 24 | command: 25 | - "sh" 26 | - "-c" 27 | - "echo 'hello from step1' > /data/file" 28 | volumeMounts: 29 | - name: data 30 | mountPath: /data 31 | restartPolicy: Never 32 | volumes: 33 | - name: data 34 | persistentVolumeClaim: 35 | claimName: test-longhorn-rwx-pvc 36 | --- 37 | 
-------------------------------------------------------------------------------- /collection/roles/cluster_node/tasks/activate-iptables-legacy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Stats /usr/sbin/iptables-legacy 4 | become: true 5 | ansible.builtin.stat: 6 | path: /usr/sbin/iptables-legacy 7 | get_attributes: no 8 | get_checksum: no 9 | get_mime: no 10 | register: stat_iptables_legacy 11 | 12 | - name: Switch to iptables legacy 13 | when: stat_iptables_legacy.stat.exists 14 | become: true 15 | ansible.builtin.shell: | 16 | iptables -F 17 | update-alternatives --set iptables /usr/sbin/iptables-legacy 18 | update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy 19 | args: 20 | executable: /bin/bash 21 | changed_when: false 22 | register: switch_iptables_legacy 23 | 24 | - name: Reboot 25 | when: not cluster_node_skip_reboot and stat_iptables_legacy.stat.exists and switch_iptables_legacy.stdout_lines | length > 0 26 | become: true 27 | ansible.builtin.reboot: { } 28 | -------------------------------------------------------------------------------- /molecule/resources/playbooks/cluster-bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | pre_tasks: 5 | - name: prepare 6 | ansible.builtin.include_role: 7 | name: resources_prepare 8 | tasks: 9 | # CLUSTER 10 | - name: Bootstrap cluster node 11 | ansible.builtin.include_role: 12 | name: cluster_node 13 | # DNAS 14 | - name: Bootstrap dnas services 15 | ansible.builtin.include_role: 16 | name: "{{ dnas_service }}" 17 | loop: "{{ dnas_services | default([]) }}" 18 | loop_control: 19 | loop_var: "dnas_service" 20 | # K3S 21 | - name: Bootstrap k3s services 22 | ansible.builtin.include_role: 23 | name: "{{ k3s_service }}" 24 | loop: "{{ k3s_services | default([]) }}" 25 | loop_control: 26 | loop_var: "k3s_service" 27 | post_tasks: 28 | - name: clean 29 | 
ansible.builtin.include_role: 30 | name: resources_clean 31 | -------------------------------------------------------------------------------- /molecule/k1/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-k3s.yml 4 | 5 | - hosts: k3s_srv[0] 6 | tasks: 7 | # verify labels 8 | - name: Get nodes with label key0 9 | become: true 10 | ansible.builtin.command: kubectl get node -l key0 -o custom-columns=name:.metadata.name --no-headers 11 | changed_when: false 12 | register: get_label_key0 13 | - name: Get nodes with label key1 14 | become: true 15 | ansible.builtin.command: kubectl get node -l key1 -o custom-columns=name:.metadata.name --no-headers 16 | changed_when: false 17 | register: get_label_key1 18 | - name: Verify node labels 19 | ansible.builtin.assert: 20 | quiet: true 21 | that: 22 | - get_label_key0.stdout_lines | length == 1 23 | - get_label_key1.stdout_lines | length == 1 24 | 25 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-k3s-deployments.yml 26 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-traefik.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: whoami 6 | labels: 7 | app: whoami 8 | spec: 9 | containers: 10 | - name: whoami 11 | image: containous/whoami:latest 12 | ports: 13 | - containerPort: 80 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: whoami 19 | spec: 20 | ports: 21 | - port: 80 22 | protocol: TCP 23 | targetPort: 80 24 | selector: 25 | app: whoami 26 | type: ClusterIP 27 | --- 28 | apiVersion: traefik.containo.us/v1alpha1 29 | kind: IngressRoute 30 | metadata: 31 | name: whoami 32 | spec: 33 | entryPoints: 34 | - websecure 35 | routes: 36 | - match: Host(`whoami.homecloud.local`) 37 | kind: 
Rule 38 | services: 39 | - name: whoami 40 | port: 80 41 | --- 42 | # curl -IkH host:whoami.home.cloud http://192.168.121.176:32080 -> HTTP/1.1 308 Permanent Redirect 43 | # curl -IkH host:whoami.home.cloud https://192.168.121.176:32443 -> HTTP/2 200 44 | -------------------------------------------------------------------------------- /molecule/k1lo/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | #options: 6 | #requirements-file: molecule/resources/collections.yml 7 | 8 | driver: 9 | name: vagrant 10 | provider: 11 | name: libvirt 12 | 13 | platforms: 14 | - name: k1lo-n1 15 | box: "${MOLECULE_N1_BOX-generic/debian11}" 16 | memory: 4096 17 | cpu: 2 18 | interfaces: 19 | - network_name: 'private_network' 20 | ip: '192.168.100.31' 21 | libvirt__domain_name: 'homecloud.local' 22 | 23 | provisioner: 24 | name: ansible 25 | env: 26 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 27 | config_options: 28 | ssh_connection: 29 | scp_if_ssh: true 30 | control_path: null 31 | inventory: 32 | links: 33 | hosts: inventory/hosts.yml 34 | group_vars: inventory/group_vars/ 35 | host_vars: inventory/host_vars/ 36 | 37 | verifier: 38 | name: ansible 39 | -------------------------------------------------------------------------------- /paper/bs-contacts_calendars_management.puml: -------------------------------------------------------------------------------- 1 | @startuml bs-contacts_calendars_management-usecases 2 | !global $INCLUSION_MODE="local" 3 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 4 | !include $LIB_BASE_LOCATION/bootstrap.puml 5 | include('homecloud-2/bootstrap') 6 | !include ./nord.puml 7 | include('material-4/Social/Person') 8 | include('material-4/Device/Devices') 9 | Person("owner", "owner") 10 | Devices("devices", "devices") 11 | 
Title("Contacts & Calendars Management", "Business Scenario / Usecase Diagram") 12 | rectangle homecloud { 13 | usecase manage_contacts as "manage contacts" 14 | usecase synchronize_contacts as "synchronize contacts" 15 | synchronize_contacts <.. manage_contacts : ««extends» » 16 | usecase manage_calendars as "manage calendars" 17 | usecase synchronize_files as "synchronize calendars" 18 | synchronize_files <.. manage_calendars : ««extends» » 19 | } 20 | owner -u-> manage_contacts 21 | owner -u-> manage_calendars 22 | devices -d-> synchronize_files 23 | devices -d-> synchronize_contacts 24 | @enduml 25 | -------------------------------------------------------------------------------- /molecule/k2/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-k3s.yml 4 | 5 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-service-keepalived.yml 6 | 7 | - hosts: k3s_srv[0] 8 | tasks: 9 | - name: Get nodes with label key1 10 | become: true 11 | ansible.builtin.command: kubectl get node -l key1 -o custom-columns=name:.metadata.name --no-headers 12 | changed_when: false 13 | register: get_label_key1 14 | - name: Get nodes with label key2 15 | become: true 16 | ansible.builtin.command: kubectl get node -l key2 -o custom-columns=name:.metadata.name --no-headers 17 | changed_when: false 18 | register: get_label_key2 19 | - name: Verify node labels 20 | ansible.builtin.assert: 21 | quiet: true 22 | that: 23 | - get_label_key1.stdout_lines | length == 2 24 | - get_label_key2.stdout_lines | length == 1 25 | 26 | - ansible.builtin.import_playbook: ../resources/playbooks/verify-k3s-deployments.yml 27 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/tasks/prepare-workdir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: 
Create working directory 4 | delegate_to: localhost 5 | ansible.builtin.file: 6 | path: "{{ image_aosc_working_directory }}/{{ inventory_hostname }}" 7 | state: directory 8 | mode: 0755 9 | 10 | - name: Resolve image_aosc_base_image_path 11 | delegate_to: localhost 12 | ansible.builtin.raw: "realpath {{ image_aosc_base_image_path }}" 13 | changed_when: false 14 | register: image_aosc_base_image_path_cmd 15 | - name: Resolve image_aosc_base_image_path 16 | ansible.builtin.set_fact: image_aosc_base_image_path="{{ image_aosc_base_image_path_cmd.stdout_lines[0] }}" 17 | 18 | - name: Resolve image_aosc_host_directory_cmd 19 | delegate_to: localhost 20 | ansible.builtin.raw: "realpath {{ image_aosc_working_directory }}/{{ inventory_hostname }}" 21 | changed_when: false 22 | register: image_aosc_host_directory_cmd 23 | - name: Resolve image_aosc_host_directory 24 | ansible.builtin.set_fact: image_aosc_host_directory="{{ image_aosc_host_directory_cmd.stdout_lines[0] }}" 25 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Install dependencies from apt" 4 | become: true 5 | ansible.builtin.package: 6 | name: open-iscsi,nfs-common,cifs-utils 7 | state: present 8 | force_apt_get: true 9 | 10 | - name: "Configure the disk" 11 | when: k3s_longhorn_block_device | default('') | length > 0 12 | block: 13 | - name: "Include task" 14 | ansible.builtin.import_tasks: configure-block.yml 15 | 16 | - name: "Configure the image" 17 | when: k3s_longhorn_image_device | default('') | length > 0 18 | block: 19 | - name: "Include task" 20 | ansible.builtin.import_tasks: configure-image.yml 21 | 22 | - name: "Configure Longhorn nodes" 23 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 24 | block: 25 | - name: "Include task" 26 | ansible.builtin.import_tasks: configure-node.yml 27 | 28 | - name:
"Deploy Longhorn" 29 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 30 | block: 31 | - name: "Include task" 32 | ansible.builtin.import_tasks: deploy.yml 33 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: '' 4 | homecloud_network_netmask: '' 5 | homecloud_network_gateway: '' 6 | homecloud_network_nameserver: [ ] 7 | 8 | # the url to get the aosc archive (c.f. https://aosc.io/downloads/alternative/#base) 9 | image_aosc_image_url: "" 10 | # the name of the aosc archive (for instance rk3328-rock64_rk64-base_2020-06-08_emmc.img.lz4) 11 | image_aosc_archive_name: "" 12 | # the name of the aosc image (for instance rk3328-rock64_rk64-base_2020-06-08_emmc.img) 13 | image_aosc_image_name: "" 14 | # clean the working directory of each host 15 | image_aosc_clean_working_directory: true 16 | # the directory where images will be temporary mounted and patched 17 | image_aosc_working_directory: "/tmp/homecloud/aosc_workdir" 18 | # the path to the SSH public key used by ansible 19 | image_aosc_ssh_pub_key_path: "~/.ssh/id_rsa.pub" 20 | # the username of the user which will be used by ansible 21 | image_aosc_username: "homecloud" 22 | # the directory where images will be released 23 | image_aosc_release_directory: "/tmp/homecloud/aosc_release" 24 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/tasks/prepare-workdir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create working directory 4 | delegate_to: localhost 5 | ansible.builtin.file: 6 | path: "{{ image_armbian_working_directory }}/{{ inventory_hostname }}" 7 | state: directory 8 | mode: 0755 9 | 10 | - name: Resolve image_armbian_base_image_path 11 | delegate_to: localhost 12 | 
ansible.builtin.raw: "realpath {{ image_armbian_base_image_path }}" 13 | changed_when: false 14 | register: image_armbian_base_image_path_cmd 15 | - name: Resolve image_armbian_base_image_path 16 | ansible.builtin.set_fact: image_armbian_base_image_path="{{ image_armbian_base_image_path_cmd.stdout_lines[0] }}" 17 | 18 | - name: Resolve image_armbian_host_directory_cmd 19 | delegate_to: localhost 20 | ansible.builtin.raw: "realpath {{ image_armbian_working_directory }}/{{ inventory_hostname }}" 21 | changed_when: false 22 | register: image_armbian_host_directory_cmd 23 | - name: Resolve image_armbian_host_directory 24 | ansible.builtin.set_fact: image_armbian_host_directory="{{ image_armbian_host_directory_cmd.stdout_lines[0] }}" 25 | -------------------------------------------------------------------------------- /molecule/k1/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | options: 6 | requirements-file: molecule/resources/collections.yml 7 | 8 | driver: 9 | name: vagrant 10 | provider: 11 | name: libvirt 12 | 13 | platforms: 14 | - name: k1-n1 15 | box: "${MOLECULE_N1_BOX-generic/debian11}" 16 | memory: 4096 17 | cpu: 2 18 | interfaces: 19 | - network_name: 'private_network' 20 | ip: '192.168.100.11' 21 | libvirt__domain_name: 'homecloud.local' 22 | provider_raw_config_args: 23 | - "storage :file, :size => '2G'" 24 | 25 | provisioner: 26 | name: ansible 27 | env: 28 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 29 | config_options: 30 | ssh_connection: 31 | scp_if_ssh: true 32 | control_path: null 33 | inventory: 34 | links: 35 | hosts: inventory/hosts.yml 36 | group_vars: inventory/group_vars/ 37 | host_vars: inventory/host_vars/ 38 | 39 | verifier: 40 | name: ansible 41 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Thibault Morin 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/templates/network-config: -------------------------------------------------------------------------------- 1 | # This file contains a netplan-compatible configuration which cloud-init 2 | # will apply on first-boot. 
Please refer to the cloud-init documentation and 3 | # the netplan reference for full details: 4 | # 5 | # https://cloudinit.readthedocs.io/ 6 | # https://netplan.io/reference 7 | # 8 | # Some additional examples are commented out below 9 | 10 | version: 2 11 | ethernets: 12 | eth0: 13 | #dhcp4: true 14 | optional: true 15 | addresses: 16 | - {{ homecloud_node_ip }}/24 17 | gateway4: {{ homecloud_network_gateway }} 18 | nameservers: 19 | addresses: [{{ homecloud_network_nameservers | join(', ') }}] 20 | #wifis: 21 | # wlan0: 22 | # dhcp4: true 23 | # optional: true 24 | # access-points: 25 | # myhomewifi: 26 | # password: "S3kr1t" 27 | # myworkwifi: 28 | # password: "correct battery horse staple" 29 | # workssid: 30 | # auth: 31 | # key-management: eap 32 | # method: peap 33 | # identity: "me@example.com" 34 | # password: "passw0rd" 35 | # ca-certificate: /etc/my_ca.pem 36 | -------------------------------------------------------------------------------- /collection/roles/k3s_csi_driver_smb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies from apt 4 | become: true 5 | ansible.builtin.package: 6 | name: cifs-utils 7 | state: present 8 | force_apt_get: true 9 | 10 | - name: Deploy csi-driver-smb 11 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 12 | block: 13 | - name: Deploy the manifest 14 | become: true 15 | ansible.builtin.template: 16 | src: csi-driver-smb.yaml 17 | dest: /var/lib/rancher/k3s/server/manifests/homecloud-csi-driver-smb.yaml 18 | mode: '0644' 19 | force: true 20 | changed_when: false 21 | - name: Wait for the controller 22 | become: true 23 | ansible.builtin.command: kubectl -n kube-system get deploy csi-smb-controller \ 24 | -o jsonpath='{.status.conditions[?(@.type=="Available")].status}' 25 | changed_when: false 26 | register: kubcetl_get_controller 27 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(3) }}" 28 | retries: "{{ 
homecloud_k8s_deploy_timeout_retries | default(40) }}" 29 | until: "'True' in kubcetl_get_controller.stdout" 30 | -------------------------------------------------------------------------------- /molecule/k1ha/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | options: 6 | requirements-file: molecule/resources/collections.yml 7 | 8 | driver: 9 | name: vagrant 10 | provider: 11 | name: libvirt 12 | 13 | platforms: 14 | - name: k1ha-n1 15 | box: "${MOLECULE_N1_BOX-generic/ubuntu2110}" 16 | memory: 4096 17 | cpu: 2 18 | interfaces: 19 | - network_name: 'private_network' 20 | ip: '192.168.100.21' 21 | libvirt__domain_name: 'homecloud.local' 22 | provider_raw_config_args: 23 | - "storage :file, :size => '2G'" 24 | - "storage :file, :size => '2G'" 25 | 26 | provisioner: 27 | name: ansible 28 | env: 29 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 30 | config_options: 31 | ssh_connection: 32 | scp_if_ssh: true 33 | control_path: null 34 | inventory: 35 | links: 36 | hosts: inventory/hosts.yml 37 | group_vars: inventory/group_vars/ 38 | host_vars: inventory/host_vars/ 39 | 40 | verifier: 41 | name: ansible 42 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/tasks/prepare-workdir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Create working directory" 4 | delegate_to: localhost 5 | ansible.builtin.file: 6 | path: "{{ image_ubuntu_raspi_working_directory }}/{{ inventory_hostname }}" 7 | state: directory 8 | mode: 0755 9 | 10 | - name: "Resolve image_ubuntu_raspi_base_image_path" 11 | delegate_to: localhost 12 | ansible.builtin.raw: "realpath {{ image_ubuntu_raspi_base_image_path }}" 13 | changed_when: false 14 | 
register: image_ubuntu_raspi_base_image_path_cmd 15 | - name: Resolve image_ubuntu_raspi_base_image_path 16 | ansible.builtin.set_fact: image_ubuntu_raspi_base_image_path="{{ image_ubuntu_raspi_base_image_path_cmd.stdout_lines[0] }}" 17 | 18 | - name: "Resolve image_ubuntu_raspi_host_directory_cmd" 19 | delegate_to: localhost 20 | ansible.builtin.raw: "realpath {{ image_ubuntu_raspi_working_directory }}/{{ inventory_hostname }}" 21 | changed_when: false 22 | register: image_ubuntu_raspi_host_directory_cmd 23 | - name: Resolve image_ubuntu_raspi_host_directory 24 | ansible.builtin.set_fact: image_ubuntu_raspi_host_directory="{{ image_ubuntu_raspi_host_directory_cmd.stdout_lines[0] }}" 25 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/tasks/configure-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Add node label for Create Default Disk Device" 4 | become: true 5 | ansible.builtin.command: kubectl label node --overwrite {{ item.ansible_hostname }} \ 6 | node.longhorn.io/create-default-disk=true \ 7 | homecloud.github.io/longhorn-disk=true 8 | loop: "{{ (groups.k3s | default([])) | map('extract', hostvars) | list | selectattr('k3s_longhorn_block_device', 'defined') }}" 9 | loop_control: 10 | label: "{{ item.ansible_hostname }}" 11 | register: set_node_label 12 | changed_when: not 'not labeled' in set_node_label.stdout 13 | 14 | - name: "Add node label for Create Default Image Device" 15 | become: true 16 | ansible.builtin.command: kubectl label node --overwrite {{ item.ansible_hostname }} \ 17 | node.longhorn.io/create-default-disk=true \ 18 | homecloud.github.io/longhorn-image=true 19 | loop: "{{ (groups.k3s | default([])) | map('extract', hostvars) | list | selectattr('k3s_longhorn_image_device', 'defined') }}" 20 | loop_control: 21 | label: "{{ item.ansible_hostname }}" 22 | register: set_node_label 23 | changed_when: not 'not 
labeled' in set_node_label.stdout 24 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: '' 4 | homecloud_network_netmask: '' 5 | homecloud_network_gateway: '' 6 | homecloud_network_nameserver: [ ] 7 | 8 | # the url to get the ubuntu archive (c.f. https://ubuntu.com/download/raspberry-pi) 9 | image_ubuntu_raspi_image_url: "" 10 | # the name of the ubuntu archive (for instance ubuntu-20.04.3-preinstalled-server-arm64+raspi.img.xz) 11 | image_ubuntu_raspi_archive_name: "" 12 | # the name of the ubuntu image (for instance ubuntu-20.04.3-preinstalled-server-arm64+raspi.img) 13 | image_ubuntu_raspi_image_name: "" 14 | # clean the working directory of each host 15 | image_ubuntu_raspi_clean_working_directory: true 16 | # the directory where images will be temporary mounted and patched 17 | image_ubuntu_raspi_working_directory: "/tmp/homecloud/ubuntu_raspi_workdir" 18 | # the path to the SSH public key used by ansible 19 | image_ubuntu_raspi_ssh_pub_key_path: "~/.ssh/id_rsa.pub" 20 | # the username of the user which will be used by ansible 21 | image_ubuntu_raspi_username: "homecloud" 22 | # the directory where images will be released 23 | image_ubuntu_raspi_release_directory: "/tmp/homecloud/ubuntu_raspi_release" 24 | -------------------------------------------------------------------------------- /collection/roles/service_keepalived/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check parameters 4 | delegate_to: localhost 5 | ansible.builtin.assert: 6 | quiet: true 7 | that: 8 | - homecloud_virtual_ip | length > 0 9 | - homecloud_node_ip | length > 0 10 | - homecloud_node_interface | length > 0 11 | 12 | - name: Load the ip_vs module 13 | become: true 14 | community.general.modprobe: 15 | name: 
ip_vs 16 | state: present 17 | 18 | - name: Load the ip_vs module at bootstrap 19 | become: true 20 | ansible.builtin.template: 21 | src: ip_vs.conf 22 | dest: /etc/modules-load.d/ip_vs.conf 23 | owner: root 24 | group: root 25 | mode: 0644 26 | 27 | - name: Install keepalived 28 | become: true 29 | ansible.builtin.package: 30 | name: keepalived 31 | state: present 32 | force_apt_get: true 33 | 34 | - name: Configure keepalived 35 | become: true 36 | ansible.builtin.template: 37 | src: keepalived.conf 38 | dest: /etc/keepalived/keepalived.conf 39 | owner: root 40 | group: root 41 | mode: 0644 42 | 43 | - name: Enable and start keepalived 44 | become: true 45 | ansible.builtin.systemd: 46 | name: keepalived 47 | enabled: true 48 | state: started 49 | -------------------------------------------------------------------------------- /molecule/armbian/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | tasks: 5 | 6 | - name: Stat armbian-n1 7 | ansible.builtin.stat: 8 | path: "{{ image_armbian_release_directory }}/armbian-n1.img" 9 | register: stat_armbian_n1_img 10 | - name: Check armbian-n1 11 | ansible.builtin.assert: 12 | that: stat_armbian_n1_img is success 13 | 14 | - name: Stat armbian-n2 15 | ansible.builtin.stat: 16 | path: "{{ image_armbian_release_directory }}/armbian-n2.img" 17 | register: stat_armbian_n2_img 18 | - name: Check armbian-n2 19 | ansible.builtin.assert: 20 | that: stat_armbian_n2_img is success 21 | 22 | - name: Stat armbian-n3 23 | ansible.builtin.stat: 24 | path: "{{ image_armbian_release_directory }}/armbian-n3.img" 25 | register: stat_armbian_n3_img 26 | - name: Check armbian-n3 27 | ansible.builtin.assert: 28 | that: stat_armbian_n3_img is success 29 | 30 | - name: Stat armbian-n4 31 | ansible.builtin.stat: 32 | path: "{{ image_armbian_release_directory }}/armbian-n4.img" 33 | register: stat_armbian_n4_img 34 | - name: Check armbian-n4 35 | 
ansible.builtin.assert: 36 | that: stat_armbian_n4_img is success 37 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/tasks/get-aosc-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Stats {{ image_aosc_base_archive_path }}" 4 | delegate_to: localhost 5 | ansible.builtin.stat: 6 | path: "{{ image_aosc_base_archive_path }}" 7 | get_attributes: no 8 | get_checksum: no 9 | get_mime: no 10 | register: downloaded_aosc_archive_file 11 | 12 | - name: "Stats {{ image_aosc_base_image_path }}" 13 | delegate_to: localhost 14 | ansible.builtin.stat: 15 | path: "{{ image_aosc_base_image_path }}" 16 | get_attributes: no 17 | get_checksum: no 18 | get_mime: no 19 | register: unpacked_aosc_image_file 20 | 21 | - name: Fetch aosc image 22 | delegate_to: localhost 23 | when: not downloaded_aosc_archive_file.stat.exists and not unpacked_aosc_image_file.stat.exists 24 | ansible.builtin.get_url: 25 | url: "{{ image_aosc_image_url }}" 26 | dest: "{{ image_aosc_base_archive_path }}" 27 | force: false 28 | 29 | - name: Unpack aosc archive 30 | delegate_to: localhost 31 | when: not unpacked_aosc_image_file.stat.exists 32 | ansible.builtin.command: 33 | chdir: "{{ image_aosc_working_directory }}" 34 | cmd: "unlz4 {{ image_aosc_base_archive_path }}" 35 | register: unpack 36 | 37 | - name: debug unpack 38 | ansible.builtin.debug: 39 | var: unpack 40 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | homecloud_node_ip: '' 4 | homecloud_node_mac: '' 5 | homecloud_network_netmask: '' 6 | homecloud_network_gateway: '' 7 | homecloud_network_nameserver: [ ] 8 | 9 | homecloud_wifi_enabled: '0' 10 | homecloud_wifi_ssid: '' 11 | homecloud_wifi_key: '' 12 | homecloud_wifi_countrycode: '' 13 | 14 | 
# the url to get the armbian archive (c.f. https://dl.armbian.com) 15 | image_armbian_image_url: "" 16 | # the name of the armbian archive (for instance Armbian_20.05.1_Rock64_bionic_current_5.4.43_desktop.img.xz) 17 | image_armbian_archive_name: "" 18 | # the name of the armbian image (for instance Armbian_20.05.1_Rock64_bionic_current_5.4.43_desktop.img) 19 | image_armbian_image_name: "" 20 | # clean the working directory of each host 21 | image_armbian_clean_working_directory: true 22 | # the directory where images will be temporary mounted and patched 23 | image_armbian_working_directory: "/tmp/homecloud/armbian_workdir" 24 | # the path to the SSH public key used by ansible 25 | image_armbian_ssh_pub_key_path: "~/.ssh/id_rsa.pub" 26 | # the username of the user which will be used by ansible 27 | image_armbian_username: "homecloud" 28 | # the directory where images will be released 29 | image_armbian_release_directory: "/tmp/homecloud/armbian_release" 30 | -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Deploy Traefik 4 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 5 | block: 6 | - name: Deploy traefik.yaml 7 | become: true 8 | ansible.builtin.template: 9 | src: traefik.yaml 10 | dest: /var/lib/rancher/k3s/server/manifests/homecloud-traefik.yaml 11 | mode: '0644' 12 | force: true 13 | changed_when: false 14 | - name: Deploy traefik-dashboard-ingress.yml 15 | when: k3s_traefik_dashboard_users 16 | become: true 17 | ansible.builtin.template: 18 | src: traefik-dashboard-ingress.yml 19 | dest: /var/lib/rancher/k3s/server/manifests/homecloud-traefik-dashboard-ingress.yml 20 | mode: '0644' 21 | force: true 22 | changed_when: false 23 | - name: Wait for Traefik 24 | become: true 25 | ansible.builtin.command: kubectl -n kube-system get deploy traefik \ 26 | -o 
jsonpath='{.status.conditions[?(@.type=="Available")].status}' 27 | changed_when: false 28 | register: kubcetl_get_traefik 29 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(3) }}" 30 | retries: "{{ homecloud_k8s_deploy_timeout_retries | default(40) }}" 31 | until: "'True' in kubcetl_get_traefik.stdout" 32 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/tasks/get-armbian-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Stats {{ image_armbian_base_archive_path }}" 4 | delegate_to: localhost 5 | ansible.builtin.stat: 6 | path: "{{ image_armbian_base_archive_path }}" 7 | get_attributes: no 8 | get_checksum: no 9 | get_mime: no 10 | register: downloaded_armbian_archive_file 11 | 12 | - name: "Stats {{ image_armbian_base_image_path }}" 13 | delegate_to: localhost 14 | ansible.builtin.stat: 15 | path: "{{ image_armbian_base_image_path }}" 16 | get_attributes: no 17 | get_checksum: no 18 | get_mime: no 19 | register: unpacked_armbian_image_file 20 | 21 | - name: Fetch armbian image 22 | delegate_to: localhost 23 | when: not downloaded_armbian_archive_file.stat.exists and not unpacked_armbian_image_file.stat.exists 24 | ansible.builtin.get_url: 25 | url: "{{ image_armbian_image_url }}" 26 | dest: "{{ image_armbian_base_archive_path }}" 27 | force: false 28 | 29 | - name: Unpack armbian archive 30 | delegate_to: localhost 31 | when: not unpacked_armbian_image_file.stat.exists 32 | ansible.builtin.command: 33 | chdir: "{{ image_armbian_working_directory }}" 34 | cmd: "unxz {{ image_armbian_base_archive_path }}" 35 | register: unpack 36 | 37 | - name: debug unpack 38 | ansible.builtin.debug: 39 | var: unpack 40 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - 
name: Install and bootstrap Synchting 4 | when: k3s_dnas_mount_what | length > 0 5 | block: 6 | - name: Include task 7 | ansible.builtin.import_tasks: host_install-mount.yml 8 | - name: Include task 9 | ansible.builtin.import_tasks: host_add-user.yml 10 | - name: Include task 11 | ansible.builtin.import_tasks: host_install-syncthing.yml 12 | - name: Include task 13 | ansible.builtin.import_tasks: host_configure-syncthing.yml 14 | 15 | - name: Deploy dnas nodes 16 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 17 | block: 18 | - name: Add node label for dnas node 19 | become: true 20 | ansible.builtin.command: | 21 | kubectl label node --overwrite {{ item.ansible_hostname }} \ 22 | dnas.morin.io/node=true \ 23 | dnas.morin.io/hostname={{ item.ansible_hostname }} 24 | loop: "{{ k3s_dnas_nodes }}" 25 | loop_control: 26 | label: "{{ item.ansible_hostname }}" 27 | register: set_node_label 28 | changed_when: not 'not labeled' in set_node_label.stdout 29 | - name: Deploy the Kubernetes manifest 30 | ansible.builtin.include_tasks: k3s_deploy.yml 31 | loop: "{{ k3s_dnas_nodes }}" 32 | loop_control: 33 | label: "{{ item.ansible_hostname }}" 34 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/configure-local-kubectl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Configure the ansible agent 4 | when: service_k3s_local_kubectl_config_file | length > 0 5 | block: 6 | - name: Create the kubectl config directory 7 | delegate_to: localhost 8 | ansible.builtin.file: 9 | path: "{{ service_k3s_local_kubectl_config_file | dirname }}" 10 | mode: '0700' 11 | state: directory 12 | recurse: true 13 | changed_when: false 14 | - name: Fetch the k8s configuration 15 | become: true 16 | ansible.builtin.fetch: 17 | src: /etc/rancher/k3s/k3s.yaml 18 | dest: "{{ service_k3s_local_kubectl_config_file }}" 19 | flat: true 20 | changed_when: false 21 | - 
name: Fix the k8s configuration content 22 | delegate_to: localhost 23 | ansible.builtin.lineinfile: 24 | path: "{{ service_k3s_local_kubectl_config_file }}" 25 | regexp: " server: https://127.0.0.1:6443" 26 | line: " server: https://{{ service_k3s_server_primary_ip }}:6443" 27 | changed_when: false 28 | - name: Fix the k8s configuration permissions 29 | delegate_to: localhost 30 | ansible.builtin.file: 31 | path: "{{ service_k3s_local_kubectl_config_file }}" 32 | mode: '0600' 33 | state: file 34 | changed_when: false 35 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/tasks/fetch-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Stats {{ image_ubuntu_raspi_base_archive_path }}" 4 | delegate_to: localhost 5 | ansible.builtin.stat: 6 | path: "{{ image_ubuntu_raspi_base_archive_path }}" 7 | get_attributes: no 8 | get_checksum: no 9 | get_mime: no 10 | register: downloaded_ubuntu_archive_file 11 | 12 | - name: "Stats {{ image_ubuntu_raspi_base_image_path }}" 13 | delegate_to: localhost 14 | ansible.builtin.stat: 15 | path: "{{ image_ubuntu_raspi_base_image_path }}" 16 | get_attributes: no 17 | get_checksum: no 18 | get_mime: no 19 | register: unpacked_ubuntu_image_file 20 | 21 | - name: Fetch ubuntu image 22 | delegate_to: localhost 23 | when: not downloaded_ubuntu_archive_file.stat.exists and not unpacked_ubuntu_image_file.stat.exists 24 | ansible.builtin.get_url: 25 | url: "{{ image_ubuntu_raspi_image_url }}" 26 | dest: "{{ image_ubuntu_raspi_base_archive_path }}" 27 | force: false 28 | 29 | - name: Unpack ubuntu archive 30 | delegate_to: localhost 31 | when: not unpacked_ubuntu_image_file.stat.exists 32 | ansible.builtin.command: 33 | chdir: "{{ image_ubuntu_raspi_working_directory }}" 34 | cmd: "unxz {{ image_ubuntu_raspi_base_archive_path }}" 35 | register: unpack 36 | 37 | - name: debug unpack 38 | ansible.builtin.debug: 39 | 
var: unpack 40 | -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/templates/traefik.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.cattle.io/v1 3 | kind: HelmChart 4 | metadata: 5 | name: traefik 6 | namespace: kube-system 7 | spec: 8 | repo: https://helm.traefik.io/traefik 9 | chart: traefik 10 | valuesContent: |- 11 | ports: 12 | web: 13 | nodePort: 32080 14 | redirectTo: websecure 15 | websecure: 16 | nodePort: 32443 17 | tls: 18 | enabled: true 19 | certResolver: le 20 | #netbios-ss: 21 | # nodePort: 30139 22 | # port: 30139 23 | # exposedPort: 139 24 | # expose: true 25 | # protocol: TCP 26 | #microsoft-ds: 27 | # nodePort: 30445 28 | # port: 30445 29 | # exposedPort: 445 30 | # expose: true 31 | # protocol: TCP 32 | persistence: 33 | enabled: true 34 | storageClass: {{ k3s_traefik_persistence_storage_class }} 35 | additionalArguments: 36 | - "--certificatesresolvers.le.acme.httpchallenge=true" 37 | - "--certificatesresolvers.le.acme.httpchallenge.entrypoint=web" 38 | - "--certificatesresolvers.le.acme.email={{ k3s_traefik_acme_email }}" 39 | - "--certificatesresolvers.le.acme.caserver={{ k3s_traefik_acme_server }}" 40 | - "--certificatesresolvers.le.acme.storage=/data/acme.json" 41 | resources: {{ k3s_traefik_resources | to_json }} 42 | --- 43 | -------------------------------------------------------------------------------- /paper/bs-files_synchronization.puml: -------------------------------------------------------------------------------- 1 | @startuml bs-files_synchronization-usecases 2 | !global $INCLUSION_MODE="local" 3 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 4 | !include $LIB_BASE_LOCATION/bootstrap.puml 5 | include('homecloud-2/bootstrap') 6 | !include ./nord.puml 7 | include('material-4/Social/Person') 8 | include('material-4/Device/Devices') 9 | Person("owner", "owner") 10 | Devices("devices", "devices") 
11 | Title("Files Synchronization", "Business Scenario / Usecase Diagram") 12 | rectangle homecloud { 13 | usecase manage_files as "manage files" 14 | usecase synchronize_filess as "synchronize files" 15 | synchronize_filess <.r. manage_files : ««extends» » 16 | } 17 | owner -u-> manage_files 18 | devices -u-> synchronize_filess 19 | @enduml 20 | 21 | @startuml bs-files_synchronization-value_stream 22 | !global $INCLUSION_MODE="local" 23 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 24 | !include $LIB_BASE_LOCATION/bootstrap.puml 25 | include('homecloud-2/bootstrap') 26 | !include ./nord.puml 27 | Title("Handle Files Changes", "Business Scenario / Value Stream") 28 | left to right direction 29 | rectangle "Change Files\non Device" as change_files <> 30 | rectangle "Synchronize Files" as synchronize_files <> 31 | rectangle "Manage Files\non //homecloud//" as manage_files <> 32 | change_files --> synchronize_files 33 | synchronize_files --> manage_files 34 | @enduml 35 | -------------------------------------------------------------------------------- /molecule/k2/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | options: 6 | requirements-file: molecule/resources/collections.yml 7 | 8 | driver: 9 | name: vagrant 10 | provider: 11 | name: libvirt 12 | 13 | platforms: 14 | - name: k2-n1 15 | box: "${MOLECULE_N1_BOX-generic/debian11}" 16 | memory: 2096 17 | cpu: 2 18 | interfaces: 19 | - network_name: 'private_network' 20 | ip: '192.168.200.11' 21 | libvirt__domain_name: 'homecloud.local' 22 | provider_raw_config_args: 23 | - "storage :file, :size => '2G'" 24 | - name: k2-n2 25 | box: "${MOLECULE_N2_BOX-generic/ubuntu2110}" 26 | memory: 2096 27 | cpu: 2 28 | interfaces: 29 | - network_name: 'private_network' 30 | ip: '192.168.200.12' 31 | libvirt__domain_name: 'homecloud.local' 32 | provider_raw_config_args: 33 | - "storage :file, :size => '2G'" 34 | 35 | provisioner: 
36 | name: ansible 37 | env: 38 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 39 | config_options: 40 | ssh_connection: 41 | scp_if_ssh: true 42 | control_path: null 43 | inventory: 44 | links: 45 | hosts: inventory/hosts.yml 46 | group_vars: inventory/group_vars/ 47 | host_vars: inventory/host_vars/ 48 | 49 | verifier: 50 | name: ansible 51 | -------------------------------------------------------------------------------- /molecule/k2ha/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | dependency: 4 | name: galaxy 5 | options: 6 | requirements-file: molecule/resources/collections.yml 7 | 8 | driver: 9 | name: vagrant 10 | provider: 11 | name: libvirt 12 | 13 | platforms: 14 | - name: k2ha-n1 15 | box: "${MOLECULE_N1_BOX-generic/debian11}" 16 | memory: 2096 17 | cpu: 2 18 | interfaces: 19 | - network_name: 'private_network' 20 | ip: '192.168.200.21' 21 | libvirt__domain_name: 'homecloud.local' 22 | provider_raw_config_args: 23 | - "storage :file, :size => '2G'" 24 | - name: k2ha-n2 25 | box: "${MOLECULE_N2_BOX-generic/ubuntu2110}" 26 | memory: 2096 27 | cpu: 2 28 | interfaces: 29 | - network_name: 'private_network' 30 | ip: '192.168.200.22' 31 | libvirt__domain_name: 'homecloud.local' 32 | provider_raw_config_args: 33 | - "storage :file, :size => '2G'" 34 | 35 | provisioner: 36 | name: ansible 37 | env: 38 | ANSIBLE_ROLES_PATH: "$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:$MOLECULE_PROJECT_DIRECTORY/collection/roles" 39 | config_options: 40 | ssh_connection: 41 | scp_if_ssh: true 42 | control_path: null 43 | inventory: 44 | links: 45 | hosts: inventory/hosts.yml 46 | group_vars: inventory/group_vars/ 47 | host_vars: inventory/host_vars/ 48 | 49 | verifier: 50 | name: ansible 51 | 
-------------------------------------------------------------------------------- /paper/bs-photos_synchronization.puml: -------------------------------------------------------------------------------- 1 | @startuml bs-photos_synchronization-usecases 2 | !global $INCLUSION_MODE="local" 3 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 4 | !include $LIB_BASE_LOCATION/bootstrap.puml 5 | include('homecloud-2/bootstrap') 6 | !include ./nord.puml 7 | include('material-4/Social/Person') 8 | include('material-4/Device/Devices') 9 | Person("owner", "owner") 10 | Devices("devices", "devices") 11 | Title("Photos Synchronization", "Business Scenario / Usecase Diagram") 12 | rectangle homecloud { 13 | usecase manage_photos as "manage photos" 14 | usecase synchronize_photos as "synchronize photos" 15 | synchronize_photos <.r. manage_photos : ««extends» » 16 | } 17 | owner -u-> manage_photos 18 | devices -u-> synchronize_photos 19 | @enduml 20 | 21 | @startuml bs-photos_synchronization-value_stream 22 | !global $INCLUSION_MODE="local" 23 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 24 | !include $LIB_BASE_LOCATION/bootstrap.puml 25 | include('homecloud-2/bootstrap') 26 | !include ./nord.puml 27 | Title("Handle Photos Changes", "Business Scenario / Value Stream") 28 | left to right direction 29 | rectangle "Change Photos\non Device" as change_photos <> 30 | rectangle "Synchronize Photos" as synchronize_photo <> 31 | rectangle "Manage Photos\non //homecloud//" as manage_photos <> 32 | change_photos --> synchronize_photo 33 | synchronize_photo --> manage_photos 34 | @enduml 35 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/k3s-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add kubectl completion 4 | become: true 5 | ansible.builtin.shell: kubectl completion bash > /etc/bash_completion.d/kubectl 6 | changed_when: false 7 | 8 | 
- name: Install helm 9 | become: true 10 | ansible.builtin.shell: | 11 | set -eo pipefail 12 | curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash 13 | /usr/local/bin/helm completion bash > /etc/bash_completion.d/helm 14 | args: 15 | executable: /bin/bash 16 | warn: false 17 | changed_when: false 18 | 19 | - name: Install kustomize 20 | become: true 21 | ansible.builtin.shell: | 22 | set -eo pipefail 23 | ARCH=$(uname -m) 24 | case $ARCH in 25 | amd64) 26 | ARCH=amd64 27 | ;; 28 | x86_64) 29 | ARCH=amd64 30 | ;; 31 | arm64) 32 | ARCH=arm64 33 | ;; 34 | aarch64) 35 | ARCH=arm64 36 | ;; 37 | *) 38 | fatal "Unsupported architecture $ARCH" 39 | esac 40 | curl -sfL https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2F{{ service_k3s_kustomize_release }}/kustomize_{{ service_k3s_kustomize_release }}_linux_${ARCH}.tar.gz | \ 41 | tar xz -C /usr/local/bin/ 42 | /usr/local/bin/kustomize completion bash > /etc/bash_completion.d/kustomize 43 | args: 44 | executable: /bin/bash 45 | creates: /usr/local/bin/kustomize 46 | warn: false 47 | failed_when: false 48 | -------------------------------------------------------------------------------- /paper/bs-files_sharing.puml: -------------------------------------------------------------------------------- 1 | @startuml bs-files_sharing-usecases 2 | !global $INCLUSION_MODE="local" 3 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 4 | !include $LIB_BASE_LOCATION/bootstrap.puml 5 | include('homecloud-2/bootstrap') 6 | !include ./nord.puml 7 | include('material-4/Social/Person') 8 | include('material-4/Device/Devices') 9 | Person("owner", "owner") 10 | Person("recipient", "recipient") 11 | Devices("devices", "devices") 12 | Title("Files sharing", "Business Scenario / Usecase Diagram") 13 | rectangle homecloud { 14 | usecase share_files as "share files" 15 | usecase get_shared_files as "get shared files" 16 | share_files <.r. 
get_shared_files : ««extends» » 17 | } 18 | owner -u-> share_files 19 | recipient -u-> get_shared_files 20 | devices -u-> get_shared_files 21 | @enduml 22 | 23 | @startuml bs-files_sharing-value_stream 24 | !global $INCLUSION_MODE="local" 25 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 26 | !include $LIB_BASE_LOCATION/bootstrap.puml 27 | include('homecloud-2/bootstrap') 28 | !include ./nord.puml 29 | Title("Share Files", "Business Scenario / Value Stream") 30 | left to right direction 31 | rectangle "Select Shared Files" as select_shared_files <> 32 | rectangle "Provide Shared Files" as provide_shared_files <> 33 | rectangle "Consume Shared Files" as consume_shared_files <> 34 | select_shared_files --> provide_shared_files 35 | provide_shared_files --> consume_shared_files 36 | @enduml 37 | -------------------------------------------------------------------------------- /collection/roles/k3s_traefik/templates/traefik-dashboard-ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: traefik-dashboard-auth 6 | namespace: kube-system 7 | data: 8 | users: "{{ k3s_traefik_dashboard_users }}" 9 | --- 10 | apiVersion: traefik.containo.us/v1alpha1 11 | kind: Middleware 12 | metadata: 13 | name: traefik-dashboard-auth 14 | namespace: kube-system 15 | spec: 16 | basicAuth: 17 | secret: traefik-dashboard-auth 18 | removeHeader: true 19 | --- 20 | apiVersion: traefik.containo.us/v1alpha1 21 | kind: Middleware 22 | metadata: 23 | name: traefik-dashboard-redirect 24 | namespace: kube-system 25 | spec: 26 | redirectRegex: 27 | regex: ^(.*)://{{ k3s_traefik_dashboard_ingress_host }}(/|/dashboard)$ 28 | replacement: ${1}://{{ k3s_traefik_dashboard_ingress_host }}/dashboard/ 29 | permanent: true 30 | --- 31 | apiVersion: traefik.containo.us/v1alpha1 32 | kind: IngressRoute 33 | metadata: 34 | name: traefik-dashboard 35 | namespace: kube-system 36 | spec: 37 | 
entryPoints: 38 | - websecure 39 | routes: 40 | - match: Host(`{{ k3s_traefik_dashboard_ingress_host }}`) 41 | kind: Rule 42 | services: 43 | - name: api@internal 44 | kind: TraefikService 45 | middlewares: 46 | - name: traefik-dashboard-auth 47 | - name: traefik-dashboard-redirect 48 | --- 49 | # curl -IkH host:traefik.home.cloud http://localhost:32080 50 | # curl -IkH host:traefik.home.cloud https://localhost:32443 51 | -------------------------------------------------------------------------------- /collection/README.md: -------------------------------------------------------------------------------- 1 | # tmorin.homecloud 2 | 3 | [![badge for github repository](https://img.shields.io/badge/GitHub-tmorin/homecloud--ansible-informational?logo=github&logoColor=white)](https://github.com/tmorin/homecloud-ansible) 4 | [![badge for HTML paper](https://img.shields.io/badge/Paper-HTML-informational)](https://tmorin.github.io/homecloud-ansible) 5 | [![badge for PDF paper](https://img.shields.io/badge/Paper-PDF-informational)](https://tmorin.github.io/homecloud-ansible/homecloud-paper.pdf) 6 | 7 | [![GitHub Workflow Status (branch)](https://img.shields.io/github/workflow/status/tmorin/homecloud-ansible/Continous%20Integration/master?label=GitHub%20Actions&logo=github+actions&logoColor=white)](https://github.com/tmorin/homecloud-ansible/actions?query=workflow%3A%22Continous+Integration%22+branch%3Amaster) 8 | [![Travis (.org) branch](https://img.shields.io/travis/tmorin/homecloud-ansible/master?label=Travis%20CI&logo=travis+CI&logoColor=white)](https://travis-ci.org/github/tmorin/homecloud-ansible) 9 | 10 | > `homecloud` provides a ready-to-use set of resources to bootstrap a cloud at home mainly based on Kubernetes and Syncthing. 11 | 12 | ## Presentation 13 | 14 | `homecloud` aims to provide a cloud like environment, especially an internal cloud, at home. 
15 | The underlying infrastructure is primarily based on low cost ARM boards, like Raspberry Pi, and powered by open source solutions like Kubernetes or Syncthing. 16 | 17 | The main artifact is an Ansible collection designed to bootstrap a ready to use cloud like environment as well as a couple of end-users services. 18 | 19 | The documentation is available on [GitHub](https://github.com/tmorin/homecloud-ansible#readme). 20 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/files/customizer.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT_FS="" 4 | CREATE_USER="homecloud" 5 | LOCK_ROOT="yes" 6 | 7 | POSITIONAL=() 8 | while [[ $# -gt 0 ]]; do 9 | key="$1" 10 | case ${key} in 11 | -rf | --root-fs) 12 | ROOT_FS="$2" 13 | shift 14 | shift 15 | ;; 16 | -cu | --change-username) 17 | CREATE_USER="$2" 18 | shift 19 | shift 20 | ;; 21 | -dcu | --disable-create-user) 22 | CREATE_USER="" 23 | shift 24 | ;; 25 | -dlr | --disable-lock-root) 26 | LOCK_ROOT="no" 27 | shift 28 | ;; 29 | *) 30 | POSITIONAL+=("$1") 31 | shift 32 | ;; 33 | esac 34 | done 35 | set -- "${POSITIONAL[@]}" # restore positional parameters 36 | 37 | if [[ -z "${ROOT_FS}" ]]; then 38 | echo "error : [-rf|--root-fs ] is required" 39 | exit 1 40 | fi 41 | 42 | if [[ -n "${CREATE_USER}" ]]; then 43 | chroot "${ROOT_FS}" /bin/bash -exc " 44 | echo create the user ${CREATE_USER} 45 | # create the user with the password disabled 46 | useradd -m -d /home/${CREATE_USER} -s /bin/bash ${CREATE_USER} 47 | # prepare the .ssh directory 48 | mkdir -p /home/${CREATE_USER}/.ssh 49 | touch /home/${CREATE_USER}/.ssh/authorized_keys 50 | chown -R ${CREATE_USER}:${CREATE_USER} /home/${CREATE_USER} 51 | chmod 700 /home/${CREATE_USER}/.ssh 52 | chmod 600 /home/${CREATE_USER}/.ssh/authorized_keys 53 | # add the user to sudoers people without password 54 | echo \"${CREATE_USER} ALL=(ALL) 
NOPASSWD: ALL\" >> /etc/sudoers.d/${CREATE_USER} 55 | chmod 0440 /etc/sudoers.d/${CREATE_USER}" 56 | fi 57 | 58 | if [[ "${LOCK_ROOT}" == "yes" ]]; then 59 | chroot "${ROOT_FS}" /bin/bash -exc " 60 | usermod --lock root 61 | passwd -l root 62 | chage -d $(date "+%F") -E 2999-01-01 -I -1 -m 0 -M 999999 -W 31 root" 63 | fi 64 | -------------------------------------------------------------------------------- /collection/roles/k3s_dashboard/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Deploy the k8s dashboard 4 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 5 | block: 6 | - name: Copy customization.yml 7 | become: true 8 | ansible.builtin.template: 9 | src: customization.yml 10 | dest: /var/lib/rancher/k3s/server/manifests/homecloud-dashboard-customization.yml 11 | force: true 12 | mode: 0644 13 | changed_when: false 14 | - name: Resolve latest dashboard version 15 | ansible.builtin.shell: curl -w '%{url_effective}' -I -L -s \ 16 | -S https://github.com/kubernetes/dashboard/releases/{{ k3s_dashboard_release }} \ 17 | -o /dev/null | sed -e 's|.*/||' 18 | register: resolve_latest_dashboard_version 19 | changed_when: false 20 | args: 21 | warn: false 22 | - name: Deploy the k8s dashboard 23 | become: true 24 | ansible.builtin.get_url: 25 | url: "https://raw.githubusercontent.com/kubernetes/dashboard/{{ resolve_latest_dashboard_version.stdout }}/aio/deploy/recommended.yaml" 26 | dest: /var/lib/rancher/k3s/server/manifests/homecloud-dashboard-recommended.yaml 27 | force: true 28 | register: deploy_k3s_dashboard 29 | changed_when: false 30 | - name: Wait for k8s dashboard 31 | become: true 32 | ansible.builtin.command: kubectl -n kubernetes-dashboard get deploy kubernetes-dashboard \ 33 | -o jsonpath='{.status.conditions[?(@.type=="Available")].status}' 34 | changed_when: false 35 | register: kubcetl_result 36 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(3) }}" 37 | 
retries: "{{ homecloud_k8s_deploy_timeout_retries | default(40) }}" 38 | until: "'True' in kubcetl_result.stdout" 39 | -------------------------------------------------------------------------------- /molecule/resources/deployments/test-csi-smb-dnas-step1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: test-csi-smb-dnas 6 | namespace: default 7 | data: 8 | password: ZG5hcw== 9 | username: ZG5hcw== 10 | --- 11 | apiVersion: v1 12 | kind: PersistentVolume 13 | metadata: 14 | name: test-csi-smb-dnas 15 | namespace: default 16 | spec: 17 | storageClassName: samba 18 | capacity: 19 | storage: 10Mi 20 | accessModes: 21 | - ReadWriteOnce 22 | persistentVolumeReclaimPolicy: Retain 23 | mountOptions: 24 | - vers=3.0 25 | csi: 26 | driver: smb.csi.k8s.io 27 | readOnly: false 28 | volumeHandle: test-csi-smb-dnas 29 | volumeAttributes: 30 | source: "//{{ (groups.k3s | map('extract', hostvars) | list | selectattr('k3s_dnas_mount_what', 'defined'))[0].homecloud_node_ip }}/dnas" 31 | nodeStageSecretRef: 32 | name: test-csi-smb-dnas 33 | namespace: default 34 | --- 35 | kind: PersistentVolumeClaim 36 | apiVersion: v1 37 | metadata: 38 | name: test-csi-smb-dnas 39 | spec: 40 | storageClassName: samba 41 | accessModes: 42 | - ReadWriteOnce 43 | resources: 44 | requests: 45 | storage: 10Mi 46 | volumeName: test-csi-smb-dnas 47 | --- 48 | apiVersion: v1 49 | kind: Pod 50 | metadata: 51 | name: test-csi-smb-dnas-step1 52 | namespace: default 53 | spec: 54 | containers: 55 | - name: step1 56 | image: busybox 57 | imagePullPolicy: IfNotPresent 58 | command: 59 | - "sh" 60 | - "-c" 61 | - "echo 'test-csi-smb-dnas-step1' > /data/file" 62 | volumeMounts: 63 | - name: data 64 | mountPath: /data 65 | restartPolicy: Never 66 | volumes: 67 | - name: data 68 | persistentVolumeClaim: 69 | claimName: test-csi-smb-dnas 70 | --- 71 | 
-------------------------------------------------------------------------------- /collection/roles/image_aosc/templates/armbian_first_run.txt: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------- 2 | # aosc first run configuration 3 | # Set optional end user configuration 4 | # - Rename this file from /boot/aosc_first_run.txt.template to /boot/aosc_first_run.txt 5 | # - Settings below will be applied only on 1st run of aosc 6 | #----------------------------------------------------------------- 7 | 8 | #----------------------------------------------------------------- 9 | # General: 10 | # 1 = delete this file, after first run setup is completed. 11 | 12 | FR_general_delete_this_file_after_completion=1 13 | 14 | #----------------------------------------------------------------- 15 | #Networking: 16 | # Change default network settings 17 | # Set to 1 to apply any network related settings below 18 | 19 | FR_net_change_defaults=1 20 | 21 | # Enable WiFi or Ethernet. 22 | # NB: If both are enabled, WiFi will take priority and Ethernet will be disabled. 23 | 24 | FR_net_ethernet_enabled=1 25 | FR_net_wifi_enabled=0 26 | 27 | #Enter your WiFi creds 28 | # SECURITY WARN: Your wifi keys will be stored in plaintext, no encryption. 29 | 30 | FR_net_wifi_ssid='MySSID' 31 | FR_net_wifi_key='MyWiFiKEY' 32 | 33 | # Country code to enable power ratings and channels for your country. eg: GB US DE | https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 34 | 35 | FR_net_wifi_countrycode='GB' 36 | 37 | #If you want to use a static ip, set it here 38 | 39 | FR_net_use_static=1 40 | FR_net_static_ip='{{ homecloud_node_ip }}' 41 | FR_net_static_mask='{{ homecloud_network_netmask }}' 42 | FR_net_static_gateway='{{ homecloud_network_gateway }}' 43 | FR_net_static_dns='{{ homecloud_network_nameservers | join(' ') }}' #2 entries max, seperated by a space. 
44 | #----------------------------------------------------------------- 45 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/templates/armbian_first_run.txt: -------------------------------------------------------------------------------- 1 | #----------------------------------------------------------------- 2 | # Armbian first run configuration 3 | # Set optional end user configuration 4 | # - Rename this file from /boot/armbian_first_run.txt.template to /boot/armbian_first_run.txt 5 | # - Settings below will be applied only on 1st run of Armbian 6 | #----------------------------------------------------------------- 7 | 8 | #----------------------------------------------------------------- 9 | # General: 10 | # 1 = delete this file, after first run setup is completed. 11 | 12 | FR_general_delete_this_file_after_completion=1 13 | 14 | #----------------------------------------------------------------- 15 | #Networking: 16 | # Change default network settings 17 | # Set to 1 to apply any network related settings below 18 | 19 | FR_net_change_defaults=1 20 | 21 | # Enable WiFi or Ethernet. 22 | # NB: If both are enabled, WiFi will take priority and Ethernet will be disabled. 23 | 24 | FR_net_ethernet_enabled=1 25 | FR_net_wifi_enabled={{ homecloud_wifi_enabled }} 26 | 27 | #Enter your WiFi creds 28 | # SECURITY WARN: Your wifi keys will be stored in plaintext, no encryption. 29 | 30 | FR_net_wifi_ssid='{{ homecloud_wifi_ssid }}' 31 | FR_net_wifi_key='{{ homecloud_wifi_key }}' 32 | 33 | # Country code to enable power ratings and channels for your country. 
eg: GB US DE | https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2 34 | 35 | FR_net_wifi_countrycode='{{ homecloud_wifi_countrycode }}' 36 | 37 | #If you want to use a static ip, set it here 38 | 39 | FR_net_use_static=1 40 | FR_net_static_ip='{{ homecloud_node_ip }}' 41 | FR_net_static_mask='{{ homecloud_network_netmask }}' 42 | FR_net_static_gateway='{{ homecloud_network_gateway }}' 43 | FR_net_static_dns='{{ homecloud_network_nameservers | join(' ') }}' #2 entries max, seperated by a space. 44 | #----------------------------------------------------------------- 45 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/files/customizer.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ROOT_FS="" 4 | CREATE_USER="homecloud" 5 | DELETE_INITIAL_USER="yes" 6 | LOCK_ROOT="yes" 7 | 8 | POSITIONAL=() 9 | while [[ $# -gt 0 ]]; do 10 | key="$1" 11 | case ${key} in 12 | -rf | --root-fs) 13 | ROOT_FS="$2" 14 | shift 15 | shift 16 | ;; 17 | -cu | --change-username) 18 | CREATE_USER="$2" 19 | shift 20 | shift 21 | ;; 22 | -dcu | --disable-create-user) 23 | CREATE_USER="" 24 | shift 25 | ;; 26 | -dlr | --disable-lock-root) 27 | LOCK_ROOT="no" 28 | shift 29 | ;; 30 | *) 31 | POSITIONAL+=("$1") 32 | shift 33 | ;; 34 | esac 35 | done 36 | set -- "${POSITIONAL[@]}" # restore positional parameters 37 | 38 | if [[ -z "${ROOT_FS}" ]]; then 39 | echo "error : [-rf|--root-fs ] is required" 40 | exit 1 41 | fi 42 | 43 | if [[ "${DELETE_INITIAL_USER}" == "yes" ]]; then 44 | chroot "${ROOT_FS}" /bin/bash -exc "userdel -rf aosc" 45 | fi 46 | 47 | if [[ -n "${CREATE_USER}" ]]; then 48 | chroot "${ROOT_FS}" /bin/bash -exc " 49 | # create the user with the password disabled 50 | useradd -m -d /home/${CREATE_USER} -s /bin/bash ${CREATE_USER} 51 | # prepare the .ssh directory 52 | mkdir -p /home/${CREATE_USER}/.ssh 53 | touch /home/${CREATE_USER}/.ssh/authorized_keys 54 | 
chown -R ${CREATE_USER}:${CREATE_USER} /home/${CREATE_USER} 55 | chmod 700 /home/${CREATE_USER}/.ssh 56 | chmod 600 /home/${CREATE_USER}/.ssh/authorized_keys 57 | # add the user to sudoers people without password 58 | echo \"${CREATE_USER} ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers.d/${CREATE_USER} 59 | chmod 0440 /etc/sudoers.d/${CREATE_USER}" 60 | fi 61 | 62 | if [[ "${LOCK_ROOT}" == "yes" ]]; then 63 | chroot "${ROOT_FS}" /bin/bash -exc " 64 | usermod --lock root 65 | passwd -l root 66 | chage -d $(date "+%F") -E 2999-01-01 -I -1 -m 0 -M 999999 -W 31 root" 67 | fi 68 | 69 | chroot "${ROOT_FS}" /bin/bash -exc "apt-get update --assume-yes" 70 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/k3s_deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: set fact 4 | ansible.builtin.set_fact: 5 | dnas_username: "{{ item.k3s_dnas_username | default(k3s_dnas_username) }}" 6 | dnas_password: "{{ item.k3s_dnas_password | default(k3s_dnas_password) }}" 7 | dnas_uid: "{{ item.k3s_dnas_uid | default(k3s_dnas_uid) }}" 8 | dnas_gid: "{{ item.k3s_dnas_gid | default(k3s_dnas_gid) }}" 9 | dnas_mount_where: "{{ item.k3s_dnas_mount_where | default(k3s_dnas_mount_where) }}" 10 | 11 | - name: Install NFS dependencies 12 | become: true 13 | ansible.builtin.package: 14 | name: nfs-common 15 | state: present 16 | force_apt_get: true 17 | 18 | - name: Create kustomize directory 19 | ansible.builtin.file: 20 | path: "/tmp/resources/kustomize/{{ item.ansible_hostname }}" 21 | state: directory 22 | mode: "0755" 23 | 24 | - name: Copy dnas-share.kustomization.yaml 25 | ansible.builtin.template: 26 | src: dnas-share.kustomization.yaml 27 | dest: "/tmp/resources/kustomize/{{ item.ansible_hostname }}/kustomization.yaml" 28 | mode: '0644' 29 | force: true 30 | changed_when: false 31 | 32 | - name: Copy dnas-share.kustomize.secret.env 33 | ansible.builtin.template: 34 | 
src: dnas-share.kustomize.secret.env 35 | dest: "/tmp/resources/kustomize/{{ item.ansible_hostname }}/dnas-share.kustomize.secret.env" 36 | mode: '0644' 37 | force: true 38 | changed_when: false 39 | 40 | - name: Copy dnas-share.kustomize.yml 41 | ansible.builtin.template: 42 | src: dnas-share.kustomize.yml 43 | dest: "/tmp/resources/kustomize/{{ item.ansible_hostname }}/dnas-share.kustomize.yml" 44 | mode: '0644' 45 | force: true 46 | changed_when: false 47 | 48 | - name: Deploy manifest 49 | become: true 50 | ansible.builtin.shell: | 51 | set -eo pipefail 52 | kustomize build /tmp/resources/kustomize/{{ item.ansible_hostname }} | kubectl apply -f- 53 | args: 54 | executable: /bin/bash 55 | changed_when: false 56 | -------------------------------------------------------------------------------- /collection/roles/service_k3s/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create the vault directory 4 | delegate_to: localhost 5 | ansible.builtin.file: 6 | path: "{{ homecloud_vault_path }}" 7 | state: directory 8 | mode: 0755 9 | 10 | - name: "Install dependencies from apt" 11 | become: true 12 | ansible.builtin.package: 13 | name: 14 | - apparmor-utils 15 | - bash-completion 16 | state: present 17 | force_apt_get: true 18 | 19 | - name: "Create /etc/bash_completion.d" 20 | become: true 21 | ansible.builtin.file: 22 | path: /etc/bash_completion.d 23 | state: directory 24 | owner: root 25 | group: root 26 | mode: "0755" 27 | 28 | - name: Install the primary k3s server 29 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 30 | block: 31 | - name: Include task 32 | ansible.builtin.import_tasks: k3s-server-primary.yml 33 | - name: Include task 34 | ansible.builtin.import_tasks: get-server-token.yml 35 | - name: Include task 36 | ansible.builtin.import_tasks: configure-local-kubectl.yml 37 | 38 | - name: Install the secondaries k3s servers 39 | when: inventory_hostname in 
groups.k3s_srv[1:] | default([]) 40 | block: 41 | - name: Include task 42 | ansible.builtin.import_tasks: k3s-server-secondary.yml 43 | 44 | - name: Configure k3s servers 45 | when: inventory_hostname in groups.k3s_srv | default([]) 46 | block: 47 | - name: Include task 48 | ansible.builtin.import_tasks: k3s-server.yml 49 | 50 | - name: Install the k3s agents 51 | when: inventory_hostname in groups.k3s_agt | default([]) 52 | block: 53 | - name: Include task 54 | ansible.builtin.import_tasks: k3s-agent.yml 55 | 56 | - name: Configure cluster nodes 57 | when: inventory_hostname in groups.k3s_srv[:1] | default([]) 58 | block: 59 | - name: Include task 60 | ansible.builtin.import_tasks: wait-for-nodes.yml 61 | - name: Include task 62 | ansible.builtin.import_tasks: configure_nodes.yml 63 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/tasks/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Resolve latest longhorn version" 4 | ansible.builtin.shell: | 5 | set -eo pipefail 6 | curl -w '%{url_effective}' -I -L -s \ 7 | -S https://github.com/longhorn/longhorn/releases/{{ k3s_longhorn_release }} \ 8 | -o /dev/null | sed -e 's|.*/||' 9 | register: resolve_latest_longhorn_version 10 | changed_when: false 11 | args: 12 | warn: false 13 | executable: /usr/bin/bash 14 | 15 | - name: "Fetch deployment latest manifest" 16 | ansible.builtin.get_url: 17 | url: https://raw.githubusercontent.com/longhorn/longhorn/{{ resolve_latest_longhorn_version.stdout }}/deploy/longhorn.yaml 18 | dest: /tmp/resources/longhorn.yaml 19 | force: true 20 | changed_when: false 21 | 22 | - name: "Apply custom settings" 23 | ansible.builtin.lineinfile: 24 | path: /tmp/resources/longhorn.yaml 25 | regexp: '{{ item[0] }}:' 26 | line: ' {{ item[0] }}: {{ item[1] }}' 27 | changed_when: false 28 | loop: "{{ (k3s_longhorn_settings | default({})).items() }}" 29 | 30 | - name: 
"Enable Create Default Disk on Labeled Nodes" 31 | ansible.builtin.lineinfile: 32 | path: /tmp/resources/longhorn.yaml 33 | regexp: 'create-default-disk-labeled-nodes:' 34 | line: ' create-default-disk-labeled-nodes: true' 35 | changed_when: false 36 | 37 | - name: "Deploy Longhorn" 38 | become: true 39 | ansible.builtin.command: mv -f /tmp/resources/longhorn.yaml /var/lib/rancher/k3s/server/manifests/homecloud-longhorn.yaml 40 | register: deploy_longhorn 41 | changed_when: false 42 | 43 | - name: "Wait for Longhorn" 44 | become: true 45 | ansible.builtin.command: kubectl -n longhorn-system get deploy longhorn-driver-deployer \ 46 | -o jsonpath='{.status.conditions[?(@.type=="Available")].status}' 47 | changed_when: false 48 | register: kubcetl_get_longhorn 49 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(5) }}" 50 | retries: "{{ homecloud_k8s_deploy_timeout_retries | default(80) }}" 51 | until: "'True' in kubcetl_get_longhorn.stdout" 52 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k8s-http-endpoint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Verify {{ k8s_service_name }}/{{ k8s_deploy_name }}" 4 | block: 5 | - name: Create the service 6 | become: true 7 | ansible.builtin.command: kubectl -n {{ k8s_namespace }} expose deploy {{ k8s_deploy_name }} --name {{ k8s_service_name }} --type LoadBalancer 8 | changed_when: false 9 | failed_when: false 10 | - name: Wait for the service {{ k8s_service_name }} 11 | become: true 12 | ansible.builtin.command: kubectl -n {{ k8s_namespace }} get service {{ k8s_service_name }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 13 | changed_when: false 14 | register: kubcetl_result 15 | delay: 5 16 | retries: 6 17 | until: kubcetl_result.stdout | length > 0 18 | - name: Get the load balancer ip 19 | become: true 20 | ansible.builtin.command: kubectl -n {{ k8s_namespace }} get service {{ 
k8s_service_name }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}' 21 | changed_when: false 22 | register: load_balancer_ip 23 | - name: Get the load balancer port 24 | become: true 25 | ansible.builtin.command: kubectl -n {{ k8s_namespace }} get service {{ k8s_service_name }} -o jsonpath='{.spec.ports[0].nodePort}' 26 | changed_when: false 27 | register: load_balancer_port 28 | - name: Fetch the landing page 29 | ansible.builtin.command: curl -Ik {{ k8s_url_protocol }}://{{ load_balancer_ip.stdout }}:{{ load_balancer_port.stdout }} 30 | changed_when: false 31 | register: curl_dashboard 32 | args: 33 | warn: false 34 | - name: Verify the landing page 35 | ansible.builtin.assert: 36 | quiet: true 37 | that: 38 | - not curl_dashboard.failed 39 | - curl_dashboard.stdout_lines[0] | trim == k8s_expected_status 40 | always: 41 | - name: Delete the service 42 | become: true 43 | ansible.builtin.command: kubectl -n {{ k8s_namespace }} delete service {{ k8s_service_name }} 44 | changed_when: false 45 | failed_when: false 46 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k3s_dnas.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Verify Syncthing 4 | when: k3s_dnas_mount_what | length > 0 5 | block: 6 | # MOUNT 7 | - name: Get DNAS mount status # noqa command-instead-of-module 8 | ansible.builtin.shell: | 9 | set -o pipefail 10 | mount | grep '{{ k3s_dnas_mount_what }}'>/dev/null && echo ok || echo ko 11 | args: 12 | executable: /bin/bash 13 | register: dnas_mount_status 14 | changed_when: false 15 | - name: Verify DNAS mount output 16 | ansible.builtin.assert: 17 | quiet: true 18 | that: 19 | - dnas_mount_status.stdout == 'ok' 20 | # GUI 21 | - name: Get DNAS GUI status 22 | ansible.builtin.shell: "curl --user dnas:dnas http://{{ homecloud_node_ip }}:8384>/dev/null && echo ok || echo ko" 23 | args: 24 | warn: false 25 | changed_when: false 26 | 
register: dnas_gui_status 27 | - name: Verify DNAS GUI output 28 | ansible.builtin.assert: 29 | quiet: true 30 | that: 31 | - dnas_gui_status.stdout == 'ok' 32 | # SMB SHARE 33 | - name: Get DNAS share status 34 | become: true 35 | ansible.posix.mount: 36 | path: /mnt/test_smb_share 37 | src: "//{{ homecloud_node_ip }}/dnas" 38 | fstype: cifs 39 | opts: username=dnas,password=dnas 40 | state: present 41 | register: dnas_share_status 42 | - name: Get DNAS share status 43 | become: true 44 | ansible.posix.mount: 45 | path: /mnt/test_smb_share 46 | state: absent 47 | register: dnas_share_status 48 | # NFS SHARE 49 | - name: Get DNAS NFS share status 50 | become: true 51 | ansible.posix.mount: 52 | path: /mnt/test_share_nfs 53 | src: "{{ homecloud_node_ip }}:/" 54 | fstype: nfs 55 | state: present 56 | register: dnas_share_status 57 | - name: Get DNAS NFS share status 58 | become: true 59 | ansible.posix.mount: 60 | path: /mnt/test_share_nfs 61 | state: absent 62 | register: dnas_share_status 63 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/tasks/host_configure-syncthing.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies from apt 4 | become: true 5 | ansible.builtin.package: 6 | name: python3-lxml,python3-bcrypt,python3-passlib 7 | state: present 8 | force_apt_get: true 9 | 10 | - name: Enable and start syncthing.service 11 | become: true 12 | ansible.builtin.systemd: 13 | daemon_reload: true 14 | name: "syncthing@{{ k3s_dnas_username }}.service" 15 | enabled: true 16 | state: started 17 | changed_when: false 18 | 19 | - name: Wait for Synchting initialization 20 | ansible.builtin.wait_for: 21 | timeout: 2 22 | 23 | - name: Stop syncthing.service 24 | become: true 25 | ansible.builtin.systemd: 26 | name: "syncthing@{{ k3s_dnas_username }}.service" 27 | state: stopped 28 | changed_when: false 29 | 30 | - name: Remove the Sync 
folder from configuration 31 | become: true 32 | community.general.xml: 33 | path: "{{ k3s_dnas_mount_where }}/.config/syncthing/config.xml" 34 | xpath: /configuration/folder[@id='default'] 35 | state: absent 36 | 37 | - name: Remove the Sync folder from file system 38 | become: true 39 | ansible.builtin.file: 40 | path: "{{ k3s_dnas_mount_where }}/Sync" 41 | state: absent 42 | 43 | - name: Set address 44 | become: true 45 | community.general.xml: 46 | path: "{{ k3s_dnas_mount_where }}/.config/syncthing/config.xml" 47 | xpath: /configuration/gui/address 48 | value: "{{ k3s_dnas_syncthing_gui_address }}" 49 | 50 | - name: Set syncthing user 51 | become: true 52 | community.general.xml: 53 | path: "{{ k3s_dnas_mount_where }}/.config/syncthing/config.xml" 54 | xpath: /configuration/gui/user 55 | value: "{{ k3s_dnas_syncthing_gui_username }}" 56 | 57 | - name: Set syncthing password 58 | become: true 59 | community.general.xml: 60 | path: "{{ k3s_dnas_mount_where }}/.config/syncthing/config.xml" 61 | xpath: /configuration/gui/password 62 | value: "{{ k3s_dnas_syncthing_gui_password }}" 63 | 64 | - name: Restart syncthing service 65 | become: true 66 | ansible.builtin.systemd: 67 | name: "syncthing@{{ k3s_dnas_username }}.service" 68 | state: restarted 69 | changed_when: false 70 | -------------------------------------------------------------------------------- /paper/nord.puml: -------------------------------------------------------------------------------- 1 | ' Polar Night 2 | !global $THEME_NORD_0="#2e3440" 3 | !global $THEME_NORD_1="#3b4252" 4 | !global $THEME_NORD_2="#434c5e" 5 | !global $THEME_NORD_3="#4c566a" 6 | ' Snow Storm 7 | !global $THEME_NORD_4="#d8dee9" 8 | !global $THEME_NORD_5="#e5e9f0" 9 | !global $THEME_NORD_6="#eceff4" 10 | ' Frost 11 | !global $THEME_NORD_7="#8fbcbb" 12 | !global $THEME_NORD_8="#88c0d0" 13 | !global $THEME_NORD_9="#81a1c1" 14 | !global $THEME_NORD_10="#5e81ac" 15 | ' Aurora 16 | !global $THEME_NORD_11="#bf616a" 17 | !global 
$THEME_NORD_12="#d08770" 18 | !global $THEME_NORD_13="#ebcb8b" 19 | !global $THEME_NORD_14="#a3be8c" 20 | !global $THEME_NORD_15="#b48ead" 21 | 22 | !global $FONT_COLOR=$THEME_NORD_0 23 | !global $FONT_COLOR_LIGHT=$THEME_NORD_3 24 | !global $BORDER_COLOR=$THEME_NORD_3 25 | 26 | skinparam shadowing false 27 | 28 | skinparam Arrow { 29 | FontColor $THEME_NORD_3 30 | Color $THEME_NORD_3 31 | Thickness 1 32 | } 33 | 34 | skinparam usecase { 35 | BorderThickness 1 36 | BorderColor $THEME_NORD_3 37 | BackgroundColor $THEME_NORD_5 38 | FontColor $FONT_COLOR 39 | } 40 | 41 | skinparam State { 42 | BorderThickness 3 43 | BorderColor $THEME_NORD_3 44 | BackgroundColor $THEME_NORD_5 45 | FontColor $FONT_COLOR 46 | } 47 | 48 | skinparam package { 49 | BorderThickness 1 50 | BorderColor $THEME_NORD_3 51 | BackgroundColor white 52 | FontColor $FONT_COLOR 53 | } 54 | 55 | skinparam rectangle { 56 | BorderThickness 1 57 | BorderColor $THEME_NORD_3 58 | BackgroundColor white 59 | FontColor $FONT_COLOR 60 | } 61 | 62 | skinparam component { 63 | BorderThickness 1 64 | BorderColor $THEME_NORD_3 65 | BackgroundColor $THEME_NORD_5 66 | FontColor $FONT_COLOR 67 | } 68 | 69 | skinparam interface { 70 | BorderThickness 1 71 | BorderColor $THEME_NORD_3 72 | BackgroundColor $THEME_NORD_5 73 | FontColor $FONT_COLOR 74 | } 75 | 76 | skinparam Actor { 77 | BorderColor $THEME_NORD_3 78 | BackgroundColor $THEME_NORD_5 79 | FontColor $FONT_COLOR 80 | } 81 | 82 | skinparam Rectangle<> { 83 | BackgroundColor $THEME_NORD_5 84 | } 85 | skinparam Rectangle<> { 86 | BorderStyle dotted 87 | BorderThickness 1 88 | BackgroundColor $THEME_NORD_5 89 | } 90 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k3s_traefik.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Prepare testing environment 4 | block: 5 | - name: Create working directory 6 | ansible.builtin.file: 7 | dest: 
/tmp/resources/deployments 8 | state: directory 9 | mode: '755' 10 | - name: Clean resources 11 | become: true 12 | ansible.builtin.command: kubectl delete \ 13 | -f /tmp/resources/test-traefik.yml 14 | changed_when: false 15 | failed_when: false 16 | - name: Copy deployment manifests 17 | ansible.builtin.copy: 18 | src: ../deployments/ 19 | dest: /tmp/resources/ 20 | mode: '644' 21 | 22 | - name: Verify whoami 23 | block: 24 | - name: Deploy whoami 25 | become: true 26 | ansible.builtin.command: kubectl apply \ 27 | -f /tmp/resources/test-traefik.yml 28 | changed_when: false 29 | failed_when: false 30 | - name: Wait for whoami 31 | become: true 32 | ansible.builtin.command: kubectl get pod whoami \ 33 | -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}' 34 | changed_when: false 35 | register: kubcetl_result 36 | delay: "{{ homecloud_k8s_deploy_timeout_delay | default(3) }}" 37 | retries: "{{ homecloud_k8s_deploy_timeout_retries | default(40) }}" 38 | until: "'True' in kubcetl_result.stdout" 39 | - name: Fetch the landing page with HTTP 40 | ansible.builtin.command: "curl -IkH host:whoami.homecloud.local http://{{ homecloud_virtual_ip }}:32080" 41 | changed_when: false 42 | register: curl_dashboard 43 | args: 44 | warn: false 45 | - name: Verify the landing page with HTTP 46 | ansible.builtin.assert: 47 | quiet: true 48 | that: 49 | - not curl_dashboard.failed 50 | - curl_dashboard.stdout_lines[0] | trim == "HTTP/1.1 308 Permanent Redirect" 51 | - name: Fetch the landing page with HTTPs 52 | ansible.builtin.command: "curl -IkH host:whoami.homecloud.local https://{{ homecloud_virtual_ip }}:32443" 53 | changed_when: false 54 | register: curl_dashboard 55 | args: 56 | warn: false 57 | - name: Verify the landing page with HTTPs 58 | ansible.builtin.assert: 59 | quiet: true 60 | that: 61 | - not curl_dashboard.failed 62 | - curl_dashboard.stdout_lines[0] | trim == "HTTP/2 200" 63 | 
-------------------------------------------------------------------------------- /paper/vision.puml: -------------------------------------------------------------------------------- 1 | @startuml vision_context 2 | !global $INCLUSION_MODE="local" 3 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 4 | !include $LIB_BASE_LOCATION/bootstrap.puml 5 | include('c4nord/bootstrap') 6 | include('c4model/Element/System') 7 | include('c4model/Element/ExternalSystem') 8 | include('c4model/Element/Person') 9 | include('c4model/Element/ExternalPerson') 10 | Title("Context of //homecloud//", "Vision / Context Diagram") 11 | System("homecloud", "homecloud", "A cloud at home which provides services keeping private data private.") 12 | ExternalSystem("external_system", "External System", "Smartphones, laptops or any other systems consuming homecloud services.") 13 | Person("user", "User", "A person taking care ot his/her private data.") 14 | Person("administrator", "Administrator", "A person allowed to interact with the homecloud platform.") 15 | ExternalPerson("guest", "Guest", "A person allowed by a //User// to interact with some homecloud services.") 16 | administrator --> homecloud : Relationship("Administrates the platform running") 17 | user --> homecloud : Relationship("Consumes services provided by") 18 | guest -u-> homecloud : Relationship("Consumes services provided by") 19 | external_system -u-> homecloud : Relationship("Consumes services provided by") 20 | @enduml 21 | 22 | @startuml vison_work_packages 23 | !global $INCLUSION_MODE="local" 24 | !global $LIB_BASE_LOCATION="../.gdiag/plantuml-libs" 25 | !include $LIB_BASE_LOCATION/bootstrap.puml 26 | include('homecloud-2/bootstrap') 27 | !include ./nord.puml 28 | Title("Overview of the Work Packages", "Vision / Component Diagram") 29 | actor "Devices" as devices 30 | package homecloud { 31 | component "Platform" as platform 32 | component "Decentralized NAS" as dnas 33 | interface decentralized [ 34 | P2P 35 | 
[decentralized] 36 | ] 37 | interface centralized [ 38 | NFS/CIFS 39 | [centralized] 40 | ] 41 | component "Services" as services 42 | interface HTTP 43 | interface WebDav 44 | [platform] <~~ [dnas] : <> 45 | [dnas] -- decentralized 46 | [dnas] -- centralized 47 | [platform] <~~ [services] : <> 48 | [services] -- WebDav 49 | [services] -- HTTP 50 | [services] --> centralized 51 | } 52 | devices -u-> WebDav 53 | devices -u-> HTTP 54 | devices -u-> decentralized 55 | devices -u-> centralized 56 | @enduml 57 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k3s_csi_driver_smb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Prepare testing environment 4 | block: 5 | - name: Create working directory 6 | ansible.builtin.file: 7 | dest: /tmp/resources/deployments 8 | state: directory 9 | mode: '755' 10 | - name: Clean resources 11 | become: true 12 | ansible.builtin.command: kubectl delete \ 13 | -f /tmp/resources/test-csi-smb-dnas-step2.yml \ 14 | -f /tmp/resources/test-csi-smb-dnas-step1.yml 15 | changed_when: false 16 | failed_when: false 17 | - name: Copy test-csi-smb-dnas-step1.yml 18 | ansible.builtin.template: 19 | src: ../deployments/test-csi-smb-dnas-step1.yml 20 | dest: /tmp/resources/test-csi-smb-dnas-step1.yml 21 | mode: '644' 22 | - name: Copy test-csi-smb-dnas-step2.yml 23 | ansible.builtin.template: 24 | src: ../deployments/test-csi-smb-dnas-step2.yml 25 | dest: /tmp/resources/test-csi-smb-dnas-step2.yml 26 | mode: '644' 27 | 28 | - name: Verify csi and dnas 29 | block: 30 | - name: Apply step1 31 | become: true 32 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-csi-smb-dnas-step1.yml 33 | changed_when: false 34 | - name: Wait for step1 35 | become: true 36 | ansible.builtin.command: kubectl get pod test-csi-smb-dnas-step1 -o jsonpath='{.status.phase}' 37 | changed_when: false 38 | register: kubcetl_result 39 | 
delay: 2 40 | retries: 60 41 | until: "'Succeeded' in kubcetl_result.stdout" 42 | - name: Apply step2 43 | become: true 44 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-csi-smb-dnas-step2.yml 45 | changed_when: false 46 | - name: Wait for step2 47 | become: true 48 | ansible.builtin.command: kubectl get pod test-csi-smb-dnas-step2 -o jsonpath='{.status.phase}' 49 | changed_when: false 50 | register: kubcetl_result 51 | delay: 2 52 | retries: 60 53 | until: "'Succeeded' in kubcetl_result.stdout" 54 | - name: Get step2 output 55 | become: true 56 | ansible.builtin.command: kubectl logs test-csi-smb-dnas-step2 57 | changed_when: false 58 | register: step2_result 59 | - name: Verify logs 60 | ansible.builtin.assert: 61 | quiet: true 62 | that: 63 | - step2_result.stdout_lines[0] | trim == 'test-csi-smb-dnas-step1' 64 | -------------------------------------------------------------------------------- /.github/workflows/ci-test.yml: -------------------------------------------------------------------------------- 1 | name: Continous Integration - Test 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | 7 | test: 8 | if: ${{ false }} 9 | name: Test molecule scenario 10 | runs-on: ubuntu-20.04 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | scenario: [ 'c1' ] 15 | n1_box: [ '' ] 16 | n2_box: [ '' ] 17 | driver: [ "'qemu'" ] 18 | # include: 19 | # - scenario: c1 20 | # n1_box: generic/ubuntu1804 21 | # - scenario: c1 22 | # n1_box: generic/ubuntu2004 23 | # - scenario: c1-ceph 24 | # n1_box: generic/debian11 25 | # - scenario: c1-ceph 26 | # n1_box: generic/debian10 27 | # - scenario: c2 28 | # n1_box: generic/debian11 29 | # n2_box: generic/debian10 30 | env: 31 | MOLECULE_SCENARIO: ${{ matrix.scenario }} 32 | MOLECULE_N1_BOX: ${{ matrix.n1_box }} 33 | MOLECULE_N2_BOX: ${{ matrix.n2_box }} 34 | MOLECULE_DRIVER: ${{ matrix.driver }} 35 | steps: 36 | - uses: actions/checkout@v2 37 | - uses: actions/cache@v2 38 | with: 39 | key: vagrant-box-debian11 40 
| path: "~/.vagrant.d/boxes/generic-VAGRANTSLASH-debian11" 41 | - uses: actions/cache@v2 42 | with: 43 | key: vagrant-box-debian10 44 | path: "~/.vagrant.d/boxes/generic-VAGRANTSLASH-debian10" 45 | - uses: actions/cache@v2 46 | with: 47 | key: vagrant-box-ubuntu1804 48 | path: "~/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu1804" 49 | - uses: actions/cache@v2 50 | with: 51 | key: vagrant-box-ubuntu2004 52 | path: "~/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu2004" 53 | #- uses: actions/cache@v2 54 | # with: 55 | # key: python3-env 56 | # path: "env" 57 | - uses: actions/setup-python@v2 58 | with: 59 | python-version: '3.x' 60 | - name: Install Python requirements 61 | run: | 62 | export PATH="$PATH:$HOME/.local/bin:/root/.local/bin" 63 | pip install --upgrade setuptools wheel pip 64 | pip install -r requirements.txt 65 | - name: Install vagrant and libvirt 66 | run: | 67 | export PATH="$PATH:/home/runner/.local/bin:/root/.local/bin" 68 | sudo ./scripts/ci-install.sh 69 | - name: Execute test scenario 70 | run: | 71 | PATH="$PATH:/home/runner/.local/bin:/root/.local/bin" 72 | sudo /home/runner/.local/bin/molecule test -s $MOLECULE_SCENARIO 73 | -------------------------------------------------------------------------------- /collection/roles/k3s_longhorn/tasks/configure-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Create the image directory" 4 | become: true 5 | ansible.builtin.file: 6 | path: "{{ k3s_longhorn_image_file | dirname }}" 7 | state: directory 8 | mode: 0755 9 | recurse: true 10 | 11 | - name: "Stats the image file" 12 | become: true 13 | ansible.builtin.stat: 14 | path: "{{ k3s_longhorn_image_file }}" 15 | get_attributes: no 16 | get_checksum: no 17 | get_mime: no 18 | register: stat_k3s_longhorn_image_file 19 | 20 | - name: "Create the image file" 21 | when: not stat_k3s_longhorn_image_file.stat.exists 22 | become: true 23 | ansible.builtin.command: 24 | cmd: "dd if=/dev/zero of={{ 
k3s_longhorn_image_file }} bs={{ k3s_longhorn_image_dd_bs }} count={{ k3s_longhorn_image_dd_cnt }}" 25 | 26 | - name: "Create the scripts directory" 27 | become: true 28 | ansible.builtin.file: 29 | path: /usr/local/share/homecloud/scripts 30 | state: directory 31 | mode: 0755 32 | recurse: true 33 | 34 | - name: "Transfer the script" 35 | become: true 36 | ansible.builtin.template: 37 | src: create_longhorn_loop.sh.jinja2 38 | dest: "{{ k3s_longhorn_script_create_loop }}" 39 | owner: root 40 | group: root 41 | mode: 0755 42 | 43 | - name: "Transfer the service configuration" 44 | become: true 45 | ansible.builtin.template: 46 | src: longhorn_loop.service 47 | dest: /etc/systemd/system/longhorn_loop.service 48 | owner: root 49 | group: root 50 | mode: 0644 51 | 52 | - name: "Enable and start the service configuration" 53 | become: true 54 | ansible.builtin.systemd: 55 | daemon_reload: true 56 | name: longhorn_loop.service 57 | enabled: true 58 | state: started 59 | 60 | - name: "Mount the loop" 61 | become: true 62 | ansible.builtin.shell: 63 | cmd: "{{ k3s_longhorn_script_create_loop }} || true" 64 | changed_when: false 65 | 66 | - name: "Format the block image of Longhorn" 67 | become: true 68 | when: not stat_k3s_longhorn_image_file.stat.exists 69 | community.general.filesystem: 70 | dev: "{{ k3s_longhorn_image_device }}" 71 | force: false 72 | fstype: "{{ k3s_longhorn_disk_type }}" 73 | 74 | - name: "Transfer the mount configuration" 75 | become: true 76 | ansible.builtin.template: 77 | src: longhorn_loop.mount 78 | dest: "/etc/systemd/system/{{ k3s_longhorn_disk_name }}" 79 | owner: root 80 | group: root 81 | mode: 0644 82 | 83 | - name: "Enable and start the mount configuration" 84 | become: true 85 | ansible.builtin.systemd: 86 | daemon_reload: true 87 | name: "{{ k3s_longhorn_disk_name }}" 88 | enabled: true 89 | state: restarted 90 | -------------------------------------------------------------------------------- /.travis.yml: 
-------------------------------------------------------------------------------- 1 | os: linux 2 | arch: amd64 3 | dist: bionic 4 | language: python 5 | python: 6 | - 3.8 7 | addons: 8 | apt: 9 | update: true 10 | sources: 11 | - sourceline: 'deb [arch=amd64] https://apt.releases.hashicorp.com bionic main' 12 | key_url: 'https://apt.releases.hashicorp.com/gpg' 13 | packages: 14 | - bridge-utils 15 | - dnsmasq-base 16 | - ebtables 17 | - jq 18 | - libssl-dev 19 | - libvirt-bin 20 | - libvirt-dev 21 | - qemu-kvm 22 | - qemu-system-arm 23 | - qemu-user-static 24 | - qemu-utils 25 | - ruby 26 | - ruby-dev 27 | - vagrant 28 | - xz-utils 29 | env: 30 | global: 31 | - VAGRANT_DEFAULT_PROVIDER=libvirt 32 | - BOX_VERSION=3.2.12 33 | matrix: 34 | #- TEST_NAME=armbian # chroot fails on travis 35 | - TEST_NAME=k1 MOLECULE_N1_BOX=generic/debian10 36 | - TEST_NAME=k1ha MOLECULE_N1_BOX=generic/debian11 37 | - TEST_NAME=k1ha MOLECULE_N1_BOX=generic/debian10 38 | - TEST_NAME=k1ha MOLECULE_N1_BOX=generic/ubuntu1804 39 | - TEST_NAME=k1ha MOLECULE_N1_BOX=generic/ubuntu2004 40 | - TEST_NAME=k2 MOLECULE_N1_BOX=generic/debian10 MOLECULE_N1_BOX=generic/ubuntu1804 41 | - TEST_NAME=k2ha MOLECULE_N1_BOX=generic/ubuntu2004 MOLECULE_N1_BOX=generic/debian11 42 | cache: 43 | pip: false 44 | apt: false 45 | directories: 46 | #- /home/travis/.ansible/collections 47 | #- /home/travis/.ansible/roles 48 | - /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-debian11 49 | - /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-debian10 50 | - /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu1804 51 | - /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu2004 52 | before_cache: 53 | - rm -f /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-debian11/3.2.12/libvirt/box_update_check 54 | - rm -f /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-debian10/3.2.12/libvirt/box_update_check 55 | - rm -f /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu1804/3.2.12/libvirt/box_update_check 56 
| - rm -f /home/travis/.vagrant.d/boxes/generic-VAGRANTSLASH-ubuntu2004/3.2.12/libvirt/box_update_check 57 | install: 58 | # configure python 59 | - pip install --upgrade setuptools 60 | - pip install -r requirements.txt 61 | # configure libvirt 62 | - sudo sed -i 's/unix_sock_group = "libvirt"/unix_sock_group = "travis"/' /etc/libvirt/libvirtd.conf 63 | - sudo systemctl restart libvirtd.service 64 | # configure vagrant 65 | - vagrant plugin install vagrant-libvirt 66 | - vagrant box add generic/debian11 --provider=libvirt --box-version $BOX_VERSION || true 67 | - vagrant box add generic/debian10 --provider=libvirt --box-version $BOX_VERSION || true 68 | - vagrant box add generic/ubuntu1804 --provider=libvirt --box-version $BOX_VERSION || true 69 | - vagrant box add generic/ubuntu2004 --provider=libvirt --box-version $BOX_VERSION || true 70 | script: 71 | - molecule test -s $TEST_NAME 72 | -------------------------------------------------------------------------------- /.github/workflows/ci-build.yml: -------------------------------------------------------------------------------- 1 | name: Continous Integration - Build 2 | 3 | on: [ push, pull_request ] 4 | 5 | jobs: 6 | 7 | paper: 8 | name: Build the paper 9 | runs-on: ubuntu-20.04 10 | container: asciidoctor/docker-asciidoctor 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Build paper-website 14 | run: | 15 | asciidoctor paper/README.adoc -D dist/paper -a toc=left -o index.html 16 | cp paper/*.png dist/paper 17 | - name: Build paper-pdf 18 | run: asciidoctor-pdf paper/README.adoc -D dist -o homecloud-paper.pdf 19 | - name: Upload paper-pdf 20 | uses: actions/upload-artifact@v2 21 | with: 22 | name: paper-pdf 23 | path: dist/homecloud-paper.pdf 24 | - name: Upload paper-website 25 | uses: actions/upload-artifact@v2 26 | with: 27 | name: paper-website 28 | path: dist/paper 29 | - name: Copy paper-pdf to website 30 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 31 | run: cp dist/homecloud-paper.pdf 
dist/paper/homecloud-paper.pdf 32 | - name: Publish documentation 33 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 34 | uses: peaceiris/actions-gh-pages@v3 35 | with: 36 | github_token: ${{ secrets.GITHUB_TOKEN }} 37 | publish_dir: ./dist/paper 38 | 39 | collection: 40 | name: Build the Ansible collection 41 | runs-on: ubuntu-20.04 42 | steps: 43 | - uses: actions/checkout@v2 44 | - uses: actions/setup-python@v2 45 | with: 46 | python-version: '3.x' 47 | - name: Install Python requirements 48 | run: | 49 | export PATH="$PATH:$HOME/.local/bin" 50 | pip install --user --upgrade setuptools wheel pip 51 | pip install --user -r requirements.txt 52 | ansible-galaxy collection install -r molecule/resources/collections.yml 53 | - name: Lint the collection 54 | run: | 55 | export PATH="$PATH:$HOME/.local/bin" 56 | export ANSIBLE_COLLECTIONS_PATH=~/.ansible/collections:/usr/share/ansible/collections:/etc/ansible/collections 57 | export ANSIBLE_ROLES_PATH=~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles:collection/roles 58 | ansible-lint 59 | - name: Build the collection 60 | run: | 61 | export PATH="$PATH:$HOME/.local/bin" 62 | ansible-galaxy collection build --force 63 | working-directory: collection 64 | - name: Upload collection 65 | uses: actions/upload-artifact@v2 66 | with: 67 | name: collection 68 | path: collection/tmorin-homecloud-*.tar.gz 69 | - name: Publish the collection 70 | if: ${{ startsWith(github.ref, 'refs/tags/') }} 71 | run: | 72 | export PATH="$PATH:$HOME/.local/bin" 73 | ansible-galaxy collection publish --token ${{ secrets.ANSIBLE_GALAXY_API_KEY }} tmorin-homecloud-*.tar.gz 74 | working-directory: collection 75 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/files/mounter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ACTION="" 4 | HOST_DIRECTORY="" 5 | IMAGE_FILE="" 6 | BASE_IMAGE_FILE="" 7 | 
MOUNT_DIRECTORY="" 8 | 9 | POSITIONAL=() 10 | while [[ $# -gt 0 ]]; do 11 | key="$1" 12 | case ${key} in 13 | -c | --clean) 14 | ACTION="clean" 15 | shift 16 | ;; 17 | -p | --prepare) 18 | ACTION="prepare" 19 | shift 20 | ;; 21 | -m | --mount) 22 | ACTION="mount" 23 | shift 24 | ;; 25 | -u | --umount) 26 | ACTION="umount" 27 | shift 28 | ;; 29 | -hd | --host-directory) 30 | HOST_DIRECTORY="$2" 31 | shift 32 | shift 33 | ;; 34 | -bi | --base-image) 35 | BASE_IMAGE_FILE="$2" 36 | shift 37 | shift 38 | ;; 39 | *) 40 | POSITIONAL+=("$1") 41 | shift 42 | ;; 43 | esac 44 | done 45 | set -- "${POSITIONAL[@]}" # restore positional parameters 46 | 47 | IMAGE_FILE="${HOST_DIRECTORY}/image.img" 48 | MOUNT_DIRECTORY="${HOST_DIRECTORY}/rootfs" 49 | 50 | if [[ -z ${ACTION} ]]; then 51 | echo "error : [-m|--mount] or [-u|--umount] are required or [-p|--prepare] are required" 52 | exit 1 53 | fi 54 | 55 | if [[ -z ${HOST_DIRECTORY} ]]; then 56 | echo "error : [-hd|--host-directory ] is required" 57 | exit 1 58 | fi 59 | 60 | echo "---- execute action [$ACTION] in ${HOST_DIRECTORY} ----" 61 | 62 | set -x -e -o pipefail 63 | 64 | function execute_mount() { 65 | mkdir -p "${MOUNT_DIRECTORY}" 66 | offset=$(sudo fdisk -l "${IMAGE_FILE}" | grep -A2 -E "Device\s*Boot\s*Start\s*End" | tail -n +2 | sed -r "s/[^ ]* *([0-9]*).*$/\1/") 67 | offsetInBytes=$((offset * 512)) 68 | sync 69 | local device=$(sudo losetup -f "${IMAGE_FILE}" -o ${offsetInBytes} --show) 70 | sync 71 | mount -t auto "${device}" "${MOUNT_DIRECTORY}" 72 | sync 73 | } 74 | 75 | function execute_umount() { 76 | sync 77 | mount | grep "${MOUNT_DIRECTORY}" && umount "${MOUNT_DIRECTORY}" || echo "mount [${MOUNT_DIRECTORY}] missing" 78 | sync 79 | 80 | devices=$(losetup -l -O BACK-FILE,NAME -n -J | jq -r ".loopdevices | .[] | select(.\"back-file\" | contains(\"${IMAGE_FILE}\") ) | .name") 81 | declare -a arr=(${devices}) 82 | for device in "${arr[@]}"; do 83 | losetup -d "${device}" 84 | sync 85 | done 86 | } 87 | 88 | function 
execute_prepare() { 89 | if [[ -z ${BASE_IMAGE_FILE} ]]; then 90 | echo "error : [-bi|--base-image ] is required" 91 | exit 1 92 | fi 93 | mkdir -p "${MOUNT_DIRECTORY}" 94 | if [[ ! -f "IMAGE_FILE" ]]; then 95 | cp "${BASE_IMAGE_FILE}" "${IMAGE_FILE}" 96 | fi 97 | } 98 | 99 | function execute_clean() { 100 | sync 101 | rm -Rf "${HOST_DIRECTORY}" 102 | sync 103 | } 104 | 105 | case ${ACTION} in 106 | mount) 107 | execute_umount 108 | execute_mount 109 | ;; 110 | umount) 111 | execute_umount 112 | ;; 113 | prepare) 114 | execute_umount 115 | execute_prepare 116 | execute_mount 117 | ;; 118 | clean) 119 | execute_umount 120 | execute_clean 121 | ;; 122 | esac 123 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/tasks/patch-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Clean host directory {{ image_aosc_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | when: image_aosc_clean_working_directory 7 | ansible.builtin.script: "files/mounter.sh -c -hd {{ image_aosc_host_directory }}" 8 | args: 9 | executable: /bin/bash 10 | changed_when: false 11 | 12 | - name: "Prepare host directory {{ image_aosc_host_directory }}" 13 | delegate_to: localhost 14 | become: true 15 | when: image_aosc_clean_working_directory 16 | ansible.builtin.script: "files/mounter.sh -p -hd {{ image_aosc_host_directory }} -bi {{ image_aosc_base_image_path }}" 17 | args: 18 | executable: /bin/bash 19 | changed_when: false 20 | 21 | - name: Customize the aosc image 22 | block: 23 | - name: Mount host image 24 | delegate_to: localhost 25 | become: true 26 | ansible.builtin.script: "files/mounter.sh -m -hd {{ image_aosc_host_directory }}" 27 | args: 28 | executable: /bin/bash 29 | changed_when: false 30 | register: mount_image 31 | - name: Debug mount_image 32 | ansible.builtin.debug: 33 | var: mount_image 34 | - name: Set aosc_first_run.txt 35 | 
delegate_to: localhost 36 | become: true 37 | ansible.builtin.template: 38 | src: aosc_first_run.txt 39 | dest: "{{ image_aosc_host_directory }}/rootfs/boot/aosc_first_run.txt" 40 | mode: 0644 41 | changed_when: false 42 | register: aosc_first_run 43 | - name: Debug aosc_first_run 44 | ansible.builtin.debug: 45 | var: aosc_first_run 46 | - name: Customize the host image 47 | delegate_to: localhost 48 | become: true 49 | ansible.builtin.script: "files/customizer.sh -rf {{ image_aosc_host_directory }}/rootfs -cu {{ image_aosc_username }}" 50 | args: 51 | executable: /bin/bash 52 | changed_when: false 53 | register: customize_image 54 | - name: Debug customize_image 55 | ansible.builtin.debug: 56 | var: customize_image 57 | - name: Copy ssh public key 58 | delegate_to: localhost 59 | become: true 60 | ansible.builtin.template: 61 | src: "{{ image_aosc_ssh_pub_key_path }}" 62 | dest: "{{ image_aosc_host_directory }}/rootfs/home/{{ image_aosc_username }}/.ssh/authorized_keys" 63 | force: true 64 | owner: "1000" 65 | group: "1000" 66 | mode: "0640" 67 | changed_when: false 68 | register: copy_ssh_pub_key 69 | - name: Debug copy_ssh_pub_key 70 | ansible.builtin.debug: 71 | var: copy_ssh_pub_key 72 | - name: Fix /etc/hostname 73 | delegate_to: localhost 74 | become: true 75 | ansible.builtin.copy: 76 | content: "{{ inventory_hostname }}" 77 | dest: "{{ image_aosc_host_directory }}/rootfs/etc/hostname" 78 | mode: 0644 79 | changed_when: false 80 | register: fix_hostname 81 | - name: Debug fix_hostname 82 | ansible.builtin.debug: 83 | var: fix_hostname 84 | - name: Fix /etc/hosts 85 | delegate_to: localhost 86 | become: true 87 | ansible.builtin.replace: 88 | path: "{{ image_aosc_host_directory }}/rootfs/etc/hosts" 89 | regexp: rock64 90 | replace: "{{ inventory_hostname }}" 91 | changed_when: false 92 | register: fix_hosts 93 | - name: Debug fix_hosts 94 | ansible.builtin.debug: 95 | var: fix_hosts 96 | always: 97 | - name: Unmount host image 98 | delegate_to: localhost 99 
| become: true 100 | ansible.builtin.script: "files/mounter.sh -u -hd {{ image_aosc_host_directory }}" 101 | args: 102 | executable: /bin/bash 103 | changed_when: false 104 | register: umount_image 105 | - name: Debug umount_image 106 | ansible.builtin.debug: 107 | var: umount_image 108 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/files/mounter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ACTION="" 4 | HOST_DIRECTORY="" 5 | IMAGE_FILE="" 6 | BASE_IMAGE_FILE="" 7 | 8 | POSITIONAL=() 9 | while [[ $# -gt 0 ]]; do 10 | key="$1" 11 | case ${key} in 12 | -c | --clean) 13 | ACTION="clean" 14 | shift 15 | ;; 16 | -p | --prepare) 17 | ACTION="prepare" 18 | shift 19 | ;; 20 | -m | --mount) 21 | ACTION="mount" 22 | shift 23 | ;; 24 | -u | --umount) 25 | ACTION="umount" 26 | shift 27 | ;; 28 | -hd | --host-directory) 29 | HOST_DIRECTORY="$2" 30 | shift 31 | shift 32 | ;; 33 | -bi | --base-image) 34 | BASE_IMAGE_FILE="$2" 35 | shift 36 | shift 37 | ;; 38 | *) 39 | POSITIONAL+=("$1") 40 | shift 41 | ;; 42 | esac 43 | done 44 | set -- "${POSITIONAL[@]}" # restore positional parameters 45 | 46 | IMAGE_FILE="${HOST_DIRECTORY}/image.img" 47 | MOUNT_BOOTFS_DIRECTORY="${HOST_DIRECTORY}/bootfs" 48 | MOUNT_ROOTFS_DIRECTORY="${HOST_DIRECTORY}/rootfs" 49 | 50 | if [[ -z ${ACTION} ]]; then 51 | echo "error : [-m|--mount] or [-u|--umount] are required or [-p|--prepare] are required" 52 | exit 1 53 | fi 54 | 55 | if [[ -z ${HOST_DIRECTORY} ]]; then 56 | echo "error : [-hd|--host-directory ] is required" 57 | exit 1 58 | fi 59 | 60 | echo "---- execute action [$ACTION] in ${HOST_DIRECTORY} ----" 61 | 62 | set -x -e -o pipefail 63 | 64 | function execute_mount() { 65 | echo "mount ${MOUNT_BOOTFS_DIRECTORY}" 66 | local offsetStartOne 67 | local offsetStartOneInBytes 68 | local offsetEndOne 69 | local sizeOneInByte 70 | local deviceOne 71 | mkdir -p 
"${MOUNT_BOOTFS_DIRECTORY}" 72 | offsetStartOne=$(fdisk -l "${IMAGE_FILE}" -o Start | tail -n-2 | head -n+1) 73 | offsetEndOne=$(fdisk -l "${IMAGE_FILE}" -o End | tail -n-2 | head -n+1) 74 | offsetStartOneInBytes=$((offsetStartOne * 512)) 75 | sizeOneInByte=$(((offsetEndOne-offsetStartOne) * 512)) 76 | 77 | deviceOne=$(losetup -f "${IMAGE_FILE}" -o ${offsetStartOneInBytes} --sizelimit ${sizeOneInByte} --show) 78 | mount -t auto "${deviceOne}" "${MOUNT_BOOTFS_DIRECTORY}" 79 | 80 | echo "mount ${MOUNT_ROOTFS_DIRECTORY}" 81 | local offsetStartTwo 82 | local offsetStartTwoInBytes 83 | local deviceTwo 84 | mkdir -p "${MOUNT_ROOTFS_DIRECTORY}" 85 | offsetStartTwo=$(fdisk -l "${IMAGE_FILE}" -o Start | tail -n-1 | head -n+1) 86 | offsetStartTwoInBytes=$((offsetStartTwo * 512)) 87 | deviceTwo=$(losetup -f "${IMAGE_FILE}" -o ${offsetStartTwoInBytes} --show) 88 | mount -t auto "${deviceTwo}" "${MOUNT_ROOTFS_DIRECTORY}" 89 | 90 | sync 91 | } 92 | 93 | function execute_umount() { 94 | sync 95 | mount | grep "${MOUNT_BOOTFS_DIRECTORY}" && umount "${MOUNT_BOOTFS_DIRECTORY}" || echo "mount [${MOUNT_BOOTFS_DIRECTORY}] missing" 96 | sync 97 | mount | grep "${MOUNT_ROOTFS_DIRECTORY}" && umount "${MOUNT_ROOTFS_DIRECTORY}" || echo "mount [${MOUNT_ROOTFS_DIRECTORY}] missing" 98 | sync 99 | 100 | devices=$(losetup -l -O BACK-FILE,NAME -n -J | jq -r ".loopdevices | .[] | select(.\"back-file\" | contains(\"${IMAGE_FILE}\") ) | .name") 101 | declare -a arr=(${devices}) 102 | for deviceOne in "${arr[@]}"; do 103 | losetup -d "${deviceOne}" 104 | sync 105 | done 106 | } 107 | 108 | function execute_prepare() { 109 | if [[ -z ${BASE_IMAGE_FILE} ]]; then 110 | echo "error : [-bi|--base-image ] is required" 111 | exit 1 112 | fi 113 | mkdir -p "${HOST_DIRECTORY}" 114 | if [[ ! 
-f "${IMAGE_FILE}" ]]; then 115 | cp "${BASE_IMAGE_FILE}" "${IMAGE_FILE}" 116 | fi 117 | } 118 | 119 | function execute_clean() { 120 | rm -Rf "${HOST_DIRECTORY}" 121 | } 122 | 123 | case ${ACTION} in 124 | mount) 125 | execute_umount 126 | execute_mount 127 | ;; 128 | umount) 129 | execute_umount 130 | ;; 131 | prepare) 132 | execute_umount 133 | execute_prepare 134 | execute_mount 135 | ;; 136 | clean) 137 | execute_umount 138 | execute_clean 139 | ;; 140 | esac 141 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure("2") do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://vagrantcloud.com/search. 15 | config.vm.box = "generic/ubuntu2104" 16 | 17 | # Disable automatic box update checking. If you disable this, then 18 | # boxes will only be checked for updates when the user runs 19 | # `vagrant box outdated`. This is not recommended. 20 | # config.vm.box_check_update = false 21 | 22 | # Create a forwarded port mapping which allows access to a specific port 23 | # within the machine from a port on the host machine. In the example below, 24 | # accessing "localhost:8080" will access port 80 on the guest machine.
25 | # NOTE: This will enable public access to the opened port 26 | # config.vm.network "forwarded_port", guest: 80, host: 8080 27 | 28 | # Create a forwarded port mapping which allows access to a specific port 29 | # within the machine from a port on the host machine and only allow access 30 | # via 127.0.0.1 to disable public access 31 | # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1" 32 | 33 | # Create a private network, which allows host-only access to the machine 34 | # using a specific IP. 35 | # config.vm.network "private_network", ip: "192.168.33.10" 36 | 37 | # Create a public network, which generally matched to bridged network. 38 | # Bridged networks make the machine appear as another physical device on 39 | # your network. 40 | # config.vm.network "public_network" 41 | 42 | # Share an additional folder to the guest VM. The first argument is 43 | # the path on the host to the actual folder. The second argument is 44 | # the path on the guest to mount the folder. And the optional third 45 | # argument is a set of non-required options. 46 | # config.vm.synced_folder "../data", "/vagrant_data" 47 | config.vm.synced_folder ".", "/vagrant", type: "virtualbox" 48 | 49 | # Provider-specific configuration so you can fine-tune various 50 | # backing providers for Vagrant. These expose provider-specific options. 51 | # Example for VirtualBox: 52 | # 53 | # config.vm.provider "virtualbox" do |vb| 54 | # # Display the VirtualBox GUI when booting the machine 55 | # vb.gui = true 56 | # 57 | # # Customize the amount of memory on the VM: 58 | # vb.memory = "1024" 59 | # end 60 | # 61 | # View the documentation for the provider you are using for more 62 | # information on available options. 63 | config.vm.provider "virtualbox" do |vb| 64 | vb.memory = "4096" 65 | vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"] 66 | end 67 | 68 | # Enable provisioning with a shell script. 
Additional provisioners such as 69 | # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the 70 | # documentation for more information about their specific syntax and use. 71 | config.vm.provision "shell", inline: <<-SHELL 72 | set -ex 73 | 74 | echo "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | tee -a /etc/apt/sources.list.d/hashicorp.list 75 | curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add - 76 | 77 | apt-get update 78 | 79 | apt-get install -y vagrant virtualbox 80 | apt-get install -y bridge-utils qemu-kvm virtinst libvirt-daemon-system 81 | apt-get install -y ruby-dev libvirt-dev libssl-dev 82 | apt-get install -y btrfs-progs jq lz4 python3-virtualenv qemu qemu-system qemu-user qemu-user-static xz-utils 83 | 84 | vagrant plugin install vagrant-libvirt 85 | 86 | kvm-ok || true 87 | SHELL 88 | end 89 | -------------------------------------------------------------------------------- /collection/roles/image_aosc/files/mounter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ACTION="" 4 | HOST_DIRECTORY="" 5 | IMAGE_FILE="" 6 | BASE_IMAGE_FILE="" 7 | 8 | POSITIONAL=() 9 | while [[ $# -gt 0 ]]; do 10 | key="$1" 11 | case ${key} in 12 | -c | --clean) 13 | ACTION="clean" 14 | shift 15 | ;; 16 | -p | --prepare) 17 | ACTION="prepare" 18 | shift 19 | ;; 20 | -m | --mount) 21 | ACTION="mount" 22 | shift 23 | ;; 24 | -u | --umount) 25 | ACTION="umount" 26 | shift 27 | ;; 28 | -hd | --host-directory) 29 | HOST_DIRECTORY="$2" 30 | shift 31 | shift 32 | ;; 33 | -bi | --base-image) 34 | BASE_IMAGE_FILE="$2" 35 | shift 36 | shift 37 | ;; 38 | *) 39 | POSITIONAL+=("$1") 40 | shift 41 | ;; 42 | esac 43 | done 44 | set -- "${POSITIONAL[@]}" # restore positional parameters 45 | 46 | IMAGE_FILE="${HOST_DIRECTORY}/image.img" 47 | MOUNT_BOOTFS_DIRECTORY="${HOST_DIRECTORY}/bootfs" 48 | MOUNT_ROOTFS_DIRECTORY="${HOST_DIRECTORY}/rootfs" 49 | 50 | if 
[[ -z ${ACTION} ]]; then 51 | echo "error : [-m|--mount] or [-u|--umount] are required or [-p|--prepare] are required" 52 | exit 1 53 | fi 54 | 55 | if [[ -z ${HOST_DIRECTORY} ]]; then 56 | echo "error : [-hd|--host-directory ] is required" 57 | exit 1 58 | fi 59 | 60 | echo "---- execute action [$ACTION] in ${HOST_DIRECTORY} ----" 61 | 62 | set -x -e -o pipefail 63 | 64 | function execute_mount() { 65 | echo "mount ${MOUNT_BOOTFS_DIRECTORY}" 66 | local offsetStartOne 67 | local offsetStartOneInBytes 68 | local offsetEndOne 69 | local sizeOneInByte 70 | local deviceOne 71 | mkdir -p "${MOUNT_BOOTFS_DIRECTORY}" 72 | offsetStartOne=$(fdisk -l "${IMAGE_FILE}" -o Start | tail -n-2 | head -n+1) 73 | offsetEndOne=$(fdisk -l "${IMAGE_FILE}" -o End | tail -n-2 | head -n+1) 74 | offsetStartOneInBytes=$((offsetStartOne * 512)) 75 | sizeOneInByte=$(((offsetEndOne-offsetStartOne) * 512)) 76 | echo "$offsetStartOneInBytes $sizeOneInByte" 77 | 78 | deviceOne=$(losetup -f "${IMAGE_FILE}" -o ${offsetStartOneInBytes} --sizelimit ${sizeOneInByte} --show) 79 | mount -t auto "${deviceOne}" "${MOUNT_BOOTFS_DIRECTORY}" 80 | 81 | echo "mount ${MOUNT_ROOTFS_DIRECTORY}" 82 | local offsetStartTwo 83 | local offsetStartTwoInBytes 84 | local deviceTwo 85 | mkdir -p "${MOUNT_ROOTFS_DIRECTORY}" 86 | offsetStartTwo=$(fdisk -l "${IMAGE_FILE}" -o Start | tail -n-1 | head -n+1) 87 | offsetEndTwo=$(fdisk -l "${IMAGE_FILE}" -o End | tail -n-1 | head -n+1) 88 | offsetStartTwoInBytes=$((offsetStartTwo * 512)) 89 | sizeTwoInByte=$(((offsetEndTwo-offsetStartTwo) * 512)) 90 | echo "$offsetStartTwoInBytes $sizeTwoInByte" 91 | 92 | deviceTwo=$(losetup -f "${IMAGE_FILE}" --offset ${offsetStartTwoInBytes} --sizelimit ${sizeTwoInByte} --show) 93 | sleep 1 94 | sync 95 | mount -t auto "${deviceTwo}" "${MOUNT_ROOTFS_DIRECTORY}" 96 | 97 | sync 98 | } 99 | 100 | function execute_umount() { 101 | sync 102 | mount | grep "${MOUNT_BOOTFS_DIRECTORY}" && umount "${MOUNT_BOOTFS_DIRECTORY}" || echo "mount 
[${MOUNT_BOOTFS_DIRECTORY}] missing" 103 | sync 104 | mount | grep "${MOUNT_ROOTFS_DIRECTORY}" && umount "${MOUNT_ROOTFS_DIRECTORY}" || echo "mount [${MOUNT_ROOTFS_DIRECTORY}] missing" 105 | sync 106 | 107 | devices=$(losetup -l -O BACK-FILE,NAME -n -J | jq -r ".loopdevices | .[] | select(.\"back-file\" | contains(\"${IMAGE_FILE}\") ) | .name") 108 | declare -a arr=(${devices}) 109 | for deviceOne in "${arr[@]}"; do 110 | losetup -d "${deviceOne}" 111 | sync 112 | done 113 | } 114 | 115 | function execute_prepare() { 116 | if [[ -z ${BASE_IMAGE_FILE} ]]; then 117 | echo "error : [-bi|--base-image ] is required" 118 | exit 1 119 | fi 120 | mkdir -p "${HOST_DIRECTORY}" 121 | if [[ ! -f "${IMAGE_FILE}" ]]; then 122 | cp "${BASE_IMAGE_FILE}" "${IMAGE_FILE}" 123 | fi 124 | } 125 | 126 | function execute_clean() { 127 | rm -Rf "${HOST_DIRECTORY}" 128 | } 129 | 130 | case ${ACTION} in 131 | mount) 132 | execute_umount 133 | execute_mount 134 | ;; 135 | umount) 136 | execute_umount 137 | ;; 138 | prepare) 139 | execute_umount 140 | execute_prepare 141 | execute_mount 142 | ;; 143 | clean) 144 | execute_umount 145 | execute_clean 146 | ;; 147 | esac 148 | -------------------------------------------------------------------------------- /collection/roles/image_armbian/tasks/patch-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Clean host directory {{ image_armbian_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | when: image_armbian_clean_working_directory 7 | ansible.builtin.script: "files/mounter.sh -c -hd {{ image_armbian_host_directory }}" 8 | args: 9 | executable: /bin/bash 10 | changed_when: false 11 | 12 | - name: "Prepare host directory {{ image_armbian_host_directory }}" 13 | delegate_to: localhost 14 | become: true 15 | when: image_armbian_clean_working_directory 16 | ansible.builtin.script: "files/mounter.sh -p -hd {{ image_armbian_host_directory }} -bi {{
image_armbian_base_image_path }}" 17 | args: 18 | executable: /bin/bash 19 | changed_when: false 20 | 21 | - name: Customize the armbian image 22 | block: 23 | - name: Mount host image 24 | delegate_to: localhost 25 | become: true 26 | ansible.builtin.script: "files/mounter.sh -m -hd {{ image_armbian_host_directory }}" 27 | args: 28 | executable: /bin/bash 29 | changed_when: false 30 | register: mount_image 31 | - name: Debug mount_image 32 | ansible.builtin.debug: 33 | var: mount_image 34 | - name: Set armbian_first_run.txt 35 | delegate_to: localhost 36 | become: true 37 | ansible.builtin.template: 38 | src: armbian_first_run.txt 39 | dest: "{{ image_armbian_host_directory }}/rootfs/boot/armbian_first_run.txt" 40 | mode: 0644 41 | changed_when: false 42 | register: armbian_first_run 43 | - name: Debug armbian_first_run 44 | ansible.builtin.debug: 45 | var: armbian_first_run 46 | - name: Customize the host image 47 | delegate_to: localhost 48 | become: true 49 | ansible.builtin.script: "files/customizer.sh -rf {{ image_armbian_host_directory }}/rootfs -cu {{ image_armbian_username }}" 50 | args: 51 | executable: /bin/bash 52 | changed_when: false 53 | register: customize_image 54 | - name: Debug customize_image 55 | ansible.builtin.debug: 56 | var: customize_image 57 | - name: Copy ssh public key 58 | delegate_to: localhost 59 | become: true 60 | ansible.builtin.template: 61 | src: "{{ image_armbian_ssh_pub_key_path }}" 62 | dest: "{{ image_armbian_host_directory }}/rootfs/home/{{ image_armbian_username }}/.ssh/authorized_keys" 63 | force: true 64 | owner: "1000" 65 | group: "1000" 66 | mode: "0640" 67 | changed_when: false 68 | register: copy_ssh_pub_key 69 | - name: Debug copy_ssh_pub_key 70 | ansible.builtin.debug: 71 | var: copy_ssh_pub_key 72 | - name: Fix /etc/hostname 73 | delegate_to: localhost 74 | become: true 75 | ansible.builtin.copy: 76 | content: "{{ inventory_hostname }}" 77 | dest: "{{ image_armbian_host_directory }}/rootfs/etc/hostname" 78 | mode: 
0644 79 | changed_when: false 80 | register: fix_hostname 81 | - name: Debug fix_hostname 82 | ansible.builtin.debug: 83 | var: fix_hostname 84 | - name: Fix /etc/hosts 85 | delegate_to: localhost 86 | become: true 87 | ansible.builtin.replace: 88 | path: "{{ image_armbian_host_directory }}/rootfs/etc/hosts" 89 | regexp: rock64 90 | replace: "{{ inventory_hostname }}" 91 | changed_when: false 92 | register: fix_hosts 93 | - name: Debug fix_hosts 94 | ansible.builtin.debug: 95 | var: fix_hosts 96 | - name: "Set custom MAC address" 97 | delegate_to: localhost 98 | when: homecloud_node_mac | length > 0 99 | become: true 100 | ansible.builtin.replace: 101 | path: "{{ image_armbian_host_directory }}/rootfs/lib/armbian/armbian-firstrun-config" 102 | regexp: "\\$\\{FIXED_IP_SETTINGS\\}$" 103 | replace: "${FIXED_IP_SETTINGS} ethernet.cloned-mac-address \"{{ homecloud_node_mac }}\" -ethernet.mac-address \"\"" 104 | changed_when: false 105 | always: 106 | - name: Unmount host image 107 | delegate_to: localhost 108 | become: true 109 | ansible.builtin.script: "files/mounter.sh -u -hd {{ image_armbian_host_directory }}" 110 | args: 111 | executable: /bin/bash 112 | changed_when: false 113 | register: umount_image 114 | - name: Debug umount_image 115 | ansible.builtin.debug: 116 | var: umount_image 117 | -------------------------------------------------------------------------------- /collection/roles/image_ubuntu_raspi/tasks/patch-host-image.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Clean host directory {{ image_ubuntu_raspi_host_directory }}" 4 | delegate_to: localhost 5 | become: true 6 | when: image_ubuntu_raspi_clean_working_directory 7 | ansible.builtin.script: "files/mounter.sh -c -hd {{ image_ubuntu_raspi_host_directory }}" 8 | args: 9 | executable: /bin/bash 10 | changed_when: false 11 | 12 | - name: "Prepare host directory {{ image_ubuntu_raspi_host_directory }}" 13 | delegate_to: localhost 14 | 
become: true 15 | when: image_ubuntu_raspi_clean_working_directory 16 | ansible.builtin.script: "files/mounter.sh -p -hd {{ image_ubuntu_raspi_host_directory }} -bi {{ image_ubuntu_raspi_base_image_path }}" 17 | args: 18 | executable: /bin/bash 19 | changed_when: false 20 | 21 | - name: "Customize the ubuntu image" 22 | block: 23 | - name: "Mount host image" 24 | delegate_to: localhost 25 | become: true 26 | ansible.builtin.script: "files/mounter.sh -m -hd {{ image_ubuntu_raspi_host_directory }}" 27 | args: 28 | executable: /bin/bash 29 | changed_when: false 30 | register: mount_image 31 | - name: "Set network-config" 32 | delegate_to: localhost 33 | become: true 34 | ansible.builtin.template: 35 | src: network-config 36 | dest: "{{ image_ubuntu_raspi_host_directory }}/bootfs/network-config" 37 | mode: 0644 38 | changed_when: false 39 | - name: "Read cmdline.txt" 40 | delegate_to: localhost 41 | become: true 42 | ansible.builtin.set_fact: 43 | file_cmdline_txt_content: "{{ lookup('file', image_ubuntu_raspi_host_directory~'/bootfs/cmdline.txt') }}" 44 | changed_when: false 45 | - name: "Write cmdline.txt" 46 | delegate_to: localhost 47 | become: true 48 | ansible.builtin.copy: 49 | content: "cgroup_enable=cpuset cgroup_enable=memory cgroup_memory=1 {{ file_cmdline_txt_content | trim }}" 50 | dest: "{{ image_ubuntu_raspi_host_directory }}/bootfs/cmdline.txt" 51 | mode: 0644 52 | changed_when: false 53 | register: write_cmdline 54 | - name: "Change username" 55 | delegate_to: localhost 56 | become: true 57 | ansible.builtin.lineinfile: 58 | path: "{{ image_ubuntu_raspi_host_directory }}/rootfs/etc/cloud/cloud.cfg" 59 | regexp: "^ name: ubuntu" 60 | line: " name: {{ image_ubuntu_raspi_username }}" 61 | changed_when: false 62 | - name: "Change gecos" 63 | delegate_to: localhost 64 | become: true 65 | ansible.builtin.lineinfile: 66 | path: "{{ image_ubuntu_raspi_host_directory }}/rootfs/etc/cloud/cloud.cfg" 67 | regexp: "^ gecos: Ubuntu" 68 | line: " gecos: {{ 
image_ubuntu_raspi_username }}" 69 | changed_when: false 70 | - name: "Configure set-passwords" 71 | delegate_to: localhost 72 | become: true 73 | ansible.builtin.lineinfile: 74 | path: "{{ image_ubuntu_raspi_host_directory }}/rootfs/etc/cloud/cloud.cfg" 75 | insertafter: "^ - default" 76 | line: | 77 | # set-passwords 78 | ssh_pwauth: no 79 | chpasswd: 80 | expire: false 81 | list: 82 | - {{ image_ubuntu_raspi_username }}:RANDOM 83 | changed_when: false 84 | - name: "Configure ssh keys" 85 | delegate_to: localhost 86 | become: true 87 | ansible.builtin.lineinfile: 88 | path: "{{ image_ubuntu_raspi_host_directory }}/rootfs/etc/cloud/cloud.cfg" 89 | insertafter: "^ lock_passwd: True" 90 | line: | 91 | # ssh_authorized_keys 92 | ssh_authorized_keys: 93 | - {{ lookup('file', image_ubuntu_raspi_ssh_pub_key_path) | trim }} 94 | changed_when: false 95 | - name: "Configure hostname" 96 | delegate_to: localhost 97 | become: true 98 | ansible.builtin.lineinfile: 99 | path: "{{ image_ubuntu_raspi_host_directory }}/rootfs/etc/cloud/cloud.cfg" 100 | insertafter: "^preserve_hostname: false" 101 | line: "hostname: {{ inventory_hostname }}" 102 | changed_when: false 103 | always: 104 | - name: "Unmount host image" 105 | delegate_to: localhost 106 | become: true 107 | ansible.builtin.script: "files/mounter.sh -u -hd {{ image_ubuntu_raspi_host_directory }}" 108 | args: 109 | executable: /bin/bash 110 | changed_when: false 111 | register: umount_image 112 | # - name: Debug umount_image 113 | # ansible.builtin.debug: 114 | # var: umount_image 115 | -------------------------------------------------------------------------------- /molecule/resources/tasks/verify-k3s_longhorn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Verify Longhorn UI 4 | block: 5 | - name: Configure endpoint verifier 6 | ansible.builtin.set_fact: 7 | k8s_namespace: longhorn-system 8 | k8s_deploy_name: longhorn-ui 9 | k8s_service_name: 
longhorn-frontend-external 10 | k8s_url_protocol: http 11 | k8s_expected_status: HTTP/1.1 200 OK 12 | - name: Include task 13 | ansible.builtin.include_tasks: verify-k8s-http-endpoint.yml 14 | 15 | - name: Prepare testing environment 16 | block: 17 | - name: Create working directory 18 | ansible.builtin.file: 19 | dest: /tmp/resources/deployments 20 | state: directory 21 | mode: '755' 22 | - name: Clean resources 23 | become: true 24 | ansible.builtin.command: kubectl delete \ 25 | -f /tmp/resources/test-longhorn-rwo-step2.yml \ 26 | -f /tmp/resources/test-longhorn-rwo-step1.yml \ 27 | -f /tmp/resources/test-longhorn-rwx-step3.yml \ 28 | -f /tmp/resources/test-longhorn-rwx-step2.yml \ 29 | -f /tmp/resources/test-longhorn-rwx-step1.yml 30 | changed_when: false 31 | failed_when: false 32 | - name: Copy deployment manifests 33 | ansible.builtin.copy: 34 | src: ../deployments/ 35 | dest: /tmp/resources/ 36 | mode: '644' 37 | 38 | - name: Verify PersistentVolumeClaim - ReadWriteOnce 39 | block: 40 | - name: ReadWriteOnce - Apply step1 41 | become: true 42 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-longhorn-rwo-step1.yml 43 | changed_when: false 44 | - name: ReadWriteOnce - Wait for step1 45 | become: true 46 | ansible.builtin.command: kubectl get pod test-longhorn-rwo-step1 -o jsonpath='{.status.phase}' 47 | changed_when: false 48 | register: kubcetl_result 49 | delay: 2 50 | retries: 60 51 | until: "'Succeeded' in kubcetl_result.stdout" 52 | - name: ReadWriteOnce - Apply step2 53 | become: true 54 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-longhorn-rwo-step2.yml 55 | changed_when: false 56 | - name: ReadWriteOnce - Wait for step2 57 | become: true 58 | ansible.builtin.command: kubectl get pod test-longhorn-rwo-step2 -o jsonpath='{.status.phase}' 59 | changed_when: false 60 | register: kubcetl_result 61 | delay: 2 62 | retries: 60 63 | until: "'Succeeded' in kubcetl_result.stdout" 64 | - name: ReadWriteOnce - Get step2 output 65 
| become: true 66 | ansible.builtin.command: kubectl logs test-longhorn-rwo-step2 67 | changed_when: false 68 | register: step2_result 69 | - name: ReadWriteOnce - Verify logs 70 | ansible.builtin.assert: 71 | quiet: true 72 | that: 73 | - step2_result.stdout_lines[0] | trim == 'hello from step1' 74 | 75 | - name: Verify PersistentVolumeClaim - ReadWriteMany 76 | block: 77 | - name: ReadWriteMany - Apply step1 78 | become: true 79 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-longhorn-rwx-step1.yml 80 | changed_when: false 81 | - name: ReadWriteMany - Wait for step1 82 | become: true 83 | ansible.builtin.command: kubectl get pod test-longhorn-rwx-step1 -o jsonpath='{.status.phase}' 84 | changed_when: false 85 | register: kubcetl_result 86 | delay: 2 87 | retries: 60 88 | until: "'Succeeded' in kubcetl_result.stdout" 89 | - name: ReadWriteMany - Apply step2 90 | become: true 91 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-longhorn-rwx-step2.yml 92 | changed_when: false 93 | - name: ReadWriteMany - Wait for step2 94 | become: true 95 | ansible.builtin.command: kubectl get pod test-longhorn-rwx-step2 -o jsonpath='{.status.phase}' 96 | changed_when: false 97 | register: kubcetl_result 98 | delay: 2 99 | retries: 60 100 | until: "'Succeeded' in kubcetl_result.stdout" 101 | - name: ReadWriteMany - Apply step3 102 | become: true 103 | ansible.builtin.command: kubectl apply -f /tmp/resources/test-longhorn-rwx-step3.yml 104 | changed_when: false 105 | - name: ReadWriteMany - Wait for step3 106 | become: true 107 | ansible.builtin.command: kubectl get pod test-longhorn-rwx-step3 -o jsonpath='{.status.phase}' 108 | changed_when: false 109 | register: kubcetl_result 110 | delay: 2 111 | retries: 60 112 | until: "'Succeeded' in kubcetl_result.stdout" 113 | - name: ReadWriteMany - Get step3 output 114 | become: true 115 | ansible.builtin.command: kubectl logs test-longhorn-rwx-step3 116 | changed_when: false 117 | register: step3_result 118 | 
- name: ReadWriteMany - Verify logs 119 | ansible.builtin.assert: 120 | quiet: true 121 | that: 122 | - step3_result.stdout_lines[0] | trim == 'hello from step1' 123 | - step3_result.stdout_lines[1] | trim == 'hello from step2' 124 | -------------------------------------------------------------------------------- /collection/roles/k3s_dnas/templates/dnas-share.kustomize.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: dnas 6 | --- 7 | apiVersion: v1 8 | kind: Service 9 | metadata: 10 | labels: 11 | app: dnas-share 12 | name: dnas-share 13 | spec: 14 | selector: 15 | app: dnas-share 16 | type: ClusterIP 17 | ports: 18 | - name: netbios-ss 19 | port: 139 20 | protocol: TCP 21 | targetPort: netbios-ss 22 | - name: microsoft-ds 23 | port: 445 24 | protocol: TCP 25 | targetPort: microsoft-ds 26 | - port: 2049 27 | name: tcp-nfs 28 | protocol: TCP 29 | - port: 2049 30 | name: udp-nfs 31 | protocol: UDP 32 | - port: 111 33 | name: tcp-111 34 | protocol: TCP 35 | - port: 111 36 | name: udp-111 37 | protocol: UDP 38 | - port: 32765 39 | name: tcp-32765 40 | protocol: TCP 41 | - port: 32765 42 | name: udp-32765 43 | protocol: UDP 44 | - port: 32767 45 | name: tcp-32767 46 | protocol: TCP 47 | - port: 32767 48 | name: udp-32767 49 | protocol: UDP 50 | --- 51 | apiVersion: apps/v1 52 | kind: Deployment 53 | metadata: 54 | labels: 55 | app: dnas-share 56 | name: dnas-share 57 | spec: 58 | replicas: 1 59 | selector: 60 | matchLabels: 61 | app: dnas-share 62 | strategy: 63 | type: Recreate 64 | template: 65 | metadata: 66 | labels: 67 | app: dnas-share 68 | spec: 69 | affinity: 70 | nodeAffinity: 71 | preferredDuringSchedulingIgnoredDuringExecution: 72 | - preference: 73 | matchExpressions: 74 | - key: "dnas.morin.io/node" 75 | operator: In 76 | values: 77 | - "true" 78 | - key: "dnas.morin.io/hostname" 79 | operator: In 80 | values: 81 | - "{{ item.ansible_hostname }}" 82 
| weight: 1 83 | requiredDuringSchedulingIgnoredDuringExecution: 84 | nodeSelectorTerms: 85 | - matchExpressions: 86 | - key: "dnas.morin.io/node" 87 | operator: In 88 | values: 89 | - "true" 90 | - key: "dnas.morin.io/hostname" 91 | operator: In 92 | values: 93 | - "{{ item.ansible_hostname }}" 94 | containers: 95 | - image: klutchell/nfs-server 96 | name: nfs 97 | env: 98 | - name: NFS_EXPORT_0 99 | value: "/{{ dnas_username }} *(fsid=0,rw,sync,no_root_squash,no_subtree_check,insecure)" 100 | volumeMounts: 101 | - name: "{{ dnas_username }}" 102 | mountPath: "/{{ dnas_username }}" 103 | readOnly: false 104 | - name: modules 105 | mountPath: /lib/modules 106 | readOnly: true 107 | ports: 108 | - containerPort: 2049 109 | hostPort: 2049 110 | protocol: TCP 111 | - containerPort: 2049 112 | hostPort: 2049 113 | protocol: UDP 114 | - containerPort: 111 115 | hostPort: 111 116 | protocol: TCP 117 | - containerPort: 111 118 | hostPort: 111 119 | protocol: UDP 120 | - containerPort: 32765 121 | hostPort: 32765 122 | protocol: TCP 123 | - containerPort: 32765 124 | hostPort: 32765 125 | protocol: UDP 126 | - containerPort: 32767 127 | hostPort: 32767 128 | protocol: TCP 129 | - containerPort: 32767 130 | hostPort: 32767 131 | protocol: UDP 132 | securityContext: 133 | privileged: true 134 | capabilities: 135 | add: 136 | - SYS_ADMIN 137 | - SYS_MODULE 138 | - image: dperson/samba 139 | name: samba 140 | env: 141 | - name: USERID 142 | value: "{{ dnas_uid }}" 143 | - name: GROUPID 144 | value: "{{ dnas_gid }}" 145 | - name: USER 146 | value: "{{ dnas_username }};{{ dnas_password }}" 147 | - name: SHARE 148 | value: "{{ dnas_username }};/shares/{{ dnas_username }};no;no;no;{{ dnas_username }}" 149 | ports: 150 | - containerPort: 139 151 | name: netbios-ss 152 | protocol: TCP 153 | hostPort: 139 154 | - containerPort: 445 155 | name: microsoft-ds 156 | protocol: TCP 157 | hostPort: 445 158 | volumeMounts: 159 | - name: "{{ dnas_username }}" 160 | mountPath: "/shares/{{ 
dnas_username }}" 161 | readOnly: false 162 | volumes: 163 | - name: "{{ dnas_username }}" 164 | hostPath: 165 | path: "{{ dnas_mount_where }}" 166 | type: Directory 167 | - name: modules 168 | hostPath: 169 | path: /lib/modules 170 | type: Directory 171 | --- 172 | --------------------------------------------------------------------------------