├── .gitignore ├── ActiveDirectory └── readme.md ├── Ansible └── readme.md ├── Asustor ├── Docker │ ├── configs │ │ ├── alertmanager │ │ │ └── config.yml │ │ ├── grafana │ │ │ └── provisioning │ │ │ │ ├── dashboards │ │ │ │ ├── dashboard.yml │ │ │ │ ├── docker_containers.json │ │ │ │ ├── docker_host.json │ │ │ │ ├── monitor_services.json │ │ │ │ ├── pihole.json │ │ │ │ └── traefik.json │ │ │ │ └── datasources │ │ │ │ └── datasource.yml │ │ ├── homepage │ │ │ └── config │ │ │ │ ├── bookmarks.yaml │ │ │ │ ├── custom.css │ │ │ │ ├── custom.js │ │ │ │ ├── docker.yaml │ │ │ │ ├── kubernetes.yaml │ │ │ │ ├── services.yaml │ │ │ │ ├── settings.yaml │ │ │ │ └── widgets.yaml │ │ ├── pihole │ │ │ └── dnsmasq.d │ │ │ │ └── 07-dhcp-options │ │ ├── prometheus │ │ │ ├── alert.rules │ │ │ └── prometheus.yml │ │ ├── readme.md │ │ └── traefik │ │ │ ├── acme │ │ │ └── acme.json │ │ │ └── rules │ │ │ ├── asusstornas1-rtr.yaml │ │ │ ├── homelab-rtr.yaml │ │ │ ├── middlewares-buffer.yaml │ │ │ ├── middlewares-chain-no-auth.yaml │ │ │ ├── middlewares-rate.limit.yaml │ │ │ ├── middlewares-security-headers.yaml │ │ │ ├── nassell-rtr.yaml │ │ │ └── tls-opts.yml │ ├── data │ │ └── readme.md │ └── readme.md ├── docker-compose │ ├── .env │ ├── compose │ │ ├── cloud │ │ │ └── nextcloud.yaml │ │ ├── coding │ │ │ └── code-server.yaml │ │ ├── core │ │ │ ├── clamav.yaml │ │ │ ├── dozzle.yaml │ │ │ ├── homepage.yaml │ │ │ └── portainer.yaml │ │ ├── database │ │ │ ├── mariadb.yaml │ │ │ └── redis.yaml │ │ ├── monitoring │ │ │ ├── alertmanager.yaml │ │ │ ├── cadvisor.yaml │ │ │ ├── grafana.yaml │ │ │ ├── mariadbexporter.yaml │ │ │ ├── nextcloudexporter.yaml │ │ │ ├── nodeexporter.yaml │ │ │ ├── piholeexporter.yaml │ │ │ ├── prometheus.yaml │ │ │ ├── pushgateway.yaml │ │ │ └── redisexporter.yaml │ │ ├── network │ │ │ ├── cloudflared.yaml │ │ │ ├── dhcphelper.yaml │ │ │ ├── dockovpn.yaml │ │ │ ├── nginxproxymanager.yaml │ │ │ ├── pihole.yaml │ │ │ └── traefik.yaml │ │ └── tools │ │ │ ├── boinc.yaml │ │ │ ├── collabora.yaml │ │ │ ├── drawio.yaml │ │ │ ├── it-tools.yaml │ │ │ └── stirling-pdf.yaml │ ├── dhcp-helper │ │ └── dhcp-helper.dockerfile │ ├── docker-compose.yaml │ ├── readme.md │ └── secrets │ │ └── readme.md └── readme.md ├── CertbotCloudflare └── readme.md ├── Docker ├── NvidiaGPU │ └── readme.md ├── docker │ ├── .env │ ├── compose │ │ ├── databases │ │ │ ├── mariadb.yaml │ │ │ ├── postgresql.yaml │ │ │ └── valkey.yaml │ │ ├── management │ │ │ └── portainer.yaml │ │ ├── monitoring │ │ │ ├── alertmanager.yaml │ │ │ ├── cadvisor.yaml │ │ │ ├── dozzle.yaml │ │ │ ├── grafana.yaml │ │ │ ├── mariadb-exporter.yaml │ │ │ ├── node-exporter.yaml │ │ │ ├── pbs1-exporter.yaml │ │ │ ├── postgres-exporter.yaml │ │ │ ├── prometheus.yaml │ │ │ └── redis-exporter.yaml │ │ ├── network │ │ │ └── traefik.yaml │ │ ├── security │ │ │ ├── clamav.yaml │ │ │ ├── crowdsec.yaml │ │ │ ├── generate-indexer-certs.yml │ │ │ └── wazuh.yaml │ │ ├── storage │ │ │ └── minio.yaml │ │ └── tools │ │ │ ├── bookstack.yaml │ │ │ ├── collabora.yaml │ │ │ ├── cyberchef.yaml │ │ │ ├── drawio.yaml │ │ │ ├── exalidraw.yaml │ │ │ ├── gitlab.yaml │ │ │ ├── homepage.yaml │ │ │ ├── immich-ml.yaml │ │ │ ├── immich.yaml │ │ │ ├── it-tools.yaml │ │ │ ├── local-ai.yaml │ │ │ ├── netbox.yaml │ │ │ ├── nextcloud.yaml │ │ │ ├── roundcube.yaml │ │ │ ├── shlink.yaml │ │ │ ├── stirling-pdf.yaml │ │ │ └── talk.yaml │ ├── docker-compose.yaml │ └── readme.md ├── readme.md └── volumes │ └── DockerProd1 │ └── configs │ ├── alertmanager │ ├── alertmanager.yml │ └── config.yml │ ├── 
prometheus │ ├── alert-rules.yml │ └── prometheus.yml │ ├── traefik │ ├── acme │ │ └── acme.json │ └── rules │ │ ├── middlewares.yaml │ │ ├── routers.yaml │ │ ├── services.yaml │ │ └── tls-opts.yaml │ └── wazuh │ ├── certs.yml │ ├── wazuh_cluster │ └── wazuh_manager.conf │ ├── wazuh_dashboard │ ├── opensearch_dashboards.yml │ └── wazuh.yml │ ├── wazuh_indexer │ ├── internal_users.yml │ └── wazuh.indexer.yml │ └── wazuh_indexer_ssl_certs │ ├── admin-key.pem │ ├── admin.pem │ ├── root-ca.key │ ├── root-ca.pem │ ├── wazuh.dashboard-key.pem │ ├── wazuh.dashboard.pem │ ├── wazuh.indexer-key.pem │ └── wazuh.indexer.pem ├── Images ├── .gitkeep ├── Proxmox Network diagram.png └── Proxmox Storage diagram.png ├── Kubernetes ├── ActualBudget │ ├── readme.md │ └── yamls │ │ └── actual-budget.yaml ├── Asustornas1 │ ├── readme.md │ └── yamls │ │ └── ig-asustornas1.yaml ├── Authentik │ ├── readme.md │ └── yamls │ │ ├── ig-authentik-forwardauth.yaml │ │ ├── ig-authentik.yaml │ │ └── middleware-authentik.yaml ├── Automation │ ├── ArgoCD │ │ ├── readme.md │ │ └── yamls │ │ │ ├── argocd-redis-auth.yaml │ │ │ ├── argocd-values.yaml │ │ │ └── ig-argocd.yaml │ ├── ExternalSecrets │ │ ├── readme.md │ │ └── yamls │ │ │ ├── cluster-external-secret-example.yaml │ │ │ ├── cluster-secret-store.yaml │ │ │ └── external-secret-example.yaml │ ├── Gitlab │ │ ├── readme.md │ │ └── yamls │ │ │ ├── gitlab-values.yaml │ │ │ └── ig-gitlab.yaml │ ├── HashicorpVault │ │ ├── confs │ │ │ └── cert.config │ │ ├── readme.md │ │ └── yamls │ │ │ ├── hashicorp-vault-ha-values.yaml │ │ │ ├── hashicorp-vault-values.yaml │ │ │ ├── ig-hashicorp-vault-ha.yaml │ │ │ └── ig-hashicorp-vault.yaml │ └── Semaphore │ │ ├── readme.md │ │ └── yamls │ │ ├── ig-semaphore.yaml │ │ └── semaphoreui.yaml ├── Bookstack │ ├── readme.md │ └── yamls │ │ └── bookstack-deploy.yaml ├── Cert-manager │ ├── readme.md │ └── yamls │ │ ├── cert-manager-issuer-cfdns01.yaml │ │ └── cert.yaml ├── ClamAV │ └── readme.md ├── Cloudflare-Operator │ ├── readme.md │ └── yamls │ │ ├── 01-cloudflare-secret.yaml │ │ ├── 02-cloudflareaccount.yaml │ │ ├── 03-dynamic-IP.yaml │ │ ├── 04a-dns-a-dynamic-ip.yaml │ │ ├── 04b-dns-a-fixed-ip.yaml │ │ ├── 04c-dns-cname.yaml │ │ ├── 05-prometheus-pod-monitor.yaml │ │ └── 06-grafana-dashboard.json ├── Cluster │ ├── 01-Prepare-Machines │ │ ├── configs │ │ │ ├── 20auto-upgrades │ │ │ ├── 50-default.conf │ │ │ ├── 50unattended-upgrades │ │ │ ├── main.cf │ │ │ ├── sasl_passwd │ │ │ └── smtp_header_checks file │ │ └── readme.md │ ├── 02-External-Etcd │ │ ├── configs │ │ │ ├── etcd.service │ │ │ ├── etcd1.conf │ │ │ ├── etcd2.conf │ │ │ └── etcd3.conf │ │ └── readme.md │ ├── 03-High-Availability │ │ ├── configs │ │ │ ├── check_apiserver.sh │ │ │ ├── haproxy.cfg │ │ │ ├── keepalived_backup.conf │ │ │ ├── keepalived_master.conf │ │ │ └── sysctl.conf │ │ └── readme.md │ ├── 04-Kubernetes │ │ ├── configs │ │ │ ├── 99-kubernetes-cri.conf │ │ │ ├── config.toml │ │ │ ├── containerd.conf │ │ │ ├── custom-resources.yaml │ │ │ └── kubeadm-config.yaml │ │ └── readme.md │ ├── 04-Microk8s │ │ ├── readme.md │ │ └── yamls │ │ │ └── microk8s.yaml │ └── readme.md ├── Coding │ └── CodeServer │ │ ├── readme.md │ │ └── yamls │ │ └── code-server-deploy.yaml ├── Collabora │ ├── readme.md │ └── yamls │ │ ├── collabora-values.yaml │ │ └── ig-collabora.yaml ├── CommonCommands │ └── readme.md ├── Composecraft │ ├── readme.md │ └── yamls │ │ └── composecraft.yaml ├── Computing │ └── Boinc │ │ ├── readme.md │ │ └── yamls │ │ └── boinc-deploy.yaml ├── Crowdsec │ ├── readme.md │ 
└── yamls │ │ ├── crowdsec-traefik-bouncer-values.yaml │ │ └── crowdsec-values.yaml ├── Cyberchef │ ├── readme.md │ └── yamls │ │ └── ig-cyberchef.yaml ├── Dashboard │ ├── readme.md │ └── yamls │ │ ├── ig-kubernetes-dashboard.yaml │ │ ├── k8dashcrb.yaml │ │ ├── k8dashsa.yaml │ │ ├── k8sdash.yaml │ │ └── recommended.yaml ├── Database │ ├── CloudBeaver │ │ ├── readme.md │ │ └── yamls │ │ │ └── cloudbeaver.yaml │ ├── Couchdb │ │ ├── readme.md │ │ └── yamls │ │ │ └── couchdb-traefik.yaml │ ├── Dbgate │ │ ├── readme.md │ │ └── yamls │ │ │ └── dbgate.yaml │ ├── Mariadb │ │ ├── cnf │ │ │ └── proxysql-mariadb.cnf │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-mariadb.yaml │ │ │ ├── phpmyadmin-mariadb.yaml │ │ │ └── proxysql-mariadb.yaml │ ├── Memcached │ │ ├── readme.md │ │ └── yamls │ │ │ └── ig-memcached.yaml │ ├── Mongodb │ │ └── readme.md │ ├── Mysql │ │ ├── cnf │ │ │ └── proxysql-mysql.cnf │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-mysql.yaml │ │ │ ├── phpmyadmin-mysql.yaml │ │ │ └── proxysql-mysql.yaml │ ├── Postgresql │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-postgresql.yaml │ │ │ └── pgadmin.yaml │ ├── PostgresqlHA │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-postgresql.yaml │ │ │ └── pgadmin.yaml │ ├── Redis │ │ ├── readme.md │ │ └── yamls │ │ │ └── ig-redis.yaml │ └── readme.md ├── Descheduler │ ├── readme.md │ └── yamls │ │ └── descheduler-values.yaml ├── Drawio │ ├── readme.md │ └── yamls │ │ └── drawio.yaml ├── FireflyIII │ ├── readme.md │ └── yamls │ │ ├── fireflyIII.yaml │ │ └── fireflyIIIimporter.yaml ├── FreshRSS │ ├── readme.md │ └── yamls │ │ └── freshrss.yaml ├── Groundcover │ ├── readme.md │ └── yamls │ │ └── groundcover-values.yaml ├── Heimdall-dashboard │ └── readme.md ├── Helm │ └── readme.md ├── Homepage │ ├── images │ │ └── gethomepage.png │ ├── readme.md │ └── yamls │ │ ├── gethomepage-values.yaml │ │ ├── homepage-cm.yaml │ │ ├── homepage.yaml │ │ └── ig-homepage.yaml ├── Homer │ ├── assets │ │ ├── config.yml │ │ └── tools │ │ │ ├── datree.png │ │ │ ├── dell.png │ │ │ ├── grafana.svg │ │ │ ├── haproxy.png │ │ │ ├── kubernetesdashboard.svg │ │ │ ├── longhorn.png │ │ │ ├── portainer.svg │ │ │ ├── prometheus.png │ │ │ ├── proxmox.png │ │ │ ├── traefik.svg │ │ │ └── uptimekuma.svg │ ├── readme.md │ └── yaml │ │ └── homer.yaml ├── Hosting │ ├── readme.md │ └── yamls │ │ └── ig-hosting.yaml ├── Intel-GPU │ └── readme.md ├── It-tools │ ├── readme.md │ └── yamls │ │ └── it-tools.yaml ├── K8sBackup │ └── Velero │ │ ├── readme.md │ │ └── yamls │ │ └── velero-values.yaml ├── Karakeep │ ├── readme.md │ └── yamls │ │ └── karakeep.yaml ├── Kubectl │ └── readme.md ├── Kubescape │ └── readme.md ├── Kubetail │ ├── readme.md │ └── yamls │ │ └── ig-kubetail.yaml ├── Kured │ └── readme.md ├── LocalAI │ ├── readme.md │ └── yamls │ │ └── ig-localai.yaml ├── Media-Server │ ├── readme.md │ └── yamls │ │ ├── media-server-deployment.yaml │ │ └── media-server-jellyfin-deployment.yaml ├── Metallb │ ├── readme.md │ └── yamls │ │ ├── 01-kubeproxy-config-map.yaml │ │ └── metallb-pool.yaml ├── Metrics-Server │ ├── readme.md │ └── yamls │ │ └── high-availability.yaml ├── Multus │ └── readme.md ├── Netbox │ ├── readme.md │ └── yamls │ │ ├── ig-netbox-external.yaml │ │ └── ig-netbox.yaml ├── Nextcloud │ ├── readme.md │ └── yamls │ │ └── nextcloud-deploy.yaml ├── Node-Feature-Discovery │ └── readme.md ├── Nvidia-GPU │ ├── readme.md │ └── yamls │ │ ├── gpu-operator-values-vgpu.yaml │ │ └── gpu-operator-values.yaml ├── Openproject │ ├── readme.md │ └── yamls │ │ ├── ig-openproject.yaml │ │ └── 
openproject-values.yaml ├── Paperless-ngx │ ├── readme.md │ └── yamls │ │ └── paperless-ngx.yaml ├── Portainer │ ├── readme.md │ └── yamls │ │ └── ig-portainer.yaml ├── Privatebin │ ├── cfg │ │ └── cfg.php │ ├── readme.md │ └── yamls │ │ └── privatebin.yaml ├── Prometheus-Stack │ ├── Authentik │ │ ├── Dashboards │ │ │ └── grafana-authentik.json │ │ └── readme.md │ ├── Automation │ │ ├── ArgoCD │ │ │ ├── Dashboards │ │ │ │ ├── grafana-argocd-application-overview.json │ │ │ │ ├── grafana-argocd-notifications-overview.json │ │ │ │ ├── grafana-argocd-operational-overview.json │ │ │ │ └── grafana-argocd.json │ │ │ └── readme.md │ │ └── Vault │ │ │ ├── Dashboards │ │ │ └── grafana-vault.yaml │ │ │ └── readme.md │ ├── Calico │ │ ├── Dashboard │ │ │ ├── grafana-felix.json │ │ │ └── grafana-typha.json │ │ └── readme.md │ ├── Cert-manager │ │ ├── Dashboards │ │ │ └── grafana-certmanager.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── podmonitor-certmanager.yaml │ ├── Crowdsec │ │ ├── Dashboards │ │ │ ├── crowdsec-details-per-machine-old.json │ │ │ ├── crowdsec-details-per-machine.json │ │ │ ├── crowdsec-insight-old.json │ │ │ ├── crowdsec-insight.json │ │ │ ├── crowdsec-lapi-old.json │ │ │ ├── crowdsec-lapi.json │ │ │ ├── crowdsec-overview-old.json │ │ │ └── crowdsec-overview.json │ │ └── readme.md │ ├── Database │ │ ├── Couchdb │ │ │ ├── Dashboards │ │ │ │ ├── grafana-couchdb.json │ │ │ │ └── grafana-couchdb2.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ │ └── prom-couchdb.yaml │ │ ├── Mariadb │ │ │ ├── Dashboard │ │ │ │ └── grafana-mysql.json │ │ │ └── readme.md │ │ ├── Memcached │ │ │ ├── Dashboards │ │ │ │ ├── Memcached.json │ │ │ │ ├── Memcached2.json │ │ │ │ ├── Memcached3.json │ │ │ │ └── memcached_pods.json │ │ │ └── readme.md │ │ ├── Mongodb │ │ │ ├── Dashboards │ │ │ │ ├── grafana-mongodb.json │ │ │ │ └── grafana-mongodb2.json │ │ │ └── readme.md │ │ ├── Mysql │ │ │ ├── Dashboard │ │ │ │ └── grafana-mysql.json │ │ │ └── readme.md │ │ ├── Postgresql │ │ │ ├── Dashboard │ │ │ │ └── grafana-postgresql.json │ │ │ └── readme.md │ │ └── Redis │ │ │ ├── Dashboard │ │ │ └── grafana-redis.json │ │ │ └── readme.md │ ├── ExternalEtcd │ │ ├── Dashboard │ │ │ ├── etcd-cluster-overview.json │ │ │ └── etcd-cluster.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── prom-etcd.yaml │ ├── Fritzbox-Eporter │ │ ├── Dashboard │ │ │ └── grafana-fritzbox.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── prom-fritzbox.yaml │ ├── Grafana-dashboards │ │ └── etcd.json │ ├── Haproxy-Monitoring │ │ ├── Dashboard │ │ │ └── grafana-haproxy.json │ │ ├── readme.md │ │ └── yamls │ │ │ ├── prom-haproxy1.yaml │ │ │ ├── prom-haproxy2.yaml │ │ │ ├── prom-haproxy3.yaml │ │ │ ├── traefik-haproxy1-svc.yaml │ │ │ ├── traefik-haproxy1.yaml │ │ │ ├── traefik-haproxy2-svc.yaml │ │ │ ├── traefik-haproxy2.yaml │ │ │ ├── traefik-haproxy3-svc.yaml │ │ │ └── traefik-haproxy3.yaml │ ├── K8sBackup │ │ └── Velero │ │ │ ├── Dashboards │ │ │ └── grafana-velero.json │ │ │ └── readme.md │ ├── Kubescape │ │ ├── Dashboard │ │ │ └── grafana-kubescape.json │ │ └── readme.md │ ├── Kured │ │ ├── Dashboard │ │ │ └── grafana-kured.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── servicemonitor-kured.yaml │ ├── Linux │ │ ├── Dashboards │ │ │ └── grafana-node-exporter.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── sm-node-exporter.yaml │ ├── Metallb │ │ ├── Dashboard │ │ │ └── grafana-metallb.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── prom-metallb.yaml │ ├── Nextcloud │ │ ├── Dashboards │ │ │ └── grafana-nextcloud.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── 
sm-nextcloud.yaml │ ├── NvidiaGPU │ │ ├── Dashboard │ │ │ ├── grafana-nvidia-gpu.json │ │ │ └── grafana-nvidia-vgpu.json │ │ └── readme.md │ ├── PfSense │ │ ├── Dashboards │ │ │ ├── grafana-pfsense.json │ │ │ └── grafana-pfsense2.json │ │ └── readme.md │ ├── Prometheus-snmp │ │ ├── Dashboard │ │ │ ├── grafana-arista.json │ │ │ └── grafana-idrac.json │ │ ├── MIBs │ │ │ └── iDRAC-SMIv2.mib │ │ ├── readme.md │ │ └── yamls │ │ │ ├── snmp-exporter-arista-switch.yaml │ │ │ ├── snmp-exporter-idrac.yaml │ │ │ └── snmp.yml │ ├── Proxmox-Backup-Monitoring │ │ ├── Dashboard │ │ │ ├── grafana-pbs-v2.json │ │ │ └── grafana-proxmox-backup-server.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── exporter-pbs.yaml │ ├── Proxmox-Monitoring │ │ ├── Dashboard │ │ │ └── grafana-proxmox.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── prom-proxmox.yaml │ ├── Pushgateway │ │ ├── readme.md │ │ └── yaml │ │ │ └── ig-pushgateway.yaml │ ├── Storage │ │ ├── Ceph │ │ │ ├── Dashboards │ │ │ │ ├── ceph-cluster.json │ │ │ │ ├── cephfs-overview.json │ │ │ │ ├── osd-device-details.json │ │ │ │ ├── osds-overview.json │ │ │ │ ├── pool-detail.json │ │ │ │ ├── pools-overview.json │ │ │ │ ├── rbd-details.json │ │ │ │ └── rbd-overview.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ │ ├── prom-ceph.yaml │ │ │ │ └── prometheus_alerts.yaml │ │ ├── Glusterfs │ │ │ ├── Dashboard │ │ │ │ └── grafana-glusterfs.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ │ └── sm-gluster-exporter.yaml │ │ ├── Longhorn │ │ │ ├── Dashboard │ │ │ │ └── grafana-longhorn.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ │ └── sm-longhorn.yaml │ │ ├── Minio │ │ │ ├── Dashboards │ │ │ │ ├── minio-bucket-dashboard.json │ │ │ │ ├── minio-dashboard.json │ │ │ │ ├── minio-node-dashboard.json │ │ │ │ └── minio-replication-dashboard.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ │ └── sm-minio.yaml │ │ └── NFS-server │ │ │ ├── Dashboard │ │ │ └── grafana-nfs-cluster.json │ │ │ ├── readme.md │ │ │ └── yamls │ │ │ └── sm-nodeexporter.yaml │ ├── Teleport │ │ ├── Grafana │ │ │ └── grafana-teleport-cluster.json │ │ └── readme.md │ ├── Traefik │ │ ├── Dashboards │ │ │ ├── grafana-traefik.json │ │ │ └── grafana-traefik3.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── prom-traefik.yaml │ ├── Uptime-kuma │ │ ├── Dashboard │ │ │ └── grafana-uk.json │ │ ├── readme.md │ │ └── yamls │ │ │ └── sm-ukuma.yaml │ ├── readme.md │ └── yamls │ │ ├── ig-kube-prometheus-stack.yaml │ │ └── kube-prometheus-stack-values.yaml ├── QRCodeGenerator │ ├── readme.md │ └── yamls │ │ └── qr-code-generator.yaml ├── Rancher │ └── readme.md ├── Reflector │ └── readme.md ├── Roundcube │ ├── readme.md │ └── yamls │ │ └── roundcube.yaml ├── Shlink │ ├── readme.md │ └── yamls │ │ ├── shlink-web-client.yaml │ │ └── shlink.yaml ├── Stirling-PDF │ ├── readme.md │ └── yamls │ │ └── s-pdf.yaml ├── Storage │ ├── Glusterfs │ │ ├── readme.md │ │ └── yamls │ │ │ ├── 01-endpoints.yaml │ │ │ ├── 02a-direct.yaml │ │ │ ├── 02b1-presistent-volume.yaml │ │ │ └── 02b2-persistent-volume-pod.yaml │ ├── Longhorn │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-longhorn.yaml │ │ │ ├── longhorn-nvme-sc.yaml │ │ │ └── longhorn-values.yaml │ ├── MinIO │ │ ├── readme.md │ │ └── yamls │ │ │ ├── ig-minio-operator.yaml │ │ │ ├── ig-minio-tenant.yaml │ │ │ ├── minio-operator-values.yaml │ │ │ └── minio-tenant-values.yaml │ ├── NFS │ │ ├── readme.md │ │ └── yamls │ │ │ ├── nfs-test.yaml │ │ │ ├── sc-csi-nfs.yaml │ │ │ └── snapshotclass-csi-nfs.yaml │ ├── OpenEBS │ │ └── readme.md │ ├── Rook │ │ ├── readme.md │ │ └── yamls │ │ │ ├── 
rook-mysql.yaml │ │ │ ├── rook-sc-nvme1tb.yaml │ │ │ ├── rook-sc-nvme2tb.yaml │ │ │ ├── rook-sc-ssd2t.yaml │ │ │ └── rook-wordpress.yaml │ └── readme.md ├── Teleport │ ├── readme.md │ └── yaml │ │ ├── ig-teleport.yaml │ │ ├── teleport-agent-cm.yaml │ │ ├── teleport-cluster-values.yaml │ │ ├── teleport-clusterip.yaml │ │ ├── teleport-kube-agent-15-values.yaml │ │ ├── teleport-kube-agent-values.yaml │ │ ├── teleport-loadbalancer.yaml │ │ └── teleport-ssl-cert.yaml ├── Traefik │ ├── readme.md │ └── yamls │ │ ├── ig-traefik.yaml │ │ └── traefik-values.yaml ├── Truenas │ ├── readme.md │ └── yamls │ │ └── ig-truenas.yaml ├── Uptimekuma │ ├── readme.md │ └── yamls │ │ └── ig-uptimekuma.yaml ├── Vaultwarden │ ├── readme.md │ └── yamls │ │ └── vaultwarden-deploy.yaml ├── Wazuh │ └── readme.md ├── k8tz │ └── readme.md ├── kubernetes-01.md ├── kubernetes-02.md └── readme.md ├── LICENSE ├── LinuxTips.md ├── NUT └── readme.md ├── Nethserver └── readme.md ├── Network ├── .$NetworkDiagram.drawio.bkp ├── NetworkDiagram.drawio ├── Switch │ ├── Arista.md │ ├── Cisco.md │ ├── readme.md │ └── switch.conf ├── V2 │ ├── Network2.drawio │ └── readme.md ├── network.drawio.svg └── readme.md ├── Proxmox ├── Ceph.md ├── Cloud-init.md ├── Clustering.md ├── Dell3rdPartyHardwareCheck.md ├── Installation.md ├── Network.md ├── Openmanage.md ├── Optimizations.md ├── Passtrhough.md ├── VirtualMachines.md ├── iDRACservicemodule.md ├── ipmitool.md ├── network │ └── interfaces └── readme.md ├── README.md ├── Rack ├── RackDiagram.drawio └── RackDiagram.png ├── Storage ├── Glusterfs │ └── readme.md ├── LVM │ └── readme.md ├── Microceph │ ├── readme.md │ └── yamls │ │ ├── ceph-dashboard-router.yaml │ │ └── ig-ceph-dashboard.yaml ├── NFS Cluster │ └── readme.md └── iSCSI │ └── readme.md ├── Swarm └── readme.md ├── TruenasSCALE └── readme.md ├── bashrc.md ├── homeassistant.md ├── postgres_ha.md ├── proxmox.md ├── ubuntuovpn.md └── zfs.md /ActiveDirectory/readme.md: -------------------------------------------------------------------------------- 1 | # Active Directory 2 | 3 | IP: 10.0.50.15 4 | 5 | Domain: ad.urbaman.it 6 | -------------------------------------------------------------------------------- /Ansible/readme.md: -------------------------------------------------------------------------------- 1 | # Use Ansible and Ansible Semaphore UI 2 | 3 | ## Install ansible and semaphore 4 | 5 | ```bash 6 | sudo apt install ansible 7 | sudo snap install semaphore 8 | ``` 9 | 10 | ## Setup semaphore 11 | 12 | ```bash 13 | sudo snap stop semaphore 14 | sudo semaphore user add --admin \ 15 | --login john \ 16 | --name=John \ 17 | --email=john1996@gmail.com \ 18 | --password=12345 19 | sudo snap start semaphore 20 | ``` 21 | 22 | Go to `http://<server-ip>:3000`; optionally put Traefik or Nginx in front of it to proxy with SSL 23 | 24 | ## Configure and test Semaphore 25 | 26 | - Create a repo on GitHub and follow the instructions to add an SSH key for authenticating to GitHub 27 | - Add Keys to Semaphore: one with the private key used to authenticate to GitHub, others with user/pass or key authentication for the machines to manage 28 | - Create a default environment; it can be empty (put `{}` in both fields) 29 | - Create a repository, using the SSH URL of your repo and the GitHub key created previously 30 | - Create one or more inventories, at least one per authentication key, selecting the corresponding authentication key 31 | - Create one test template per inventory, pointing to a ping playbook in your repo (see the example below), and run it to test connectivity.
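A minimal sketch of such a ping playbook (the file name and host group are placeholders, not files from this repo; adjust `hosts` to match your inventory):

```yaml
# ping.yaml - connectivity smoke test for a Semaphore test template
- name: Check connectivity to all managed hosts
  hosts: all
  gather_facts: false
  tasks:
    - name: Ping each host over the configured connection
      ansible.builtin.ping:
```

If the run turns green for every host, the key, inventory and repository are wired up correctly.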
32 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/alertmanager/config.yml: -------------------------------------------------------------------------------- 1 | route: 2 | group_by: ['namespace'] 3 | group_wait: 30s 4 | group_interval: 5m 5 | repeat_interval: 12h 6 | receiver: 'null' 7 | routes: 8 | - receiver: 'null' 9 | matchers: 10 | - alertname =~ "InfoInhibitor|Watchdog" 11 | - receiver: 'mail' 12 | receivers: 13 | - name: 'null' 14 | - name: 'mail' 15 | email_configs: 16 | - to: 'admin@domain.com' 17 | from: 'admin@domain.com' 18 | smarthost: smtp.domain.com:587 19 | auth_username: 'admin@domain.com' 20 | auth_identity: 'admin@domain.com' 21 | auth_password: 'password' 22 | send_resolved: true 23 | tls_config: 24 | insecure_skip_verify: true 25 | headers: 26 | subject: 'Prometheus Mail Alerts' 27 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/grafana/provisioning/dashboards/dashboard.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'Prometheus' 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | editable: true 10 | allowUiUpdates: true 11 | options: 12 | path: /etc/grafana/provisioning/dashboards 13 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/grafana/provisioning/datasources/datasource.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | basicAuth: false 10 | isDefault: true 11 | editable: true 12 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/bookmarks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://gethomepage.dev/latest/configs/bookmarks 4 | 5 | - Deployment notes: 6 | - Github: 7 | - abbr: HL 8 | href: https://github.com/urbaman/HomeLab 9 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/custom.css: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Asustor/Docker/configs/homepage/config/custom.css -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/custom.js: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Asustor/Docker/configs/homepage/config/custom.js -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/docker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://gethomepage.dev/latest/configs/docker/ 4 | 5 | # my-docker: 6 | # host: 127.0.0.1 7 | # port: 2375 8 | 9 | my-docker: 10 | socket: /var/run/docker.sock 11 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/kubernetes.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # sample kubernetes config 3 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/settings.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://gethomepage.dev/latest/configs/settings 4 | 5 | 6 | title: My Homelab Homepage 7 | language: it 8 | showStats: true 9 | quicklaunch: 10 | searchDescriptions: true 11 | hideInternetSearch: true 12 | hideVisitURL: true 13 | headerStyle: boxed 14 | 15 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/homepage/config/widgets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # For configuration options and examples, please see: 3 | # https://gethomepage.dev/latest/configs/service-widgets 4 | 5 | - logo: 6 | icon: mdi-home-outline-#0328fc 7 | - resources: 8 | cpu: true 9 | memory: true 10 | disk: / 11 | expanded: true 12 | cputemp: true 13 | uptime: true 14 | - openmeteo: 15 | label: Anzola dell'Emilia 16 | latitude: 44.5473 17 | longitude: 11.1956 18 | timezone: Europe/Rome 19 | units: metric 20 | cache: 5 21 | - datetime: 22 | text_size: xl 23 | format: 24 | dateStyle: long 25 | timeStyle: long 26 | hourCycle: h23 27 | - search: 28 | provider: google 29 | target: _blank 30 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/pihole/dnsmasq.d/07-dhcp-options: -------------------------------------------------------------------------------- 1 | dhcp-option=option:dns-server,192.168.1.31 -------------------------------------------------------------------------------- /Asustor/Docker/configs/readme.md: -------------------------------------------------------------------------------- 1 | # Docker app configs 2 | 3 | Here are all of the docker app configs. 
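Each subdirectory here is bind-mounted into its container by the corresponding compose file through the `CONFIGDIR` variable from `.env` (`/volume1/Docker/configs` on the NAS). For example, the alertmanager service maps its config directory like this (taken from the compose files further below, repeated here only to illustrate the mapping):

```yaml
services:
  alertmanager:
    volumes:
      - $CONFIGDIR/alertmanager:/etc/alertmanager   # -> /volume1/Docker/configs/alertmanager
```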
4 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/acme/acme.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Asustor/Docker/configs/traefik/acme/acme.json -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/asusstornas1-rtr.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | asustornas1-rtr: 4 | rule: "Host(`asustornas1.{{env "DOMAINNAME_1"}}`)" 5 | entryPoints: 6 | - websecure 7 | middlewares: 8 | - middlewares-chain-no-auth 9 | service: asustornas1-svc 10 | tls: 11 | certResolver: dns-cloudflare 12 | options: tls-opts@file 13 | services: 14 | asustornas1-svc: 15 | loadBalancer: 16 | servers: 17 | - url: "https://192.168.1.31:8001" 18 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/middlewares-buffer.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | middlewares-buffer: 4 | buffering: 5 | maxRequestBodyBytes: 16000000 6 | memRequestBodyBytes: 16000000 7 | maxResponseBodyBytes: 16000000 8 | memResponseBodyBytes: 1600000 9 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/middlewares-chain-no-auth.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | middlewares-chain-no-auth: 4 | chain: 5 | middlewares: 6 | - middlewares-rate-limit 7 | - middlewares-secure-headers 8 | # - middlewares-compress 9 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/middlewares-rate.limit.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | middlewares-rate-limit: 4 | rateLimit: 5 | average: 100 6 | burst: 50 7 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/nassell-rtr.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | routers: 3 | nasshell-rtr: 4 | rule: "Host(`nasshell.{{env "DOMAINNAME_1"}}`)" 5 | entryPoints: 6 | - websecure 7 | middlewares: 8 | - middlewares-chain-no-auth 9 | service: nasshell-svc 10 | tls: 11 | certResolver: dns-cloudflare 12 | options: tls-opts@file 13 | services: 14 | nasshell-svc: 15 | loadBalancer: 16 | servers: 17 | - url: "https://192.168.1.31:4200" 18 | -------------------------------------------------------------------------------- /Asustor/Docker/configs/traefik/rules/tls-opts.yml: -------------------------------------------------------------------------------- 1 | tls: 2 | options: 3 | tls-opts: 4 | minVersion: VersionTLS12 5 | cipherSuites: 6 | - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 7 | - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 8 | - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 9 | - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 10 | - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 11 | - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 12 | - TLS_AES_128_GCM_SHA256 13 | - TLS_AES_256_GCM_SHA384 14 | - TLS_CHACHA20_POLY1305_SHA256 15 | - TLS_FALLBACK_SCSV # Client is doing version fallback. 
See RFC 7507 16 | curvePreferences: 17 | - CurveP521 18 | - CurveP384 19 | sniStrict: true 20 | -------------------------------------------------------------------------------- /Asustor/Docker/data/readme.md: -------------------------------------------------------------------------------- 1 | # Docker data 2 | 3 | Here are all of the docker data dirs. 4 | -------------------------------------------------------------------------------- /Asustor/Docker/readme.md: -------------------------------------------------------------------------------- 1 | # Docker data dir 2 | 3 | Here is all of the app configs and or data needed. 4 | -------------------------------------------------------------------------------- /Asustor/docker-compose/.env: -------------------------------------------------------------------------------- 1 | PUID=1000 2 | PGID=100 3 | TZ="Europe/Rome" 4 | USERDIR="/home/nasadmin" 5 | SECRETSDIR="/home/nasadmin/docker-compose/secrets" 6 | DOCKERDIR="/volume1/Docker" 7 | DATADIR="/volume1/Docker/data" 8 | CONFIGDIR="/volume1/Docker/configs" 9 | LOGDIR="/volume1/Docker/logs" 10 | DOMAINNAME_1="urbaman.cloud" 11 | SERVER_IP=192.168.1.31 12 | LOCAL_IPS=127.0.0.1/32,10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 13 | CLOUDFLARE_IPS=173.245.48.0/20,103.21.244.0/22,103.22.200.0/22,103.31.4.0/22,141.101.64.0/18,108.162.192.0/18,190.93.240.0/20,188.114.96.0/20,197.234.240.0/22,198.41.128.0/17,162.158.0.0/15,104.16.0.0/13,104.24.0.0/14,172.64.0.0/13,131.0.72.0/22 14 | #HOSTNAME="udms" 15 | GRAFANA_USER=admin 16 | GRAFANA_PASSWORD=password 17 | PIHOLEPASSWORD=password 18 | CODESERVERPASSWORD=password 19 | MARIADB_ROOT_PWD=password 20 | COLLABORA_USERNAME=admin 21 | COLLABORA_PASSWORD=password 22 | MYSQL_NC_PASSWORD=password 23 | NC_ADMIN_USER=admin 24 | NC_ADMIN_PASSWORD=nextcloudadmin 25 | NC_SMTP_PASSWORD=password 26 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/coding/code-server.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | code-server: 3 | image: lscr.io/linuxserver/code-server:latest 4 | container_name: code-server 5 | pull_policy: always 6 | environment: 7 | - PUID=${PUID} 8 | - PGID=${PGID} 9 | - TZ=${TZ} 10 | - PASSWORD=codeserveradmin 11 | # - HASHED_PASSWORD= #optional 12 | - SUDO_PASSWORD=codeserveradmin #optional 13 | # - SUDO_PASSWORD_HASH= #optional 14 | - PROXY_DOMAIN=code-server.urbaman.cloud #optional 15 | - DEFAULT_WORKSPACE=/config/workspace #optional 16 | volumes: 17 | - $CONFIGDIR/code-server/config:/config 18 | networks: 19 | - traefik 20 | - pihole 21 | dns: 22 | - 192.168.4.100 23 | - 127.0.0.11 24 | # ports: 25 | # - 8443:8443 26 | restart: unless-stopped 27 | labels: 28 | - "org.label-schema.group=monitoring" 29 | - "traefik.enable=true" 30 | # HTTP Routers 31 | - "traefik.http.routers.code-server-rtr.entrypoints=websecure" 32 | - "traefik.http.routers.code-server-rtr.rule=Host(`code-server.$DOMAINNAME_1`)" # HostRegexp:code-server.${DOMAINNAME_1},{catchall:.*}" # Host(`code-server.$DOMAI 33 | # Middlewares 34 | - "traefik.http.routers.code-server-rtr.middlewares=middlewares-chain-no-auth@file" 35 | # HTTP Services 36 | - "traefik.http.routers.code-server-rtr.service=code-server-svc" 37 | - "traefik.http.services.code-server-svc.loadbalancer.server.port=8443" 38 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/core/clamav.yaml: 
-------------------------------------------------------------------------------- 1 | services: 2 | clamav: 3 | image: clamav/clamav:latest_base 4 | pull_policy: always 5 | container_name: clamav 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | # profiles: ["apps", "all"] 10 | networks: 11 | # - dnet 12 | - clamav 13 | - pihole 14 | # ports: 15 | # - "8080:8080" 16 | dns: 17 | - 192.168.4.100 18 | - 127.0.0.11 19 | environment: 20 | # - DOZZLE_FILTER: "label=log_me" # limits logs displayed to containers with this label 21 | # - DOCKER_HOST: tcp://socket-proxy:2375 22 | - TZ=${TZ} 23 | volumes: 24 | - $DATADIR/clamav/db:/var/lib/clamav 25 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/core/dozzle.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dozzle: 3 | image: amir20/dozzle:latest 4 | pull_policy: always 5 | container_name: dozzle 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | # profiles: ["apps", "all"] 10 | networks: 11 | # - dnet 12 | - traefik 13 | - pihole 14 | # ports: 15 | # - "8080:8080" 16 | dns: 17 | - 192.168.4.100 18 | - 127.0.0.11 19 | environment: 20 | - DOZZLE_LEVEL=info 21 | - DOZZLE_TAILSIZE=300 22 | - DOZZLE_FILTER="status=running" 23 | # - DOZZLE_FILTER: "label=log_me" # limits logs displayed to containers with this label 24 | # - DOCKER_HOST: tcp://socket-proxy:2375 25 | - TZ=${TZ} 26 | labels: 27 | - "org.label-schema.group=monitoring" 28 | - "traefik.enable=true" 29 | # HTTP Routers 30 | - "traefik.http.routers.dozzle-rtr.entrypoints=websecure" 31 | - "traefik.http.routers.dozzle-rtr.rule=Host(`dozzle.$DOMAINNAME_1`)" 32 | # Middlewares 33 | - "traefik.http.routers.dozzle-rtr.middlewares=middlewares-chain-no-auth@file" 34 | # HTTP Services 35 | - "traefik.http.routers.dozzle-rtr.service=dozzle-svc" 36 | - "traefik.http.services.dozzle-svc.loadbalancer.server.port=8080" 37 | volumes: 38 | - /var/run/docker.sock:/var/run/docker.sock:ro # Use Docker Socket Proxy instead for improved security 39 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/core/homepage.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | homepage: 3 | image: ghcr.io/gethomepage/homepage:latest 4 | pull_policy: always 5 | security_opt: 6 | - no-new-privileges:true 7 | restart: unless-stopped 8 | container_name: homepage 9 | networks: 10 | # - socket_proxy 11 | # - dnet 12 | - traefik 13 | - pihole 14 | # ports: 15 | # - 3000:3000 16 | dns: 17 | - 192.168.4.100 18 | - 127.0.0.11 19 | volumes: 20 | - $CONFIGDIR/homepage/config:/app/config # Make sure your local config directory exists 21 | - /var/run/docker.sock:/var/run/docker.sock:ro # (optional) For docker integrations, see alternative methods 22 | environment: 23 | - TZ={$TZ} 24 | labels: 25 | - "org.label-schema.group=monitoring" 26 | - "traefik.enable=true" 27 | # HTTP Routers 28 | - "traefik.http.routers.homepage-rtr.entrypoints=websecure" 29 | - "traefik.http.routers.homepage-rtr.rule=Host(`homepage.$DOMAINNAME_1`)" 30 | # Middlewares 31 | - "traefik.http.routers.homepage-rtr.middlewares=middlewares-chain-no-auth@file" 32 | # HTTP Services 33 | - "traefik.http.routers.homepage-rtr.service=homepage-svc" 34 | - "traefik.http.services.homepage-svc.loadbalancer.server.port=3000" 35 | 
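Most of the compose fragments in this section attach to shared `traefik`, `pihole`, `monitoring` and `database` networks and resolve DNS through Pi-hole at 192.168.4.100; those networks are declared once in the top-level docker-compose.yaml, which is not reproduced in this dump. A rough sketch of what that declaration could look like (an assumption, not the actual file; the 192.168.4.0/24 subnet is only inferred from the static IP assigned to cloudflared):

```yaml
# Hypothetical top-level network section backing the per-service compose files
networks:
  traefik:
    name: traefik
  monitoring:
    name: monitoring
  database:
    name: database
  pihole:
    name: pihole
    ipam:
      config:
        - subnet: 192.168.4.0/24   # assumed: cloudflared is pinned to 192.168.4.101 on this network
```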
-------------------------------------------------------------------------------- /Asustor/docker-compose/compose/core/portainer.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | # Portainer - WebUI for Containers 3 | portainer: 4 | container_name: portainer 5 | image: portainer/portainer-ce:latest # Use portainer-ee if you have a Business Edition license key 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | restart: unless-stopped 10 | # profiles: ["core", "all"] 11 | networks: 12 | # - dnet 13 | - traefik 14 | - pihole 15 | command: -H unix:///var/run/docker.sock # # Use Docker Socket Proxy instead for improved security 16 | #command: -H tcp://socket-proxy:2375 17 | # ports: 18 | # - "9000:9000" 19 | # - "8002:8000" 20 | # - "9443:9443" 21 | dns: 22 | - 192.168.4.100 23 | - 127.0.0.11 24 | volumes: 25 | - /var/run/docker.sock:/var/run/docker.sock:ro # # Use Docker Socket Proxy instead for improved security 26 | - $DATADIR/portainer/data:/data # Change to local directory if you want to save/transfer config locally 27 | environment: 28 | - TZ=${TZ} 29 | labels: 30 | - "org.label-schema.group=monitoring" 31 | - "traefik.enable=true" 32 | # HTTP Routers 33 | - "traefik.http.routers.portainer-rtr.entrypoints=websecure" 34 | - "traefik.http.routers.portainer-rtr.rule=Host(`portainer.$DOMAINNAME_1`)" 35 | # Middlewares 36 | - "traefik.http.routers.portainer-rtr.middlewares=middlewares-chain-no-auth@file" 37 | # HTTP Services 38 | - "traefik.http.routers.portainer-rtr.service=portainer-svc" 39 | - "traefik.http.services.portainer-svc.loadbalancer.server.port=9000" 40 | 41 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/database/mariadb.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | # MariaDB 3 | mariadb: 4 | container_name: mariadb 5 | image: mariadb:latest 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | restart: unless-stopped 10 | networks: 11 | - database 12 | volumes: 13 | - $DATADIR/mariadb:/var/lib/mysql 14 | environment: 15 | - TZ=${TZ} 16 | - MARIADB_ROOT_PASSWORD=${MARIADB_ROOT_PWD} 17 | - MARIADB_AUTO_UPGRADE=1 18 | - MARIADB_DISABLE_UPGRADE_BACKUP=1 19 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/database/redis.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | # Redis 3 | redis: 4 | container_name: redis 5 | image: redis:latest 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | restart: unless-stopped 10 | networks: 11 | - database 12 | environment: 13 | - TZ=${TZ} 14 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | alertmanager: 4 | image: prom/alertmanager:latest 5 | container_name: alertmanager 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | volumes: 10 | - $CONFIGDIR/alertmanager:/etc/alertmanager 11 | - $DATADIR/alertmanager:/alertmanager 12 | user: "1000" 13 | command: 14 | - '--config.file=/etc/alertmanager/config.yml' 15 | - '--storage.path=/alertmanager' 16 | restart: unless-stopped 17 | # expose: 18 | # - 9093 19 | networks: 20 | - monitoring 21 | - pihole 22 | - traefik 23 | dns: 
24 | - 192.168.4.100 25 | - 127.0.0.11 26 | environment: 27 | - TZ=${TZ} 28 | - PUID=${PUID} 29 | - PGID=${PGID} 30 | labels: 31 | - "org.label-schema.group=monitoring" 32 | - "traefik.enable=true" 33 | # HTTP Routers 34 | - "traefik.http.routers.alertmanager-rtr.entrypoints=websecure" 35 | - "traefik.http.routers.alertmanager-rtr.rule=Host(`alertmanager.$DOMAINNAME_1`)" 36 | # Middlewares 37 | - "traefik.http.routers.alertmanager-rtr.middlewares=middlewares-chain-no-auth@file" 38 | # HTTP Services 39 | - "traefik.http.routers.alertmanager-rtr.service=alertmanager-svc" 40 | - "traefik.http.services.alertmanager-svc.loadbalancer.server.port=9093" 41 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/grafana.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | grafana: 3 | image: grafana/grafana:latest 4 | container_name: grafana 5 | volumes: 6 | - $DATADIR/grafana:/var/lib/grafana 7 | - $CONFIGDIR/grafana/provisioning:/etc/grafana/provisioning 8 | environment: 9 | - GF_SECURITY_ADMIN_USER=${GRAFANA_USER} 10 | - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD} 11 | - GF_USERS_ALLOW_SIGN_UP=false 12 | - GF_ALERTING_ENABLED=false 13 | - GF_UNIFIED_ALERTING_ENABLED=true 14 | - TZ=${TZ} 15 | - PUID=${PUID} 16 | - PGID=${PGID} 17 | user: "1000" 18 | restart: unless-stopped 19 | pull_policy: always 20 | security_opt: 21 | - no-new-privileges:true 22 | # expose: 23 | # - 3000 24 | dns: 25 | - 192.168.4.100 26 | - 127.0.0.11 27 | networks: 28 | - monitoring 29 | - pihole 30 | - traefik 31 | labels: 32 | - "org.label-schema.group=monitoring" 33 | - "traefik.enable=true" 34 | # HTTP Routers 35 | - "traefik.http.routers.grafana-rtr.entrypoints=websecure" 36 | - "traefik.http.routers.grafana-rtr.rule=Host(`grafana.$DOMAINNAME_1`)" 37 | # Middlewares 38 | - "traefik.http.routers.grafana-rtr.middlewares=middlewares-chain-no-auth@file" 39 | # HTTP Services 40 | - "traefik.http.routers.grafana-rtr.service=grafana-svc" 41 | - "traefik.http.services.grafana-svc.loadbalancer.server.port=3000" 42 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/mariadbexporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | mariadbexporter: 3 | image: prom/mysqld-exporter:latest 4 | container_name: mariadbexporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | command: 10 | - --config.my-cnf=/cfg/.my.cnf 11 | - --mysqld.address=mariadb:3306 12 | # expose: 13 | # - 9104 14 | dns: 15 | - 192.168.4.100 16 | - 127.0.0.11 17 | networks: 18 | - monitoring 19 | - database 20 | - pihole 21 | environment: 22 | - TZ=${TZ} 23 | volumes: 24 | - $CONFIGDIR/mariadbexporter/.my.cnf:/cfg/.my.cnf 25 | labels: 26 | - "org.label-schema.group=monitoring" 27 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/nextcloudexporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | nextcloudexporter: 3 | image: xperimental/nextcloud-exporter:latest 4 | container_name: nextcloudexporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | # expose: 10 | # - 9205 11 | dns: 12 | - 192.168.4.100 13 | - 127.0.0.11 14 | networks: 15 | - monitoring 16 | - pihole 17 | 
environment: 18 | - TZ=${TZ} 19 | - NEXTCLOUD_SERVER=https://nextcloud.urbaman.cloud 20 | - NEXTCLOUD_USERNAME=exporter 21 | - NEXTCLOUD_PASSWORD=${NEXTCLOUD_EXPORTER_PASSWORD} 22 | - NEXTCLOUD_TIMEOUT=5s 23 | labels: 24 | - "org.label-schema.group=monitoring" 25 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/nodeexporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | nodeexporter: 3 | image: prom/node-exporter:latest 4 | container_name: nodeexporter 5 | pull_policy: always 6 | environment: 7 | - TZ=${TZ} 8 | security_opt: 9 | - no-new-privileges:true 10 | volumes: 11 | - /proc:/host/proc:ro 12 | - /sys:/host/sys:ro 13 | - /:/rootfs:ro 14 | command: 15 | - '--path.procfs=/host/proc' 16 | - '--path.rootfs=/rootfs' 17 | - '--path.sysfs=/host/sys' 18 | - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)' 19 | restart: unless-stopped 20 | # expose: 21 | # - 9100 22 | networks: 23 | - monitoring 24 | - pihole 25 | dns: 26 | - 192.168.4.100 27 | - 127.0.0.11 28 | labels: 29 | - "org.label-schema.group=monitoring" 30 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/piholeexporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | piholeexporter: 3 | image: ekofr/pihole-exporter:latest 4 | container_name: piholeexporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | # expose: 10 | # - 9617 11 | dns: 12 | - 192.168.4.100 13 | - 127.0.0.11 14 | networks: 15 | - monitoring 16 | - pihole 17 | environment: 18 | - TZ=${TZ} 19 | - PUID=${PUID} 20 | - PGID=${PGID} 21 | - PIHOLE_HOSTNAME=pihole 22 | - PIHOLE_PASSWORD=piholeadmin 23 | - PIHOLE_PORT=80 24 | labels: 25 | - "org.label-schema.group=monitoring" 26 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/prometheus.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | prometheus: 4 | image: prom/prometheus:latest 5 | container_name: prometheus 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | volumes: 10 | - $CONFIGDIR/prometheus:/etc/prometheus 11 | - $DATADIR/prometheus:/prometheus 12 | user: "1000" 13 | command: 14 | - '--config.file=/etc/prometheus/prometheus.yml' 15 | - '--storage.tsdb.path=/prometheus' 16 | - '--web.console.libraries=/etc/prometheus/console_libraries' 17 | - '--web.console.templates=/etc/prometheus/consoles' 18 | - '--storage.tsdb.retention.time=200h' 19 | - '--web.enable-lifecycle' 20 | restart: unless-stopped 21 | # expose: 22 | # - 9090 23 | dns: 24 | - 192.168.4.100 25 | - 127.0.0.11 26 | networks: 27 | - monitoring 28 | - pihole 29 | - traefik 30 | environment: 31 | - TZ=${TZ} 32 | - PUID=${PUID} 33 | - PGID=${PGID} 34 | labels: 35 | - "org.label-schema.group=monitoring" 36 | - "traefik.enable=true" 37 | # HTTP Routers 38 | - "traefik.http.routers.prometheus-rtr.entrypoints=websecure" 39 | - "traefik.http.routers.prometheus-rtr.rule=Host(`prometheus.$DOMAINNAME_1`)" 40 | # Middlewares 41 | - "traefik.http.routers.prometheus-rtr.middlewares=middlewares-chain-no-auth@file" 42 | # HTTP Services 43 | - "traefik.http.routers.prometheus-rtr.service=prometheus-svc" 44 | - 
"traefik.http.services.prometheus-svc.loadbalancer.server.port=9090" 45 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/pushgateway.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | pushgateway: 3 | image: prom/pushgateway:latest 4 | container_name: pushgateway 5 | restart: unless-stopped 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | # expose: 10 | # - 9091 11 | dns: 12 | - 192.168.4.100 13 | - 127.0.0.11 14 | networks: 15 | - monitoring 16 | - pihole 17 | - traefik 18 | environment: 19 | - TZ=${TZ} 20 | - PUID=${PUID} 21 | - PGID=${PGID} 22 | labels: 23 | - "org.label-schema.group=monitoring" 24 | - "traefik.enable=true" 25 | # HTTP Routers 26 | - "traefik.http.routers.pushgateway-rtr.entrypoints=websecure" 27 | - "traefik.http.routers.pushgateway-rtr.rule=Host(`pushgateway.$DOMAINNAME_1`)" 28 | # Middlewares 29 | - "traefik.http.routers.pushgateway-rtr.middlewares=middlewares-chain-no-auth@file" 30 | # HTTP Services 31 | - "traefik.http.routers.pushgateway-rtr.service=pushgateway-svc" 32 | - "traefik.http.services.pushgateway-svc.loadbalancer.server.port=9091" 33 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/monitoring/redisexporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | redisexporter: 3 | image: oliver006/redis_exporter:latest 4 | container_name: redisexporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | # expose: 10 | # - 9121 11 | dns: 12 | - 192.168.4.100 13 | - 127.0.0.11 14 | networks: 15 | - monitoring 16 | - database 17 | - pihole 18 | environment: 19 | - TZ=${TZ} 20 | - REDIS_ADDR=redis://redis:6379 21 | labels: 22 | - "org.label-schema.group=monitoring" 23 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/network/cloudflared.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cloudflared: 3 | container_name: cloudflared 4 | image: visibilityspots/cloudflared:latest 5 | pull_policy: always 6 | restart: unless-stopped 7 | networks: 8 | pihole: 9 | ipv4_address: 192.168.4.101 10 | labels: 11 | - "org.label-schema.group=monitoring" 12 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/network/dhcphelper.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dhcphelper: 3 | container_name: dhcp-helper 4 | build: ./dhcp-helper 5 | restart: unless-stopped 6 | network_mode: "host" 7 | command: -s 192.168.4.100 8 | cap_add: 9 | - NET_ADMIN 10 | labels: 11 | - "org.label-schema.group=monitoring" 12 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/network/dockovpn.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dockovpn: 3 | image: alekslitvinenk/openvpn 4 | container_name: dockovpn 5 | volumes: 6 | - $CONFIGDIR/dockovpn:/opt/Dockovpn_data 7 | environment: 8 | - TZ=${TZ} 9 | - PUID=${PUID} 10 | - PGID=${PGID} 11 | # user: "1000" 12 | restart: unless-stopped 13 | pull_policy: always 14 | security_opt: 15 | - no-new-privileges:true 16 | cap_add: 17 | - NET_ADMIN 18 | ports: 19 | - 1194:1194/udp 
20 | # expose: 21 | # - 3000 22 | dns: 23 | - 192.168.4.100 24 | - 127.0.0.11 25 | networks: 26 | - monitoring 27 | - pihole 28 | - traefik 29 | labels: 30 | - "org.label-schema.group=monitoring" 31 | - "traefik.enable=true" 32 | # HTTP Routers 33 | - "traefik.http.routers.dockovpn-rtr.entrypoints=websecure" 34 | - "traefik.http.routers.dockovpn-rtr.rule=Host(`dockovpn.$DOMAINNAME_1`)" 35 | # Middlewares 36 | - "traefik.http.routers.dockovpn-rtr.middlewares=middlewares-chain-no-auth@file" 37 | # HTTP Services 38 | - "traefik.http.routers.dockovpn-rtr.service=dockovpn-svc" 39 | - "traefik.http.services.dockovpn-svc.loadbalancer.server.port=8080" 40 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/network/nginxproxymanager.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | nginx: 3 | image: 'jc21/nginx-proxy-manager:latest' 4 | container_name: nginx 5 | volumes: 6 | - $DATADIR/nginx:/data 7 | - $CONFIGDIR/nginx:/etc/letsencrypt 8 | environment: 9 | - TZ=${TZ} 10 | - PUID=${PUID} 11 | - PGID=${PGID} 12 | # user: "1000" 13 | restart: unless-stopped 14 | pull_policy: always 15 | security_opt: 16 | - no-new-privileges:true 17 | ports: 18 | - '8080:80' 19 | - '8443:443' 20 | # expose: 21 | # - 3000 22 | dns: 23 | - 192.168.4.100 24 | - 127.0.0.11 25 | networks: 26 | - monitoring 27 | - pihole 28 | - traefik 29 | labels: 30 | - "org.label-schema.group=monitoring" 31 | - "traefik.enable=true" 32 | # HTTP Routers 33 | - "traefik.http.routers.nginx-rtr.entrypoints=websecure" 34 | - "traefik.http.routers.nginx-rtr.rule=Host(`nginx.$DOMAINNAME_1`)" 35 | # Middlewares 36 | - "traefik.http.routers.nginx-rtr.middlewares=middlewares-chain-no-auth@file" 37 | # HTTP Services 38 | - "traefik.http.routers.nginx-rtr.service=nginx-svc" 39 | - "traefik.http.services.nginx-svc.loadbalancer.server.port=81" 40 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/tools/boinc.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | boinc: 3 | container_name: boinc 4 | image: lscr.io/linuxserver/boinc:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - pihole 9 | dns: 10 | - 192.168.4.100 11 | - 127.0.0.11 12 | # ports: 13 | # - '8083:8080' 14 | volumes: 15 | # - $DATADIR/:/usr/share/tessdata 16 | - $DATADIR/boinc:/config 17 | # - $LOGDIR/stirling-pdf:/logs 18 | environment: 19 | - TZ=${TZ} 20 | - PUID=${PUID} 21 | - PGID=${PGID} 22 | devices: 23 | - /dev/dri:/dev/dri 24 | labels: 25 | - "org.label-schema.group=monitoring" 26 | - "traefik.enable=true" 27 | # HTTP Routers 28 | - "traefik.http.routers.boinc-rtr.entrypoints=websecure" 29 | - "traefik.http.routers.boinc-rtr.rule=Host(`boinc.$DOMAINNAME_1`)" # HostRegexp:boinc.${DOMAINNAME_1},{catchall:.*}" # Host(`boinc.$DOMAINNAME_1`)" 30 | # Middlewares 31 | - "traefik.http.routers.boinc-rtr.middlewares=middlewares-chain-no-auth@file" 32 | # HTTP Services 33 | - "traefik.http.routers.boinc-rtr.service=boinc-svc" 34 | - "traefik.http.services.boinc-svc.loadbalancer.server.port=8080" 35 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/tools/collabora.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | collabora: 3 | container_name: collabora 4 | image: collabora/code:latest 5 | pull_policy: always 6 | privileged: true 
7 | cap_add: 8 | - MKNOD 9 | networks: 10 | - traefik 11 | - pihole 12 | dns: 13 | - 192.168.4.100 14 | - 127.0.0.11 15 | environment: 16 | - TZ=${TZ} 17 | - "extra-params=--o:ssl.enable=false --o:ssl.termination=true" 18 | - username=${COLLABORA_USERNAME} 19 | - password=${COLLABORA_PASSWORD} 20 | labels: 21 | - "org.label-schema.group=monitoring" 22 | - "traefik.enable=true" 23 | # HTTP Routers 24 | - "traefik.http.routers.collabora-rtr.entrypoints=websecure" 25 | - "traefik.http.routers.collabora-rtr.rule=Host(`collabora.$DOMAINNAME_1`)" 26 | # Middlewares 27 | #- "traefik.http.routers.collabora-rtr.middlewares=middlewares-chain-no-auth@file" 28 | # HTTP Services 29 | - "traefik.http.routers.collabora-rtr.service=collabora-svc" 30 | - "traefik.http.services.collabora-svc.loadbalancer.server.scheme=https" 31 | - "traefik.http.services.collabora-svc.loadbalancer.server.port=9980" 32 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/tools/drawio.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | drawio: 3 | container_name: drawio 4 | image: jgraph/drawio:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - pihole 9 | dns: 10 | - 192.168.4.100 11 | - 127.0.0.11 12 | # ports: 13 | # - '8085:8080' 14 | # volumes: 15 | # - $DATADIR/stirling-pdf:/usr/share/tessdata 16 | environment: 17 | # - SYSTEM_DEFAULTLOCALE=it_IT 18 | - TZ=${TZ} 19 | labels: 20 | - "org.label-schema.group=monitoring" 21 | - "traefik.enable=true" 22 | # HTTP Routers 23 | - "traefik.http.routers.drawio-rtr.entrypoints=websecure" 24 | - "traefik.http.routers.drawio-rtr.rule=Host(`drawio.$DOMAINNAME_1`)" # HostRegexp:drawio.${DOMAINNAME_1},{catchall:.*}" # Host(`drawio.$DOMAINNAME_1`)" 25 | # Middlewares 26 | - "traefik.http.routers.drawio-rtr.middlewares=middlewares-chain-no-auth@file" 27 | # HTTP Services 28 | - "traefik.http.routers.drawio-rtr.service=drawio-svc" 29 | - "traefik.http.services.drawio-svc.loadbalancer.server.port=8080" 30 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/tools/it-tools.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | it-tools: 3 | container_name: it-tools 4 | image: corentinth/it-tools:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - pihole 9 | dns: 10 | - 192.168.4.100 11 | - 127.0.0.11 12 | # ports: 13 | # - '8084:80' 14 | # volumes: 15 | # - $DATADIR/stirling-pdf:/usr/share/tessdata 16 | environment: 17 | # - SYSTEM_DEFAULTLOCALE=it_IT 18 | - TZ=${TZ} 19 | labels: 20 | - "org.label-schema.group=monitoring" 21 | - "traefik.enable=true" 22 | # HTTP Routers 23 | - "traefik.http.routers.it-tools-rtr.entrypoints=websecure" 24 | - "traefik.http.routers.it-tools-rtr.rule=Host(`it-tools.$DOMAINNAME_1`)" # HostRegexp:it-tools.${DOMAINNAME_1},{catchall:.*}" # Host(`it-tools.$DOMAINNAME_1`)" 25 | # Middlewares 26 | - "traefik.http.routers.it-tools-rtr.middlewares=middlewares-chain-no-auth@file" 27 | # HTTP Services 28 | - "traefik.http.routers.it-tools-rtr.service=it-tools-svc" 29 | - "traefik.http.services.it-tools-svc.loadbalancer.server.port=80" 30 | -------------------------------------------------------------------------------- /Asustor/docker-compose/compose/tools/stirling-pdf.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | s-pdf: 3 | container_name: s-pdf 4 | image: 
frooodle/s-pdf:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - pihole 9 | dns: 10 | - 192.168.4.100 11 | - 127.0.0.11 12 | # ports: 13 | # - '8083:8080' 14 | volumes: 15 | - $DATADIR/stirling-pdf:/usr/share/tessdata 16 | - $CONFIGDIR/stirling-pdf:/configs 17 | - $LOGDIR/stirling-pdf:/logs 18 | environment: 19 | - DOCKER_ENABLE_SECURITY=true 20 | - SECURITY_ENABLE_LOGIN=true 21 | - INSTALL_BOOK_AND_ADVANCED_HTML_OPS=true 22 | - SECURITY_CSRFDISABLED=true 23 | - UI_APPNAME=StirlingPDF 24 | - UI_HOMEDESCRIPTION=Stirling PDF tools 25 | - UI_APPNAMENAVBAR=Stirling PDF Tools 26 | - SYSTEM_MAXFILESIZE=10000 27 | - METRICS_ENABLED=true 28 | - SYSTEM_GOOGLEVISIBILITY=false 29 | - SYSTEM_DEFAULTLOCALE=it_IT 30 | - TZ=${TZ} 31 | labels: 32 | - "org.label-schema.group=monitoring" 33 | - "traefik.enable=true" 34 | # HTTP Routers 35 | - "traefik.http.routers.s-pdf-rtr.entrypoints=websecure" 36 | - "traefik.http.routers.s-pdf-rtr.rule=Host(`s-pdf.$DOMAINNAME_1`)" # HostRegexp:s-pdf.${DOMAINNAME_1},{catchall:.*}" # Host(`s-pdf.$DOMAINNAME_1`)" 37 | # Middlewares 38 | - "traefik.http.routers.s-pdf-rtr.middlewares=middlewares-chain-no-auth@file" 39 | # HTTP Services 40 | - "traefik.http.routers.s-pdf-rtr.service=s-pdf-svc" 41 | - "traefik.http.services.s-pdf-svc.loadbalancer.server.port=8080" 42 | -------------------------------------------------------------------------------- /Asustor/docker-compose/dhcp-helper/dhcp-helper.dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest 2 | RUN apk --no-cache add dhcp-helper 3 | EXPOSE 67 67/udp 4 | ENTRYPOINT ["dhcp-helper", "-n"] -------------------------------------------------------------------------------- /Asustor/docker-compose/readme.md: -------------------------------------------------------------------------------- 1 | # Docker compose 2 | 3 | Here ara all of my docker compose files. 4 | -------------------------------------------------------------------------------- /Asustor/docker-compose/secrets/readme.md: -------------------------------------------------------------------------------- 1 | # Secrets 2 | 3 | Here are all of the secrets for docker compose (not real ones). 
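As a minimal, illustrative sketch (file, service and variable names are placeholders, not real entries from this repository), a file-based secret can be declared at the top level of a compose file and read by a service from `/run/secrets/`:

```yaml
# Illustrative only: declare a file-based secret and hand it to a service.
secrets:
  mariadb_root_password:
    file: ./secrets/mariadb_root_password.txt   # one value per file, kept out of version control

services:
  mariadb:
    image: mariadb:latest
    secrets:
      - mariadb_root_password                   # mounted read-only at /run/secrets/mariadb_root_password
    environment:
      # The official MariaDB image accepts *_FILE variants pointing at a secret file
      - MARIADB_ROOT_PASSWORD_FILE=/run/secrets/mariadb_root_password
```

This keeps the actual values in plain files under this directory while the compose files only reference them by name.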
4 | -------------------------------------------------------------------------------- /Docker/docker/compose/databases/mariadb.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | # MariaDB 3 | mariadb: 4 | container_name: mariadb 5 | image: mariadb:latest 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | restart: unless-stopped 10 | networks: 11 | - databases 12 | volumes: 13 | - $DATADIR/mariadb:/var/lib/mysql 14 | - $CONFIGDIR/mariadb:/etc/mysql 15 | environment: 16 | - TZ=${TZ} 17 | - MARIADB_ROOT_PASSWORD=${MARIADB_ROOT_PWD} 18 | - MARIADB_AUTO_UPGRADE=1 19 | - MARIADB_DISABLE_UPGRADE_BACKUP=1 20 | labels: 21 | - "org.label-schema.group=databases" 22 | - "traefik.enable=true" 23 | - "traefik.docker.network=databases" 24 | - "traefik.tcp.routers.mysql-router.entrypoints=mysql" 25 | - "traefik.tcp.routers.mysql-router.rule=HostSNI(`*`)" 26 | - "traefik.tcp.routers.mysql-router.service=mysql-service" 27 | - "traefik.tcp.services.mysql-service.loadbalancer.server.port=3306" 28 | 29 | -------------------------------------------------------------------------------- /Docker/docker/compose/databases/postgresql.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres: 3 | container_name: postgres 4 | image: ghcr.io/immich-app/postgres:17-vectorchord0.3.0-pgvectors0.3.0 5 | pull_policy: always 6 | networks: 7 | - databases 8 | environment: 9 | POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} 10 | POSTGRES_USER: ${POSTGRES_USER} 11 | POSTGRES_INITDB_ARGS: '--data-checksums' 12 | # Uncomment the DB_STORAGE_TYPE: 'HDD' var if your database isn't stored on SSDs 13 | DB_STORAGE_TYPE: 'HDD' 14 | volumes: 15 | # Do not edit the next line. 
If you want to change the database storage location on your system, edit the value of DB_DATA_LOCATION in the .env file 16 | - $DATADIR/postgresql:/var/lib/postgresql/data 17 | - $CONFIGDIR/postgresql:/etc/postgresql 18 | restart: always 19 | labels: 20 | - "org.label-schema.group=databases" 21 | - "traefik.enable=true" 22 | - "traefik.docker.network=databases" 23 | - "traefik.tcp.routers.postgresql-router.entrypoints=postgresql" 24 | - "traefik.tcp.routers.postgresql-router.rule=HostSNI(`*`)" 25 | - "traefik.tcp.routers.postgresql-router.service=postgresql-service" 26 | - "traefik.tcp.services.postgresql-service.loadbalancer.server.port=5432" 27 | -------------------------------------------------------------------------------- /Docker/docker/compose/databases/valkey.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | valkey: 3 | container_name: valkey 4 | image: docker.io/valkey/valkey 5 | pull_policy: always 6 | networks: 7 | - databases 8 | security_opt: 9 | - no-new-privileges:true 10 | command: 11 | - --save 60 1 12 | - --loglevel warning 13 | environment: 14 | - TZ=${TZ} 15 | volumes: 16 | - $DATADIR/valkey:/data 17 | healthcheck: 18 | test: redis-cli ping || exit 1 19 | restart: always 20 | labels: 21 | - "org.label-schema.group=databases" 22 | - "traefik.enable=true" 23 | - "traefik.docker.network=databases" 24 | - "traefik.tcp.routers.valkey-router.entrypoints=redis" 25 | - "traefik.tcp.routers.valkey-router.rule=HostSNI(`*`)" 26 | - "traefik.tcp.routers.valkey-router.service=valkey-service" 27 | - "traefik.tcp.services.valkey-service.loadbalancer.server.port=6379" 28 | -------------------------------------------------------------------------------- /Docker/docker/compose/management/portainer.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | # Portainer - WebUI for Containers 3 | portainer: 4 | container_name: portainer 5 | image: portainer/portainer-ce:latest # Use portainer-ee if you have a Business Edition license key 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | restart: unless-stopped 10 | # profiles: ["core", "all"] 11 | networks: 12 | - management 13 | command: -H unix:///var/run/docker.sock # # Use Docker Socket Proxy instead for improved security 14 | volumes: 15 | - /var/run/docker.sock:/var/run/docker.sock:ro # # Use Docker Socket Proxy instead for improved security 16 | - $DATADIR/portainer/data:/data # Change to local directory if you want to save/transfer config locally 17 | environment: 18 | - TZ=${TZ} 19 | labels: 20 | - "org.label-schema.group=management" 21 | - "traefik.enable=true" 22 | - "traefik.docker.network=management" 23 | # HTTP Routers 24 | - "traefik.http.routers.portainer-router.entrypoints=websecure" 25 | - "traefik.http.routers.portainer-router.rule=Host(`portainer.$DOMAINNAME_1`)" 26 | # Middlewares 27 | - "traefik.http.routers.portainer-router.middlewares=secure-headers@file" 28 | # HTTP Services 29 | - "traefik.http.routers.portainer-router.service=portainer-svc" 30 | - "traefik.http.services.portainer-svc.loadbalancer.server.port=9000" 31 | 32 | 33 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | alertmanager: 4 | image: prom/alertmanager:latest 5 | container_name: alertmanager 6 | pull_policy: always 7 | security_opt: 8 | - 
no-new-privileges:true 9 | volumes: 10 | - '$CONFIGDIR/alertmanager:/etc/alertmanager' 11 | - '$DATADIR/alertmanager:/alertmanager' 12 | command: 13 | - '--config.file=/etc/alertmanager/config.yml' 14 | - '--storage.path=/alertmanager' 15 | restart: unless-stopped 16 | networks: 17 | - monitoring 18 | environment: 19 | - TZ=${TZ} 20 | labels: 21 | - "org.label-schema.group=monitoring" 22 | - "traefik.enable=true" 23 | - "traefik.docker.network=monitoring" 24 | # HTTP Routers 25 | - "traefik.http.routers.alertmanager-router.entrypoints=websecure" 26 | - "traefik.http.routers.alertmanager-router.rule=Host(`alertmanager.$DOMAINNAME_1`)" 27 | # Middlewares 28 | - "traefik.http.routers.alertmanager-router.middlewares=secure-headers@file" 29 | # HTTP Services 30 | - "traefik.http.routers.alertmanager-router.service=alertmanager-service" 31 | - "traefik.http.services.alertmanager-service.loadbalancer.server.port=9093" 32 | 33 | #volumes: 34 | # alertmanager-data: 35 | # external: true 36 | # alertmanager-config: 37 | # external: true 38 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/cadvisor.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cadvisor: 3 | image: gcr.io/cadvisor/cadvisor:latest 4 | container_name: cadvisor 5 | command: 6 | - "-docker_only" 7 | # - "-housekeeping_interval=30s" 8 | # - "-disable_metrics=cpu_topology,disk,memory_numa,tcp,udp,percpu,sched,process,hugetlb,referenced_memory,resctrl,cpuset,advtcp" 9 | # - "-allow_dynamic_housekeeping=false" 10 | - "-logtostderr" 11 | volumes: 12 | - /:/rootfs:ro 13 | - /var/run:/var/run:ro 14 | - /sys:/sys:ro 15 | - /var/lib/docker:/var/lib/docker:ro 16 | - /dev/disk/:/dev/disk:ro 17 | - /sys/fs/cgroup:/cgroup:ro #doesn't work on MacOS only for Linux 18 | restart: unless-stopped 19 | privileged: true 20 | environment: 21 | - TZ=${TZ} 22 | devices: 23 | - /dev/kmsg 24 | networks: 25 | - monitoring 26 | labels: 27 | - "org.label-schema.group=monitoring" 28 | - "traefik.enable=true" 29 | - "traefik.docker.network=monitoring" 30 | # HTTP Routers 31 | - "traefik.http.routers.cadvisor-router.entrypoints=websecure" 32 | - "traefik.http.routers.cadvisor-router.rule=Host(`cadvisor.$DOMAINNAME_1`)" 33 | # Middlewares 34 | - "traefik.http.routers.cadvisor-router.middlewares=secure-headers@file" 35 | # HTTP Services 36 | - "traefik.http.routers.cadvisor-router.service=cadvisor-service" 37 | - "traefik.http.services.cadvisor-service.loadbalancer.server.port=8080" 38 | 39 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/dozzle.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | dozzle: 3 | image: amir20/dozzle:latest 4 | pull_policy: always 5 | container_name: dozzle 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | networks: 10 | - traefik 11 | - monitoring 12 | environment: 13 | - DOZZLE_LEVEL=info 14 | - DOZZLE_TAILSIZE=300 15 | - DOZZLE_FILTER="status=running" 16 | # - DOZZLE_FILTER: "label=log_me" # limits logs displayed to containers with this label 17 | # - DOCKER_HOST: tcp://socket-proxy:2375 18 | - TZ=${TZ} 19 | labels: 20 | - "org.label-schema.group=monitoring" 21 | - "traefik.enable=true" 22 | - "traefik.docker.network=monitoring" 23 | # HTTP Routers 24 | - "traefik.http.routers.dozzle-router.entrypoints=websecure" 25 | - 
"traefik.http.routers.dozzle-router.rule=Host(`dozzle.$DOMAINNAME_1`)" 26 | # Middlewares 27 | - "traefik.http.routers.dozzle-router.middlewares=secure-headers@file" 28 | # HTTP Services 29 | - "traefik.http.routers.dozzle-router.service=dozzle-service" 30 | - "traefik.http.services.dozzle-service.loadbalancer.server.port=8080" 31 | volumes: 32 | - /var/run/docker.sock:/var/run/docker.sock:ro # Use Docker Socket Proxy instead for improved security 33 | 34 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/grafana.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | grafana: 3 | image: grafana/grafana-enterprise:latest 4 | container_name: grafana 5 | volumes: 6 | - '/mnt/DockerProd/data/grafana:/var/lib/grafana' 7 | - '/mnt/DockerProd/configs/grafana:/etc/grafana/provisioning' 8 | environment: 9 | - GF_SECURITY_ADMIN_USER=${GRAFANA_USER} 10 | - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD} 11 | - GF_USERS_ALLOW_SIGN_UP=false 12 | - GF_ALERTING_ENABLED=false 13 | - GF_UNIFIED_ALERTING_ENABLED=true 14 | - TZ=${TZ} 15 | restart: unless-stopped 16 | user: '1000' 17 | pull_policy: always 18 | security_opt: 19 | - no-new-privileges:true 20 | networks: 21 | - monitoring 22 | labels: 23 | - "org.label-schema.group=monitoring" 24 | - "traefik.enable=true" 25 | - "traefik.docker.network=monitoring" 26 | # HTTP Routers 27 | - "traefik.http.routers.grafana-router.entrypoints=websecure" 28 | - "traefik.http.routers.grafana-router.rule=Host(`grafana.$DOMAINNAME_1`)" 29 | # Middlewares 30 | - "traefik.http.routers.grafana-router.middlewares=secure-headers@file" 31 | # HTTP Services 32 | - "traefik.http.routers.grafana-router.service=grafana-service" 33 | - "traefik.http.services.grafana-service.loadbalancer.server.port=3000" 34 | 35 | #volumes: 36 | # grafana-data: 37 | # external: true 38 | # grafana-provisioning: 39 | # external: true 40 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/mariadb-exporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | mariadb-exporter: 3 | image: prom/mysqld-exporter:latest 4 | container_name: mariadb-exporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | command: 10 | - --config.my-cnf=/cfg/.my.cnf 11 | - --mysqld.address=mariadb:3306 12 | - --collect.info_schema.processlist 13 | - --collect.info_schema.innodb_metrics 14 | - --collect.info_schema.tablestats 15 | - --collect.info_schema.tables 16 | - --collect.info_schema.userstats 17 | - --collect.engine_innodb_status 18 | - --collect.perf_schema.eventsstatements 19 | - --collect.perf_schema.file_events 20 | - --collect.perf_schema.indexiowaits 21 | - --collect.perf_schema.tableiowaits 22 | - --collect.perf_schema.tablelocks 23 | - --collect.info_schema.query_response_time 24 | - --collect.perf_schema.eventsstatementssum 25 | networks: 26 | - monitoring 27 | - databases 28 | environment: 29 | - TZ=${TZ} 30 | volumes: 31 | - $CONFIGDIR/mariadb-exporter/.my.cnf:/cfg/.my.cnf:ro 32 | labels: 33 | - "org.label-schema.group=monitoring" 34 | 35 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/node-exporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | nodeexporter: 3 | image: prom/node-exporter:latest 4 | 
container_name: node-exporter 5 | pull_policy: always 6 | environment: 7 | - TZ=${TZ} 8 | security_opt: 9 | - no-new-privileges:true 10 | volumes: 11 | - /proc:/host/proc:ro 12 | - /sys:/host/sys:ro 13 | - /:/rootfs:ro 14 | command: 15 | - '--path.procfs=/host/proc' 16 | - '--path.rootfs=/rootfs' 17 | - '--path.sysfs=/host/sys' 18 | - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)' 19 | - '--collector.processes' 20 | - '--collector.interrupts' 21 | - '--collector.systemd' 22 | - '--collector.tcpstat' 23 | restart: unless-stopped 24 | networks: 25 | - monitoring 26 | labels: 27 | - "org.label-schema.group=monitoring" 28 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/pbs1-exporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | pbs1-exporter: 3 | image: ghcr.io/natrontech/pbs-exporter 4 | container_name: pbs1-exporter 5 | pull_policy: always 6 | environment: 7 | - TZ=${TZ} 8 | - PBS_API_TOKEN=${PBS_API_TOKEN} 9 | - PBS_ENDPOINT=https://pbs1.urbaman.cloud:8007 10 | - PBS_INSECURE=false 11 | - PBS_API_TOKEN_NAME=${PBS_API_TOKEN_NAME} 12 | - PBS_USERNAME=${PBS_USERNAME} 13 | #- PBS_METRICS_PATH="/pbs1-metrics" 14 | expose: 15 | - 10019 16 | security_opt: 17 | - no-new-privileges:true 18 | restart: unless-stopped 19 | networks: 20 | - monitoring 21 | labels: 22 | - "org.label-schema.group=monitoring" 23 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/postgres-exporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | postgres-exporter: 3 | image: prometheuscommunity/postgres-exporter:latest 4 | container_name: postgres-exporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | networks: 10 | - monitoring 11 | - databases 12 | command: 13 | - --collector.postmaster 14 | - --collector.long_running_transactions 15 | - --collector.process_idle 16 | - --collector.stat_activity_autovacuum 17 | - --collector.stat_statements 18 | - --collector.stat_wal_receiver 19 | - --collector.statio_user_indexes 20 | - --collector.statio_user_tables 21 | environment: 22 | - TZ=${TZ} 23 | - DATA_SOURCE_URI=postgres:5432/postgres?sslmode=disable 24 | - DATA_SOURCE_USER=postgres 25 | - DATA_SOURCE_PASS=${POSTGRES_PASSWORD} 26 | labels: 27 | - "org.label-schema.group=monitoring" 28 | 29 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/prometheus.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | prometheus: 4 | image: prom/prometheus:latest 5 | container_name: prometheus 6 | pull_policy: always 7 | security_opt: 8 | - no-new-privileges:true 9 | volumes: 10 | - '$CONFIGDIR/prometheus:/etc/prometheus' 11 | - '$DATADIR/prometheus:/prometheus' 12 | command: 13 | - '--config.file=/etc/prometheus/prometheus.yml' 14 | - '--storage.tsdb.path=/prometheus' 15 | - '--web.console.libraries=/etc/prometheus/console_libraries' 16 | - '--web.console.templates=/etc/prometheus/consoles' 17 | - '--storage.tsdb.retention.time=200h' 18 | - '--web.enable-lifecycle' 19 | restart: unless-stopped 20 | networks: 21 | - monitoring 22 | environment: 23 | - TZ=${TZ} 24 | labels: 25 | - "org.label-schema.group=monitoring" 26 | - "traefik.enable=true" 27 | - "traefik.docker.network=monitoring" 28 | 
# HTTP Routers 29 | - "traefik.http.routers.prometheus-router.entrypoints=websecure" 30 | - "traefik.http.routers.prometheus-router.rule=Host(`prometheus.$DOMAINNAME_1`)" 31 | # Middlewares 32 | - "traefik.http.routers.prometheus-router.middlewares=secure-headers@file" 33 | # HTTP Services 34 | - "traefik.http.routers.prometheus-router.service=prometheus-service" 35 | - "traefik.http.services.prometheus-service.loadbalancer.server.port=9090" 36 | 37 | #volumes: 38 | # prometheus-data: 39 | # external: true 40 | # prometheus-config: 41 | # external: true 42 | -------------------------------------------------------------------------------- /Docker/docker/compose/monitoring/redis-exporter.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | redisexporter: 3 | image: oliver006/redis_exporter:latest 4 | container_name: redis-exporter 5 | pull_policy: always 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | networks: 10 | - monitoring 11 | - databases 12 | environment: 13 | - TZ=${TZ} 14 | - REDIS_ADDR=redis://valkey:6379 15 | labels: 16 | - "org.label-schema.group=monitoring" 17 | -------------------------------------------------------------------------------- /Docker/docker/compose/security/clamav.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | clamav: 3 | image: clamav/clamav:latest_base 4 | pull_policy: always 5 | container_name: clamav 6 | security_opt: 7 | - no-new-privileges:true 8 | restart: unless-stopped 9 | networks: 10 | - security 11 | environment: 12 | # - DOZZLE_FILTER: "label=log_me" # limits logs displayed to containers with this label 13 | # - DOCKER_HOST: tcp://socket-proxy:2375 14 | - TZ=${TZ} 15 | volumes: 16 | - $DATADIR/clamav/db:/var/lib/clamav 17 | labels: 18 | - "org.label-schema.group=security" 19 | -------------------------------------------------------------------------------- /Docker/docker/compose/security/crowdsec.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | crowdsec: 3 | image: crowdsecurity/crowdsec 4 | container_name: crowdsec 5 | environment: 6 | PGID: "1000" 7 | COLLECTIONS: "crowdsecurity/traefik crowdsecurity/http-cve" 8 | expose: 9 | - "8080" 10 | volumes: 11 | - $DATADIR/crowdsec:/var/lib/crowdsec/data 12 | - /var/log/auth.log:/logs/auth.log:ro 13 | - /var/log/syslog:/logs/syslog:ro 14 | - $LOGDIR/traefik/access.log:/var/log/traefik/access.log:ro 15 | - $CONFIGDIR/crowdsec:/etc/crowdsec 16 | restart: unless-stopped 17 | labels: 18 | - traefik.enable=false 19 | networks: 20 | - traefik 21 | - security 22 | - monitoring 23 | - management 24 | - tools 25 | -------------------------------------------------------------------------------- /Docker/docker/compose/security/generate-indexer-certs.yml: -------------------------------------------------------------------------------- 1 | services: 2 | generator: 3 | image: wazuh/wazuh-certs-generator:0.0.2 4 | hostname: wazuh-certs-generator 5 | volumes: 6 | - '/mnt/DockerProd/configs/wazuh/wazuh_indexer_ssl_certs/:/certificates/' 7 | - '/mnt/DockerProd/configs/wazuh/certs.yml:/config/certs.yml' 8 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/collabora.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | collabora: 3 | container_name: collabora 4 | image: collabora/code:latest 5 | pull_policy: always 6 | 
#privileged: true 7 | cap_add: 8 | - MKNOD 9 | networks: 10 | - traefik 11 | - tools 12 | environment: 13 | - TZ=${TZ} 14 | - "extra-params=--o:ssl.enable=false --o:ssl.termination=true" 15 | - username=${COLLABORA_USERNAME} 16 | - password=${COLLABORA_PASSWORD} 17 | labels: 18 | - "org.label-schema.group=tools" 19 | - "traefik.enable=true" 20 | - "traefik.docker.network=tools" 21 | - "traefik.http.routers.collabora-router.entrypoints=websecure" 22 | - "traefik.http.routers.collabora-router.rule=Host(`collabora.$DOMAINNAME_1`)" 23 | - "traefik.http.routers.collabora-router.middlewares=secure-headers@file" 24 | - "traefik.http.routers.collabora-router.service=collabora-service" 25 | - "traefik.http.services.collabora-service.loadbalancer.server.scheme=https" 26 | - "traefik.http.services.collabora-service.loadbalancer.server.port=9980" 27 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/cyberchef.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | cyberchef: 3 | image: ghcr.io/gchq/cyberchef:latest 4 | pull_policy: always 5 | security_opt: 6 | - no-new-privileges:true 7 | restart: unless-stopped 8 | container_name: cyberchef 9 | networks: 10 | - traefik 11 | - tools 12 | environment: 13 | - TZ=${TZ} 14 | labels: 15 | - "org.label-schema.group=tools" 16 | - "traefik.enable=true" 17 | - "traefik.docker.network=tools" 18 | # HTTP Routers 19 | - "traefik.http.routers.cyberchef-router.entrypoints=websecure" 20 | - "traefik.http.routers.cyberchef-router.rule=Host(`cyberchef.$DOMAINNAME_1`)" 21 | # Middlewares 22 | - "traefik.http.routers.cyberchef-router.middlewares=secure-headers@file" 23 | # HTTP Services 24 | - "traefik.http.routers.cyberchef-router.service=cyberchef-service" 25 | - "traefik.http.services.cyberchef-service.loadbalancer.server.port=80" -------------------------------------------------------------------------------- /Docker/docker/compose/tools/drawio.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | drawio: 3 | container_name: drawio 4 | image: jgraph/drawio:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - tools 9 | environment: 10 | - TZ=${TZ} 11 | labels: 12 | - "org.label-schema.group=tools" 13 | - "traefik.enable=true" 14 | - "traefik.docker.network=tools" 15 | # HTTP Routers 16 | - "traefik.http.routers.drawio-router.entrypoints=websecure" 17 | - "traefik.http.routers.drawio-router.rule=Host(`drawio.$DOMAINNAME_1`)" # HostRegexp:drawio.${DOMAINNAME_1},{catchall:.*}" # Host(`drawio.$DOMAINNAME_1`)" 18 | # Middlewares 19 | - "traefik.http.routers.drawio-router.middlewares=secure-headers@file" 20 | # HTTP Services 21 | - "traefik.http.routers.drawio-router.service=drawio-service" 22 | - "traefik.http.services.drawio-service.loadbalancer.server.port=8080" 23 | 24 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/exalidraw.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | exalidraw: 3 | container_name: exalidraw 4 | image: excalidraw/excalidraw:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - tools 9 | environment: 10 | - TZ=${TZ} 11 | labels: 12 | - "org.label-schema.group=tools" 13 | - "traefik.enable=true" 14 | - "traefik.docker.network=tools" 15 | # HTTP Routers 16 | - "traefik.http.routers.exalidraw-router.entrypoints=websecure" 17 | - 
"traefik.http.routers.exalidraw-router.rule=Host(`exalidraw.$DOMAINNAME_1`)" # HostRegexp:exalidraw.${DOMAINNAME_1},{catchall:.*}" # Host(`exalidraw.$DOMAINNAME_1`)" 18 | # Middlewares 19 | - "traefik.http.routers.exalidraw-router.middlewares=secure-headers@file" 20 | # HTTP Services 21 | - "traefik.http.routers.exalidraw-router.service=exalidraw-service" 22 | - "traefik.http.services.exalidraw-service.loadbalancer.server.port=80" 23 | 24 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/homepage.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | homepage: 3 | image: ghcr.io/gethomepage/homepage:latest 4 | pull_policy: always 5 | security_opt: 6 | - no-new-privileges:true 7 | restart: unless-stopped 8 | container_name: homepage 9 | networks: 10 | - traefik 11 | - tools 12 | volumes: 13 | - $CONFIGDIR/homepage/config:/app/config # Make sure your local config directory exists 14 | - /var/run/docker.sock:/var/run/docker.sock:ro # (optional) For docker integrations, see alternative methods 15 | environment: 16 | - TZ={$TZ} 17 | - HOMEPAGE_ALLOWED_HOSTS=homepage.${DOMAINNAME_1} 18 | labels: 19 | - "org.label-schema.group=tools" 20 | - "traefik.enable=true" 21 | - "traefik.docker.network=tools" 22 | # HTTP Routers 23 | - "traefik.http.routers.homepage-router.entrypoints=websecure" 24 | - "traefik.http.routers.homepage-router.rule=Host(`homepage.$DOMAINNAME_1`)" 25 | # Middlewares 26 | - "traefik.http.routers.homepage-router.middlewares=secure-headers@file" 27 | # HTTP Services 28 | - "traefik.http.routers.homepage-router.service=homepage-service" 29 | - "traefik.http.services.homepage-service.loadbalancer.server.port=3000" -------------------------------------------------------------------------------- /Docker/docker/compose/tools/immich-ml.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | immich-machine-learning: 4 | container_name: immich-ml 5 | # For hardware acceleration, add one of -[armnn, cuda, rocm, openvino, rknn] to the image tag. 
6 | # Example tag: ${IMMICH_VERSION:-release}-cuda 7 | image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}-cuda 8 | #extends: cpu # uncomment this section for hardware acceleration - see https://immich.app/docs/features/ml-hardware-acceleration 9 | # file: hwaccel.ml.yml 10 | # service: cuda # set to one of [armnn, cuda, rocm, openvino, openvino-wsl, rknn] for accelerated inference - use the `-wsl` version for WSL2 where applicable 11 | networks: 12 | - tools 13 | volumes: 14 | - $DATADIR/immich-cache:/cache 15 | restart: always 16 | deploy: 17 | resources: 18 | reservations: 19 | devices: 20 | - driver: nvidia 21 | count: 1 22 | capabilities: 23 | - gpu 24 | healthcheck: 25 | disable: false 26 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/it-tools.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | it-tools: 3 | container_name: it-tools 4 | image: corentinth/it-tools:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - tools 9 | environment: 10 | - TZ=${TZ} 11 | labels: 12 | - "org.label-schema.group=tools" 13 | - "traefik.enable=true" 14 | - "traefik.docker.network=tools" 15 | # HTTP Routers 16 | - "traefik.http.routers.it-tools-router.entrypoints=websecure" 17 | - "traefik.http.routers.it-tools-router.rule=Host(`it-tools.$DOMAINNAME_1`)" # HostRegexp:it-tools.${DOMAINNAME_1},{catchall:.*}" # Host(`it-tools.$DOMAINNAME_1`)" 18 | # Middlewares 19 | - "traefik.http.routers.it-tools-router.middlewares=secure-headers@file" 20 | # HTTP Services 21 | - "traefik.http.routers.it-tools-router.service=it-tools-service" 22 | - "traefik.http.services.it-tools-service.loadbalancer.server.port=80" 23 | 24 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/roundcube.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | 3 | roundcube: 4 | container_name: roundcube 5 | image: roundcube/roundcubemail:latest 6 | restart: always 7 | networks: 8 | - tools 9 | - databases 10 | environment: 11 | - ROUNDCUBEMAIL_DB_TYPE=pgsql 12 | - ROUNDCUBEMAIL_DB_HOST=postgres 13 | - ROUNDCUBEMAIL_DB_PORT=5432 14 | - ROUNDCUBEMAIL_DB_NAME=roundcube 15 | - ROUNDCUBEMAIL_DB_USER=roundcube 16 | - ROUNDCUBEMAIL_DB_PASSWORD=${ROUNDCUBE_DB_PASSWORD} 17 | - ROUNDCUBEMAIL_DEFAULT_HOST=ssl://mail.${DOMAINNAME_1} 18 | - ROUNDCUBEMAIL_DEFAULT_PORT=993 19 | - ROUNDCUBEMAIL_SMTP_SERVER=ssl://mail..${DOMAINNAME_1} 20 | - ROUNDCUBEMAIL_SMTP_PORT=587 21 | - ROUNDCUBEMAIL_SKIN=elastic 22 | - ROUNDCUBEMAIL_PLUGINS=archive,zipdownload 23 | labels: 24 | - "org.label-schema.group=tools" 25 | - "traefik.enable=true" 26 | - "traefik.docker.network=tools" 27 | # HTTP Routers 28 | - "traefik.http.routers.roundcube-router.entrypoints=websecure" 29 | - "traefik.http.routers.roundcube-router.rule=Host(`webmail.$DOMAINNAME_2`)" 30 | # Middlewares 31 | - "traefik.http.routers.roundcube-router.middlewares=secure-headers-no-frame-deny@file" 32 | # HTTP Services 33 | - "traefik.http.routers.roundcube-router.service=roundcube-service" 34 | - "traefik.http.services.roundcube-service.loadbalancer.server.port=80" 35 | -------------------------------------------------------------------------------- /Docker/docker/compose/tools/stirling-pdf.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | s-pdf: 3 | container_name: s-pdf 4 | image: 
frooodle/s-pdf:latest 5 | pull_policy: always 6 | networks: 7 | - traefik 8 | - tools 9 | volumes: 10 | - $DATADIR/stirling-pdf:/usr/share/tessdata 11 | - $CONFIGDIR/stirling-pdf:/configs 12 | - $LOGDIR/stirling-pdf:/logs 13 | environment: 14 | - DOCKER_ENABLE_SECURITY=true 15 | - SECURITY_ENABLE_LOGIN=true 16 | - INSTALL_BOOK_AND_ADVANCED_HTML_OPS=true 17 | - SECURITY_CSRFDISABLED=true 18 | - UI_APPNAME=StirlingPDF 19 | - UI_HOMEDESCRIPTION=Stirling PDF tools 20 | - UI_APPNAMENAVBAR=Stirling PDF Tools 21 | - SYSTEM_MAXFILESIZE=10000 22 | - METRICS_ENABLED=true 23 | - SYSTEM_GOOGLEVISIBILITY=false 24 | - SYSTEM_DEFAULTLOCALE=it_IT 25 | - TZ=${TZ} 26 | labels: 27 | - "org.label-schema.group=tools" 28 | - "traefik.enable=true" 29 | - "traefik.docker.network=tools" 30 | # HTTP Routers 31 | - "traefik.http.routers.s-pdf-router.entrypoints=websecure" 32 | - "traefik.http.routers.s-pdf-router.rule=Host(`s-pdf.$DOMAINNAME_1`)" # HostRegexp:s-pdf.${DOMAINNAME_1},{catchall:.*}" # Host(`s-pdf.$DOMAINNAME_1`)" 33 | # Middlewares 34 | - "traefik.http.routers.s-pdf-router.middlewares=secure-headers@file" 35 | # HTTP Services 36 | - "traefik.http.routers.s-pdf-router.service=s-pdf-service" 37 | - "traefik.http.services.s-pdf-service.loadbalancer.server.port=8080" 38 | 39 | -------------------------------------------------------------------------------- /Docker/readme.md: -------------------------------------------------------------------------------- 1 | # Docker Installation and Post-Installation setup 2 | 3 | ## Installation (Ubuntu 22.04) 4 | 5 | ```bash 6 | for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done 7 | # Add Docker's official GPG key: 8 | sudo apt-get update 9 | sudo apt-get install ca-certificates curl 10 | sudo install -m 0755 -d /etc/apt/keyrings 11 | sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc 12 | sudo chmod a+r /etc/apt/keyrings/docker.asc 13 | 14 | # Add the repository to Apt sources: 15 | echo \ 16 | "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ 17 | $(. 
/etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \ 18 | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null 19 | sudo apt-get update 20 | sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin 21 | sudo docker run hello-world 22 | ``` 23 | 24 | ## Post Installation settings 25 | 26 | ### Non-root user 27 | 28 | ```bash 29 | sudo groupadd docker 30 | sudo usermod -aG docker $USER 31 | newgrp docker 32 | docker run hello-world 33 | ``` 34 | 35 | ### Start at boot 36 | 37 | ```bash 38 | sudo systemctl enable docker.service 39 | sudo systemctl enable containerd.service 40 | ``` 41 | 42 | ### Configure journald as default logging driver 43 | 44 | ```bash 45 | sudo vi /etc/docker/daemon.json 46 | ``` 47 | 48 | ```json 49 | { 50 | "log-driver": "journald" 51 | } 52 | ``` 53 | 54 | ```bash 55 | sudo journalctl [-b] CONTAINER_NAME= 56 | ``` 57 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | route: 2 | group_by: ['alertname'] 3 | group_wait: 30s 4 | group_interval: 5m 5 | repeat_interval: 1h 6 | receiver: 'web.hook' 7 | receivers: 8 | - name: 'web.hook' 9 | webhook_configs: 10 | - url: 'http://127.0.0.1:5001/' 11 | inhibit_rules: 12 | - source_match: 13 | severity: 'critical' 14 | target_match: 15 | severity: 'warning' 16 | equal: ['alertname', 'dev', 'instance'] 17 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/alertmanager/config.yml: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: '5m' 3 | route: 4 | group_by: ['namespace'] 5 | group_wait: 30s 6 | group_interval: 5m 7 | repeat_interval: 12h 8 | receiver: 'null' 9 | routes: 10 | - receiver: 'null' 11 | matchers: 12 | - alertname =~ "InfoInhibitor|Watchdog" 13 | - receiver: 'mail' 14 | receivers: 15 | - name: 'null' 16 | - name: 'mail' 17 | email_configs: 18 | - to: 'admin@domain.com' 19 | from: 'alertmanager@domain.com' 20 | smarthost: mail.domain.com:587 21 | auth_username: 'admin@domain.com' 22 | auth_identity: 'admin@domain.com' 23 | auth_password: '' 24 | send_resolved: true 25 | tls_config: 26 | insecure_skip_verify: true 27 | headers: 28 | subject: 'Prometheus Mail Alerts' 29 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/traefik/acme/acme.json: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Docker/volumes/DockerProd1/configs/traefik/acme/acme.json -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/traefik/rules/middlewares.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | middlewares: 3 | secure-headers: 4 | headers: 5 | frameDeny: true 6 | sslRedirect: true 7 | browserXssFilter: true 8 | contentTypeNosniff: true 9 | stsIncludeSubdomains: true 10 | stsPreload: true 11 | stsSeconds: 31536000 12 | secure-headers-no-frame-deny: 13 | headers: 14 | sslRedirect: true 15 | browserXssFilter: true 16 | contentTypeNosniff: true 17 | stsIncludeSubdomains: true 18 | stsPreload: true 19 | stsSeconds: 31536000 20 | traefik-bouncer: 21 | plugin: 22 | 
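        # CrowdSec bouncer middleware; the bouncer plugin itself has to be declared in Traefik's
        # static configuration (experimental plugins section) for this dynamic config to load.
        # crowdsecLapiKey is left blank here on purpose: generate a key against the local API
        # (for example `cscli bouncers add traefik-bouncer` inside the crowdsec container) and paste it in.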
bouncer: 23 | enabled: true 24 | crowdsecLapiKey: 25 | crowdsecLapiHost: crowdsec:8080 26 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/traefik/rules/services.yaml: -------------------------------------------------------------------------------- 1 | http: 2 | services: 3 | asustornas1-service: 4 | loadBalancer: 5 | servers: 6 | - url: "https://:8001" 7 | hosting1-https-service: 8 | loadBalancer: 9 | servers: 10 | - url: "https://" 11 | hosting1-usermin-service: 12 | loadBalancer: 13 | servers: 14 | - url: "https://:2083" 15 | hosting1-webmin-service: 16 | loadBalancer: 17 | servers: 18 | - url: "https://:2087" 19 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/traefik/rules/tls-opts.yaml: -------------------------------------------------------------------------------- 1 | tls: 2 | options: 3 | tls-opts: 4 | minVersion: VersionTLS12 5 | cipherSuites: 6 | - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 7 | - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 8 | - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 9 | - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 10 | - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 11 | - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 12 | - TLS_AES_128_GCM_SHA256 13 | - TLS_AES_256_GCM_SHA384 14 | - TLS_CHACHA20_POLY1305_SHA256 15 | - TLS_FALLBACK_SCSV # Client is doing version fallback. See RFC 7507 16 | curvePreferences: 17 | - CurveP521 18 | - CurveP384 19 | sniStrict: true 20 | 21 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/wazuh/certs.yml: -------------------------------------------------------------------------------- 1 | nodes: 2 | # Wazuh indexer server nodes 3 | indexer: 4 | - name: wazuh.indexer 5 | ip: wazuh.indexer 6 | 7 | # Wazuh server nodes 8 | # Use node_type only with more than one Wazuh manager 9 | server: 10 | - name: wazuh.manager 11 | ip: wazuh.manager 12 | 13 | # Wazuh dashboard node 14 | dashboard: 15 | - name: wazuh.dashboard 16 | ip: wazuh.dashboard 17 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/wazuh/wazuh_dashboard/opensearch_dashboards.yml: -------------------------------------------------------------------------------- 1 | server.host: 0.0.0.0 2 | server.port: 443 3 | opensearch.hosts: https://wazuh.indexer:9200 4 | opensearch.ssl.verificationMode: certificate 5 | opensearch.requestHeadersWhitelist: ["securitytenant","Authorization"] 6 | opensearch_security.multitenancy.enabled: false 7 | opensearch_security.readonly_mode.roles: ["kibana_read_only"] 8 | server.ssl.enabled: true 9 | server.ssl.key: "/usr/share/wazuh-dashboard/certs/wazuh-dashboard-key.pem" 10 | server.ssl.certificate: "/usr/share/wazuh-dashboard/certs/wazuh-dashboard.pem" 11 | opensearch.ssl.certificateAuthorities: ["/usr/share/wazuh-dashboard/certs/root-ca.pem"] 12 | uiSettings.overrides.defaultRoute: /app/wz-home 13 | -------------------------------------------------------------------------------- /Docker/volumes/DockerProd1/configs/wazuh/wazuh_dashboard/wazuh.yml: -------------------------------------------------------------------------------- 1 | hosts: 2 | - 1513629884013: 3 | url: "https://wazuh.manager" 4 | port: 55000 5 | username: wazuh-wui 6 | password: " 8 | -------------------------------------------------------------------------------- /Kubernetes/Automation/ExternalSecrets/readme.md: 
-------------------------------------------------------------------------------- 1 | # ExternalSecrets deployment 2 | 3 | Create a KV secret engine in Hashicorp Vault 4 | 5 | ```bash 6 | helm repo add external-secrets https://charts.external-secrets.io 7 | helm repo update 8 | helm upgrade -i external-secrets external-secrets/external-secrets -n external-secrets --create-namespace 9 | ``` 10 | 11 | Create the ClusterSecretStore and the Vault credentials secret. 12 | 13 | Create ExternalSecrets or PushSecrets 14 | -------------------------------------------------------------------------------- /Kubernetes/Automation/ExternalSecrets/yamls/cluster-external-secret-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ClusterExternalSecret 3 | metadata: 4 | name: prova 5 | namespace: prova 6 | spec: 7 | externalSecretName: prova-cluster 8 | namespaceSelector: 9 | matchLabels: 10 | prova: secret 11 | refreshTime: "10s" 12 | externalSecretSpec: 13 | refreshInterval: "15s" 14 | secretStoreRef: 15 | name: vault-backend 16 | kind: ClusterSecretStore 17 | target: 18 | name: prova 19 | creationPolicy: Owner 20 | data: 21 | - secretKey: users 22 | remoteRef: 23 | key: prova 24 | property: users -------------------------------------------------------------------------------- /Kubernetes/Automation/ExternalSecrets/yamls/cluster-secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: vault-token 5 | data: 6 | token: cm9vdA== # "root" 7 | --- 8 | apiVersion: external-secrets.io/v1beta1 9 | kind: ClusterSecretStore 10 | metadata: 11 | name: vault-backend 12 | spec: 13 | provider: 14 | vault: 15 | server: "http://my.vault.server:8200" 16 | path: "kv-secrets" 17 | # Version is the Vault KV secret engine version. 18 | # This can be either "v1" or "v2", defaults to "v2" 19 | version: "v2" 20 | auth: 21 | # points to a secret that contains a vault token 22 | # https://www.vaultproject.io/docs/auth/token 23 | tokenSecretRef: 24 | name: "vault-token" 25 | key: "token" 26 | conditions: 27 | # Options are namespaceSelector, or namespaces. Could be assent for every namespace. 
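  # If `conditions` is omitted altogether, the ClusterSecretStore can be referenced from any namespace.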
28 | - namespaceSelector: 29 | matchLabels: 30 | my.namespace.io/some-label: "value" # Only namespaces with that label will work 31 | 32 | - namespaces: 33 | - "namespace-a" 34 | - "namespace-b" -------------------------------------------------------------------------------- /Kubernetes/Automation/ExternalSecrets/yamls/external-secret-example.yaml: -------------------------------------------------------------------------------- 1 | kind: ExternalSecret 2 | metadata: 3 | name: prova 4 | namespace: prova 5 | spec: 6 | data: 7 | - remoteRef: 8 | conversionStrategy: Default 9 | decodingStrategy: None 10 | key: prova # secret to sync 11 | metadataPolicy: None 12 | property: users # key to sync 13 | secretKey: users # key in k8s secret 14 | refreshInterval: 15s 15 | secretStoreRef: 16 | kind: ClusterSecretStore 17 | name: vault-backend 18 | target: 19 | creationPolicy: Owner 20 | deletionPolicy: Retain 21 | name: prova # secret in k8s -------------------------------------------------------------------------------- /Kubernetes/Automation/HashicorpVault/confs/cert.config: -------------------------------------------------------------------------------- 1 | [req] 2 | req_extensions = v3_req 3 | distinguished_name = dn 4 | prompt = no 5 | 6 | [dn] 7 | CN = vault.local 8 | 9 | [v3_req] 10 | keyUsage = keyEncipherment, dataEncipherment 11 | extendedKeyUsage = serverAuth 12 | subjectAltName = @alt_names 13 | 14 | [alt_names] 15 | DNS.1 = vault.svc.cluster.local 16 | DNS.2 = vault-0.vault-internal 17 | DNS.3 = vault-1.vault-internal 18 | DNS.4 = vault-2.vault-internal 19 | DNS.5 = vault.local -------------------------------------------------------------------------------- /Kubernetes/Automation/HashicorpVault/yamls/hashicorp-vault-values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | serverTelemetry: 3 | prometheusOperator: true 4 | 5 | server: 6 | ingress: 7 | enabled: false 8 | standalone: 9 | config: | 10 | ui = true 11 | 12 | listener "tcp" { 13 | tls_disable = 1 14 | address = "[::]:8200" 15 | cluster_address = "[::]:8201" 16 | telemetry { 17 | unauthenticated_metrics_access = "true" 18 | } 19 | } 20 | storage "file" { 21 | path = "/vault/data" 22 | } 23 | telemetry { 24 | prometheus_retention_time = "30s" 25 | disable_hostname = true 26 | } 27 | 28 | ui: 29 | enabled: true 30 | 31 | serverTelemetry: 32 | serviceMonitor: 33 | enabled: true 34 | selectors: 35 | release: kube-prometheus-stack 36 | -------------------------------------------------------------------------------- /Kubernetes/Automation/Semaphore/readme.md: -------------------------------------------------------------------------------- 1 | # Semaphore 2 | 3 | ## External behind Traefik 4 | 5 | ```bash 6 | kubectl apply -f ig-semaphore.yaml 7 | ``` 8 | 9 | ## Deploy in the Cluster 10 | 11 | Create a MySQL/MariaDB db and user (semaphoreui/semaphoreui) 12 | 13 | Create a key (for the SEMAPHORE_ACCESS_KEY_ENCRYPTION environment variable) 14 | 15 | ```bash 16 | head -c32 /dev/urandom | base64 17 | ``` 18 | 19 | Encode the passwords: 20 | 21 | ```bash 22 | echo -n '' | base64 23 | echo -n '' | base64 24 | echo -n '' | base64 25 | ``` 26 | 27 | Put them in the secret, then apply the deployment 28 | 29 | ```bash 30 | kubectl apply -f semaphoreui.yaml 31 | ``` 32 | -------------------------------------------------------------------------------- /Kubernetes/Bookstack/readme.md: -------------------------------------------------------------------------------- 1 | # Bookstack installation 2 | 3 
| Deploy the yaml file 4 | 5 | Generate a [Laravel APP_KEY](https://generate-random.org/laravel-key-generator?count=1) and set it, together with a mariadb/mysql db and user. 6 | 7 | ```bash 8 | kubectl apply -f bookstack-deploy.yaml 9 | ``` 10 | -------------------------------------------------------------------------------- /Kubernetes/Cert-manager/readme.md: -------------------------------------------------------------------------------- 1 | # Cert Manager (to manage SSL certs with Traefik in HA mode) 2 | 3 | ## Install Cert-manager 4 | 5 | ```bash 6 | kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.15.2/cert-manager.yaml 7 | ``` 8 | 9 | Or via Helm 10 | 11 | ```bash 12 | helm repo add jetstack https://charts.jetstack.io 13 | helm repo update 14 | helm upgrade -i cert-manager jetstack/cert-manager --namespace cert-manager --create-namespace --set crds.enabled=true --set crds.keep=true --set dns01RecursiveNameserversOnly=true --set dns01RecursiveNameservers=8.8.8.8:53\,1.1.1.1:53 # extraArgs='{--dns01-recursive-nameservers-only,--dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53}' 15 | ``` 16 | 17 | Set `crds.enabled=true` (formerly `installCRDs=true`) to install the CRDs (best method) 18 | 19 | ## Let's Encrypt through Cloudflare DNS Challenge 20 | 21 | Create a secret with the Cloudflare API Token (all domains) 22 | 23 | API Token recommended settings: 24 | 25 | - Permissions: 26 | - Zone - DNS - Edit 27 | - Zone - Zone - Read 28 | - Zone Resources: 29 | - Include - All Zones 30 | 31 | Set the token and the email in the `cert-manager-issuer-cfdns01.yaml` file, then apply. 32 | 33 | ### Certificate creation 34 | 35 | Let's create our wildcard certificate using the `cert.yaml` template. 36 | -------------------------------------------------------------------------------- /Kubernetes/Cert-manager/yamls/cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: urbaman-wildcard 5 | spec: 6 | # Certificate will be valid for these domain names 7 | dnsNames: 8 | - urbaman.it 9 | - '*.urbaman.it' 10 | # Reference our issuer 11 | # As it's a ClusterIssuer, it can be in a different namespace 12 | issuerRef: 13 | kind: ClusterIssuer 14 | name: cert-manager-acme-issuer 15 | # Secret that will be created with our certificate and private keys 16 | secretName: urbaman-wildcard-certificate -------------------------------------------------------------------------------- /Kubernetes/ClamAV/readme.md: -------------------------------------------------------------------------------- 1 | # ClamAV installation 2 | 3 | Install the Helm chart 4 | 5 | ```bash 6 | helm repo add wiremind https://wiremind.github.io/wiremind-helm-charts 7 | helm repo update 8 | helm upgrade -i -n clamav clamav --create-namespace wiremind/clamav --set hpa.enabled=false 9 | #helm upgrade -i -n clamav clamav --create-namespace wiremind/clamav --set resources.limits.cpu=200m --set resources.limits.memory=1536Mi --set resources.requests.cpu=100m --set resources.requests.memory=1024Mi --set hpa.cpu=80 --set hpa.memory=80 10 | ``` 11 | 12 | ## Check from a client 13 | 14 | ```bash 15 | sudo apt install -y clamdscan 16 | sudo apt remove -y clamav-daemon clamav-freshclam 17 | ``` 18 | 19 | Port-forward the service 20 | 21 | ```bash 22 | kubectl port-forward -n clamav service/clamav :3310 23 | ``` 24 | 25 | Add the `TCPAddr` and `TCPSocket` options (`TCPAddr 1.2.3.4` `TCPSocket `) and remove `LocalSocket` in the client's 
`/etc/clamav/clamd.conf` file (create it if not present). 26 | 27 | Create a file and scan it: 28 | 29 | ```bash 30 | touch prova 31 | clamdscan -v --stream [--config-file=/etc/clamav/clamd.conf] --fdpass prova 32 | ``` 33 | -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/01-cloudflare-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-dnschallenge-credentials 5 | namespace: traefik 6 | type: Opaque 7 | stringData: 8 | email: mail@example.com 9 | apiKey: 1234 10 | -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/02-cloudflareaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cf.containeroo.ch/v1beta1 2 | kind: Account 3 | metadata: 4 | name: account-sample 5 | spec: 6 | email: mail@example.com 7 | globalAPIKey: 8 | secretRef: 9 | name: cloudflare-global-api-key 10 | namespace: cloudflare-operator 11 | managedZones: 12 | - example.com -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/03-dynamic-IP.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cf.containeroo.ch/v1beta1 2 | kind: IP 3 | metadata: 4 | name: dynamic-external-ipv4-address 5 | spec: 6 | type: dynamic 7 | interval: 5m 8 | ipSources: 9 | - url: https://ifconfig.me/ip 10 | - url: https://ipecho.net/plain 11 | - url: https://myip.is/ip/ 12 | - url: https://checkip.amazonaws.com 13 | - url: https://api.ipify.org -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/04a-dns-a-dynamic-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cf.containeroo.ch/v1beta1 2 | kind: DNSRecord 3 | metadata: 4 | name: example-com 5 | namespace: cloudflare-operator 6 | spec: 7 | name: example.com 8 | type: A 9 | ipRef: 10 | name: dynamic-external-ipv4-address 11 | proxied: true 12 | ttl: 1 13 | interval: 5m -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/04b-dns-a-fixed-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cf.containeroo.ch/v1beta1 2 | kind: DNSRecord 3 | metadata: 4 | name: blog 5 | namespace: cloudflare-operator 6 | spec: 7 | name: blob.example.com 8 | content: 178.4.20.69 9 | type: A 10 | proxied: true 11 | ttl: 1 12 | interval: 5m -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/04c-dns-cname.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cf.containeroo.ch/v1beta1 2 | kind: DNSRecord 3 | metadata: 4 | name: blog-example-com 5 | namespace: cloudflare-operator 6 | spec: 7 | name: blog.example.com 8 | content: arecord.example.com 9 | type: CNAME 10 | proxied: true 11 | ttl: 1 12 | interval: 5m -------------------------------------------------------------------------------- /Kubernetes/Cloudflare-Operator/yamls/05-prometheus-pod-monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: PodMonitor 5 | metadata: 6 
| name: cloudflare-operator 7 | namespace: cloudflare-operator 8 | labels: 9 | app.kubernetes.io/instance: cloudflare-operator 10 | app.kubernetes.io/name: cloudflare-operator 11 | release: kube-prometheus-stack 12 | spec: 13 | podMetricsEndpoints: 14 | - interval: 60s 15 | port: metrics 16 | selector: 17 | matchLabels: 18 | app.kubernetes.io/instance: cloudflare-operator 19 | app.kubernetes.io/name: cloudflare-operator 20 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/01-Prepare-Machines/configs/20auto-upgrades: -------------------------------------------------------------------------------- 1 | APT::Periodic::Update-Package-Lists "1"; 2 | APT::Periodic::Unattended-Upgrade "1"; 3 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/01-Prepare-Machines/configs/50-default.conf: -------------------------------------------------------------------------------- 1 | # Default rules for rsyslog. 2 | # 3 | # For more information see rsyslog.conf(5) and /etc/rsyslog.conf 4 | 5 | # 6 | # First some standard log files. Log by facility. 7 | # 8 | auth,authpriv.* /var/log/auth.log 9 | *.*;auth,authpriv.none -/var/log/syslog 10 | #cron.* /var/log/cron.log 11 | #daemon.* -/var/log/daemon.log 12 | kern.* -/var/log/kern.log 13 | #lpr.* -/var/log/lpr.log 14 | mail.* -/var/log/mail.log 15 | #user.* -/var/log/user.log 16 | 17 | # 18 | # Logging for the mail system. Split it up so that 19 | # it is easy to write scripts to parse these files. 20 | # 21 | mail.info -/var/log/mail.info 22 | mail.warn -/var/log/mail.warn 23 | mail.err /var/log/mail.err 24 | 25 | # 26 | # Some "catch-all" log files. 27 | # 28 | #*.=debug;\ 29 | # auth,authpriv.none;\ 30 | # news.none;mail.none -/var/log/debug 31 | #*.=info;*.=notice;*.=warn;\ 32 | # auth,authpriv.none;\ 33 | # cron,daemon.none;\ 34 | # mail,news.none -/var/log/messages 35 | 36 | # 37 | # Emergencies are sent to everybody logged in. 38 | # 39 | *.emerg :omusrmsg:* 40 | 41 | # 42 | # I like to have messages displayed on the console, but only on a virtual 43 | # console I usually leave idle. 44 | # 45 | #daemon,mail.*;\ 46 | # news.=crit;news.=err;news.=notice;\ 47 | # *.=debug;*.=info;\ 48 | # *.=notice;*.=warn /dev/tty8 49 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/01-Prepare-Machines/configs/main.cf: -------------------------------------------------------------------------------- 1 | # See /usr/share/postfix/main.cf.dist for a commented, more complete version 2 | 3 | myhostname=k8cp1.urbaman.it 4 | 5 | smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU) 6 | biff = no 7 | 8 | # appending .domain is the MUA's job. 
9 | append_dot_mydomain = no 10 | 11 | # Uncomment the next line to generate "delayed mail" warnings 12 | #delay_warning_time = 4h 13 | 14 | alias_maps = hash:/etc/aliases 15 | alias_database = hash:/etc/aliases 16 | #mydestination = $myhostname, localhost.$mydomain, localhost 17 | #relayhost = 18 | mynetworks = 127.0.0.0/8 19 | inet_interfaces = loopback-only 20 | recipient_delimiter = + 21 | 22 | #Relay 23 | relayhost = [smtp.urbaman.it]:587 24 | smtp_use_tls = yes 25 | smtp_sasl_auth_enable = yes 26 | smtp_sasl_security_options = noanonymous 27 | smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd 28 | smtp_tls_CAfile = /etc/ssl/certs/ca-certificates.crt 29 | 30 | compatibility_level = 2 31 | 32 | smtp_header_checks = pcre:/etc/postfix/smtp_header_checks 33 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/01-Prepare-Machines/configs/sasl_passwd: -------------------------------------------------------------------------------- 1 | [smtp.gmail.com]:587 testmehere@gmail.com:PASSWD -------------------------------------------------------------------------------- /Kubernetes/Cluster/01-Prepare-Machines/configs/smtp_header_checks file: -------------------------------------------------------------------------------- 1 | /^From:.*/ REPLACE From: HOSTNAME-alert -------------------------------------------------------------------------------- /Kubernetes/Cluster/02-External-Etcd/configs/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd key-value store 3 | Documentation=https://github.com/etcd-io/etcd 4 | After=network.target 5 | 6 | [Service] 7 | Type=notify 8 | EnvironmentFile=/etc/etcd/etcd.conf 9 | ExecStart=/usr/local/bin/etcd 10 | Restart=always 11 | RestartSec=10s 12 | LimitNOFILE=40000 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/02-External-Etcd/configs/etcd1.conf: -------------------------------------------------------------------------------- 1 | ETCD_NAME=etcd1 2 | ETCD_LISTEN_PEER_URLS="https://10.0.50.41:2380" 3 | ETCD_LISTEN_CLIENT_URLS="https://10.0.50.41:2379" 4 | ETCD_INITIAL_CLUSTER_TOKEN="etcdclstr" 5 | ETCD_INITIAL_CLUSTER="etcd1=https://10.0.50.41:2380,etcd2=https://10.0.50.42:2380,etcd3=https://10.0.50.43:2380" 6 | ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.50.41:2380" 7 | ETCD_ADVERTISE_CLIENT_URLS="https://10.0.50.41:2379" 8 | ETCD_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 9 | ETCD_CERT_FILE="/etc/etcd/server.crt" 10 | ETCD_KEY_FILE="/etc/etcd/server.key" 11 | ETCD_PEER_CLIENT_CERT_AUTH=true 12 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 13 | ETCD_PEER_KEY_FILE="/etc/etcd/server.key" 14 | ETCD_PEER_CERT_FILE="/etc/etcd/server.crt" 15 | ETCD_DATA_DIR="/var/lib/etcd" 16 | ETCD_SNAPSHOT_COUNT="10000" 17 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/02-External-Etcd/configs/etcd2.conf: -------------------------------------------------------------------------------- 1 | ETCD_NAME=etcd2 2 | ETCD_LISTEN_PEER_URLS="https://10.0.50.42:2380" 3 | ETCD_LISTEN_CLIENT_URLS="https://10.0.50.42:2379" 4 | ETCD_INITIAL_CLUSTER_TOKEN="etcdclstr" 5 | ETCD_INITIAL_CLUSTER="etcd1=https://10.0.50.41:2380,etcd2=https://10.0.50.42:2380,etcd3=https://10.0.50.43:2380" 6 | ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.50.42:2380" 7 | ETCD_ADVERTISE_CLIENT_URLS="https://10.0.50.42:2379" 8 | 
ETCD_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 9 | ETCD_CERT_FILE="/etc/etcd/server.crt" 10 | ETCD_KEY_FILE="/etc/etcd/server.key" 11 | ETCD_PEER_CLIENT_CERT_AUTH=true 12 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 13 | ETCD_PEER_KEY_FILE="/etc/etcd/server.key" 14 | ETCD_PEER_CERT_FILE="/etc/etcd/server.crt" 15 | ETCD_DATA_DIR="/var/lib/etcd" 16 | ETCD_SNAPSHOT_COUNT="10000" 17 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/02-External-Etcd/configs/etcd3.conf: -------------------------------------------------------------------------------- 1 | ETCD_NAME=etcd3 2 | ETCD_LISTEN_PEER_URLS="https://10.0.50.43:2380" 3 | ETCD_LISTEN_CLIENT_URLS="https://10.0.50.43:2379" 4 | ETCD_INITIAL_CLUSTER_TOKEN="etcdclstr" 5 | ETCD_INITIAL_CLUSTER="etcd1=https://10.0.50.41:2380,etcd2=https://10.0.50.42:2380,etcd3=https://10.0.50.43:2380" 6 | ETCD_INITIAL_ADVERTISE_PEER_URLS="https://10.0.50.43:2380" 7 | ETCD_ADVERTISE_CLIENT_URLS="https://10.0.50.43:2379" 8 | ETCD_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 9 | ETCD_CERT_FILE="/etc/etcd/server.crt" 10 | ETCD_KEY_FILE="/etc/etcd/server.key" 11 | ETCD_PEER_CLIENT_CERT_AUTH=true 12 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/etcd-ca.crt" 13 | ETCD_PEER_KEY_FILE="/etc/etcd/server.key" 14 | ETCD_PEER_CERT_FILE="/etc/etcd/server.crt" 15 | ETCD_DATA_DIR="/var/lib/etcd" 16 | ETCD_SNAPSHOT_COUNT="10000" 17 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/03-High-Availability/configs/check_apiserver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | errorExit() { 4 | echo "*** $*" 1>&2 5 | exit 1 6 | } 7 | 8 | curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/" 9 | if ip addr | grep -q 10.0.50.64; then 10 | curl --silent --max-time 2 --insecure https://10.0.50.64:6443/ -o /dev/null || errorExit "Error GET https://10.0.50.64:6443/" 11 | fi 12 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/03-High-Availability/configs/keepalived_backup.conf: -------------------------------------------------------------------------------- 1 | ! /etc/keepalived/keepalived.conf 2 | ! Configuration File for keepalived 3 | global_defs { 4 | router_id LVS_DEVEL 5 | } 6 | vrrp_script check_apiserver { 7 | script "/etc/keepalived/check_apiserver.sh" 8 | interval 3 9 | weight -2 10 | fall 10 11 | rise 2 12 | } 13 | 14 | vrrp_instance VI_1 { 15 | state BACKUP 16 | interface ens18 17 | virtual_router_id 51 18 | priority 100 19 | authentication { 20 | auth_type PASS 21 | auth_pass 42 22 | } 23 | virtual_ipaddress { 24 | 10.0.50.64 25 | } 26 | track_script { 27 | check_apiserver 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/03-High-Availability/configs/keepalived_master.conf: -------------------------------------------------------------------------------- 1 | ! /etc/keepalived/keepalived.conf 2 | ! 
Configuration File for keepalived 3 | global_defs { 4 | router_id LVS_DEVEL 5 | } 6 | vrrp_script check_apiserver { 7 | script "/etc/keepalived/check_apiserver.sh" 8 | interval 3 9 | weight -2 10 | fall 10 11 | rise 2 12 | } 13 | 14 | vrrp_instance VI_1 { 15 | state MASTER 16 | interface ens18 17 | virtual_router_id 51 18 | priority 101 19 | authentication { 20 | auth_type PASS 21 | auth_pass 42 22 | } 23 | virtual_ipaddress { 24 | 10.0.50.64 25 | } 26 | track_script { 27 | check_apiserver 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/04-Kubernetes/configs/99-kubernetes-cri.conf: -------------------------------------------------------------------------------- 1 | net.bridge.bridge-nf-call-iptables = 1 2 | net.bridge.bridge-nf-call-ip6tables = 1 3 | net.ipv4.ip_forward = 1 4 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/04-Kubernetes/configs/containerd.conf: -------------------------------------------------------------------------------- 1 | overlay 2 | br_netfilter 3 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/04-Kubernetes/configs/custom-resources.yaml: -------------------------------------------------------------------------------- 1 | # This section includes base Calico installation configuration. 2 | # For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.Installation 3 | apiVersion: operator.tigera.io/v1 4 | kind: Installation 5 | metadata: 6 | name: default 7 | spec: 8 | # Configures Calico networking. 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: 172.16.0.0/12 14 | encapsulation: VXLANCrossSubnet 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | 18 | --- 19 | 20 | # This section configures the Calico API server. 21 | # For more information, see: https://projectcalico.docs.tigera.io/v3.23/reference/installation/api#operator.tigera.io/v1.APIServer 22 | apiVersion: operator.tigera.io/v1 23 | kind: APIServer 24 | metadata: 25 | name: default 26 | spec: {} 27 | 28 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/04-Kubernetes/configs/kubeadm-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kubeadm.k8s.io/v1beta3 3 | kind: ClusterConfiguration 4 | kubernetesVersion: stable 5 | controlPlaneEndpoint: "k8cp.urbaman.it:6443" # change this (see below) 6 | networking: 7 | podSubnet: "172.16.0.0/12" # change this (see below) 8 | etcd: 9 | external: 10 | endpoints: 11 | - https://k8cp.urbaman.it:2379 # change ETCD_0_IP appropriately 12 | caFile: /etc/kubernetes/pki/etcd/ca.crt 13 | certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt 14 | keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key 15 | -------------------------------------------------------------------------------- /Kubernetes/Cluster/04-Microk8s/yamls/microk8s.yaml: -------------------------------------------------------------------------------- 1 | # microk8s-config.yaml 2 | --- 3 | # Set the cluster and services CIDRs 4 | # 'extraKubeProxyArgs' is extra arguments to add to the local node kube-proxy. 5 | # Set a value to null to remove it from the arguments. 
6 | version: 0.2.0 7 | addons: 8 | - name: dns 9 | args: [10.0.50.3,10.100.0.10] 10 | - name: ha-cluster 11 | - name: helm 12 | - name: helm3 13 | extraCNIEnv: 14 | IPv4_CLUSTER_CIDR: "10.50.0.0/16" 15 | IPv4_SERVICE_CIDR: "10.100.0.0/16" 16 | extraSANs: 17 | - 10.100.0.1 18 | extraKubeAPIServerArgs: 19 | --etcd-servers: https://etcd.urbaman.it:2379 20 | --etcd-cafile: ${SNAP_DATA}/certs/etcd-cluster-ca.crt 21 | --etcd-certfile: ${SNAP_DATA}/certs/etcd-cluster-client.crt 22 | --etcd-keyfile: ${SNAP_DATA}/certs/etcd-cluster-client.key 23 | --bind-address: 0.0.0.0 -------------------------------------------------------------------------------- /Kubernetes/Cluster/readme.md: -------------------------------------------------------------------------------- 1 | # Creating the Cluster 2 | 3 | 1. [Preparing the machines](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Cluster/01-Prepare-Machines) 4 | 2. [External Etcd](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Cluster/02-External-Etcd) 5 | 3. [High Availability](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Cluster/03-High-Availability) 6 | 4. [Kubernetes](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Cluster/04-Kubernetes) 7 | -------------------------------------------------------------------------------- /Kubernetes/Coding/CodeServer/readme.md: -------------------------------------------------------------------------------- 1 | # Installing Code-Server 2 | 3 | Deploy the yaml file 4 | 5 | ```bash 6 | kubectl apply -f code-server-deploy.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Collabora/readme.md: -------------------------------------------------------------------------------- 1 | # Collabora Online installation 2 | 3 | Create the namespace and a secret with collabora online username and password, install the helm chart with the provided collabora-values.yaml values file, then apply the traefik ig-collabora.yaml 4 | 5 | ```bash 6 | kubectl create namespace collabora-online 7 | kubectl create secret generic -n collabora-online collabora-auth \ 8 | --from-literal=username='USERNAME' \ 9 | --from-literal=password='PASSWORD' 10 | helm repo add collabora https://collaboraonline.github.io/online/ 11 | helm repo update 12 | helm show values collabora/collabora-online > collabora-values.yaml 13 | ``` 14 | 15 | Set the values you want or need (probaly resources requests and limits), see example. 
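For instance, a minimal sketch of the `resources` block to uncomment in `collabora-values.yaml` (the figures simply mirror the commented sample in the values file further down; size them to your nodes):

```yaml
resources:
  limits:
    cpu: "1800m"
    memory: "2000Mi"
  requests:
    cpu: "1800m"
    memory: "2000Mi"
```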
16 | 17 | ```bash 18 | helm upgrade -i --create-namespace --namespace collabora-online collabora-online collabora/collabora-online -f collabora-values.yaml 19 | kubectl apply -f ig-collabora.yaml 20 | ``` 21 | -------------------------------------------------------------------------------- /Kubernetes/Collabora/yamls/collabora-values.yaml: -------------------------------------------------------------------------------- 1 | collabora: 2 | 3 | extra_params: --o:ssl.enable=false --o:ssl.termination=true 4 | 5 | existingSecret: 6 | enabled: true 7 | secretName: "collabora-auth" 8 | usernameKey: "username" 9 | passwordKey: "password" 10 | 11 | #resources: 12 | # limits: 13 | # cpu: "1800m" 14 | # memory: "2000Mi" 15 | # requests: 16 | # cpu: "1800m" 17 | # memory: "2000Mi" 18 | 19 | replicaCount: 1 20 | 21 | autoscaling: 22 | enabled: false 23 | # minReplicas: 1 24 | # maxReplicas: 9 25 | # targetCPUUtilizationPercentage: 70 26 | # targetMemoryUtilizationPercentage: 50 27 | 28 | prometheus: 29 | servicemonitor: 30 | enabled: true 31 | labels: 32 | release: "kube-prometheus-stack" 33 | rules: 34 | enabled: true 35 | additionalLabels: 36 | release: "kube-prometheus-stack" 37 | grafana: 38 | dashboards: 39 | enabled: true -------------------------------------------------------------------------------- /Kubernetes/CommonCommands/readme.md: -------------------------------------------------------------------------------- 1 | # Common commands (Linux) 2 | 3 | ## Export a given yaml key repetitions to a file 4 | 5 | ```bash 6 | sed -rn 's/.*"(expr)": (.*)/\1 \2/p' prova.json 7 | ``` 8 | 9 | ## Get pods in "Non-Ready" status 10 | 11 | ```bash 12 | kubectl get pods --field-selector="status.phase!=Succeeded,status.phase!=Running" -A 13 | ``` 14 | -------------------------------------------------------------------------------- /Kubernetes/Composecraft/readme.md: -------------------------------------------------------------------------------- 1 | # Composecraft deployment 2 | 3 | ## Prerequisites 4 | 5 | You need a MongoDB up and running 6 | 7 | ## Deployment 8 | 9 | Get the root MongoDB password and define the mongodb-uri (mongodb://root:password@mongodb.mongodb.svc.cluster.local), then generate a random 64 length secret key, and put them in the secret value 10 | 11 | ```bash 12 | export LC_CTYPE=C; cat /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c 64; echo 13 | echo -n '' | base64 14 | echo -n '' | base64 15 | echo -n 'true' | base64 16 | echo -n 'http://localhot:3000' | base64 17 | ``` 18 | 19 | ```bash 20 | kubectl apply -f composecraft.yaml 21 | ``` 22 | -------------------------------------------------------------------------------- /Kubernetes/Computing/Boinc/readme.md: -------------------------------------------------------------------------------- 1 | # Boinc Installation 2 | 3 | Apply the deploy manifest 4 | 5 | ```bash 6 | kubectl apply -f boinc-deploy.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Cyberchef/readme.md: -------------------------------------------------------------------------------- 1 | # Install Cyberchef 2 | 3 | ```bash 4 | helm repo add cyberchef https://tamcore.github.io/cyberchef 5 | helm repo update 6 | helm upgrade -i -n cyberchef --create-namespace cyberchef cyberchef/cyberchef 7 | kubectl apply -f ig-cyberchef.yaml 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Dashboard/yamls/k8dashcrb.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: admin-user 12 | namespace: kubernetes-dashboard 13 | -------------------------------------------------------------------------------- /Kubernetes/Dashboard/yamls/k8dashsa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kubernetes-dashboard 6 | -------------------------------------------------------------------------------- /Kubernetes/Database/CloudBeaver/readme.md: -------------------------------------------------------------------------------- 1 | # Cloudbeaver installation 2 | 3 | ## Installation 4 | 5 | Create and substitute the Environment Variables in the secret: 6 | 7 | ```bash 8 | echo -n '' | base64 9 | echo -n '' | base64 10 | echo -n '' | base64 11 | echo -n '' | base64 12 | ``` 13 | 14 | Deploy the proposed yaml after checking the domains and other specs 15 | 16 | ```bash 17 | kubectl apply -f cloudbeaver.yaml 18 | ``` 19 | -------------------------------------------------------------------------------- /Kubernetes/Database/Couchdb/readme.md: -------------------------------------------------------------------------------- 1 | # Installation 2 | 3 | Add the repo and install 4 | 5 | ```bash 6 | helm repo add couchdb https://apache.github.io/couchdb-helm 7 | helm repo update 8 | helm upgrade -i couchdb couchdb/couchdb --set persistentVolume.enabled=true --set persistentVolume.storageClass=rook-ceph-nvme2tb --set couchdbConfig.chttpd.require_valid_user=true --set prometheusPort.enabled=true --set couchdbConfig.couchdb.uuid=90486a5d-b089-4356-8c1a-4f99fe63cb13 --set labels.app.kubernetes.io/name=couchdb --namespace couchdb --create-namespace 9 | ``` 10 | 11 | Get the password for the admin user 12 | 13 | ```bash 14 | kubectl get secret -n couchdb couchdb-couchdb -o go-template='{{ .data.adminPassword }}' | base64 -d 15 | ``` 16 | 17 | Connect to the Fauxton GUI to check the cluster and begin using it: [http://couchdb-couchdb.couchdb.svc.cluster.local:5984/_utils/#/](http://couchdb-couchdb.couchdb.svc.cluster.local:5984/_utils/#/) 18 | 19 | Add Traefik: 20 | 21 | ```bash 22 | kubectl apply -n cluchdb-traefik.yaml 23 | ``` 24 | 25 | Now you can connect to couchdb.domain.com to contact the cluster 26 | -------------------------------------------------------------------------------- /Kubernetes/Database/Dbgate/readme.md: -------------------------------------------------------------------------------- 1 | # Dbgate installation 2 | 3 | ## Installation 4 | 5 | Deploy the yaml file 6 | 7 | ```bash 8 | kubectl apply -f dbgate.yaml 9 | ``` 10 | -------------------------------------------------------------------------------- /Kubernetes/Database/Mariadb/yamls/ig-mariadb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-mariadb 5 | namespace: mariadb 6 | spec: 7 | entryPoints: 8 | - mysql 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: proxysql 13 | port: 6033 -------------------------------------------------------------------------------- 
/Kubernetes/Database/Memcached/readme.md: -------------------------------------------------------------------------------- 1 | # Install memcached 2 | 3 | Install the helm chart, customizing the settings (we add high-availability, metrics and persistence) 4 | 5 | ```bash 6 | helm upgrade -i memcached -n memcached --create-namespace oci://registry-1.docker.io/bitnamicharts/memcached --set architecture=high-availability --set autoscaling.enabled=true --set persistence.enabled=true --set persistence.storageClass=rook-ceph-nvme2tb --set metrics.enabled=true --set metrics.serviceMonitor.enabled=true --set metrics.serviceMonitor.labels.release=kube-prometheus-stack --set metrics.resources.requests.cpu=250m --set metrics.resources.requests.memory=256Mi 7 | ``` 8 | 9 | Memcached can be accessed via port 11211 on the following DNS name from within your cluster: `memcached.memcached.svc.cluster.local` 10 | 11 | ## Expose through Traefik 12 | 13 | Define an entrypoint for memcached (port 11211) in Traefik, then deploy the `ig-memcached.yaml` file 14 | -------------------------------------------------------------------------------- /Kubernetes/Database/Memcached/yamls/ig-memcached.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-memcached 5 | namespace: memcached 6 | spec: 7 | entryPoints: 8 | - memcached 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: memcached 13 | port: 11211 14 | -------------------------------------------------------------------------------- /Kubernetes/Database/Mongodb/readme.md: -------------------------------------------------------------------------------- 1 | # MongoDB installation 2 | 3 | ## Prerequisites 4 | 5 | You need the AVX feature enabled for your CPU 6 | 7 | ```bash 8 | cat /proc/cpuinfo | grep avx 9 | ``` 10 | 11 | ## Prepare 12 | 13 | Get the helm values and customize 14 | 15 | ```bash 16 | helm show values oci://registry-1.docker.io/bitnamicharts/mongodb > mongodb-values.yaml 17 | vi mongodb-values.yaml 18 | ``` 19 | 20 | Set persistence, resourcesPreset, metrics and servicemonitor values. 
21 | 22 | ```yaml 23 | persistence: 24 | storageClass: "ceph-rbd" 25 | metrics: 26 | enabled: true 27 | serviceMonitor: 28 | enabled: true 29 | labels: kube-prometheus-stack 30 | resourcesPreset: "small" 31 | ``` 32 | 33 | ## Install 34 | 35 | ```bash 36 | helm upgrade -i mongodb -n mongodb --create-namespace oci://registry-1.docker.io/bitnamicharts/mongodb -f mongodb-values.yaml 37 | ``` 38 | 39 | ## Connect 40 | 41 | Get the root password, and use it to connect with the root user 42 | 43 | ```bash 44 | kubectl get secret --namespace mongodb mongodb -o jsonpath="{.data.mongodb-root-password}" | base64 -d 45 | ``` 46 | 47 | ## Upgrade 48 | 49 | ```bash 50 | helm upgrade -i mongodb -n mongodb --create-namespace oci://registry-1.docker.io/bitnamicharts/mongodb -f mongodb-values.yaml --set auth.rootPassword= 51 | ``` 52 | -------------------------------------------------------------------------------- /Kubernetes/Database/Mysql/yamls/ig-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-mysql 5 | namespace: mysql 6 | spec: 7 | entryPoints: 8 | - mysql-mariadb 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: proxysql 13 | port: 6033 -------------------------------------------------------------------------------- /Kubernetes/Database/Postgresql/yamls/ig-postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-postgresql 5 | namespace: postgresql 6 | spec: 7 | entryPoints: 8 | - postgresql 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: postgresql 13 | port: 5432 14 | -------------------------------------------------------------------------------- /Kubernetes/Database/PostgresqlHA/yamls/ig-postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.io/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-postgresql 5 | namespace: postgresql 6 | spec: 7 | entryPoints: 8 | - postgresql 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: postgresql-postgresql-ha-pgpool 13 | port: 5432 14 | -------------------------------------------------------------------------------- /Kubernetes/Database/Redis/yamls/ig-redis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRouteTCP 3 | metadata: 4 | name: traefik-redis 5 | namespace: redis 6 | spec: 7 | entryPoints: 8 | - redis 9 | routes: 10 | - match: HostSNI(`*`) 11 | services: 12 | - name: redis-master 13 | port: 6379 -------------------------------------------------------------------------------- /Kubernetes/Database/readme.md: -------------------------------------------------------------------------------- 1 | # Expose DBs with Traefik 2 | 3 | The majority of DBs (postgres, mariadb, ...)to not interact via HTTP(s), but need TCP. 4 | 5 | - Create an appropriate entrypoint in the Traefik deployment with the desired port 6 | - Create a simple IngressRouteTCP on that entrypoint going to the needed DB 7 | - Be aware: you'll need TLS with SNI to recognize the host and use the same port for different DBs. Alternative: an entrypoint for each DB and ('*') as HostSNI. 
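As a sketch of the first step above, an extra TCP entrypoint can be declared in the Traefik helm chart values (the `ports` layout shown here is the classic one and may differ between chart versions - confirm with `helm show values traefik/traefik`):

```yaml
# traefik-values.yaml (excerpt) - adds a dedicated "postgresql" TCP entrypoint
ports:
  postgresql:
    port: 5432        # port Traefik listens on
    expose: true      # newer chart versions use an expose map instead of a boolean
    exposedPort: 5432 # port published on the Traefik Service/LoadBalancer
    protocol: TCP
```

The IngressRouteTCP manifests in this section then reference that entrypoint by name (`entryPoints: [postgresql]`).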
8 | -------------------------------------------------------------------------------- /Kubernetes/Descheduler/readme.md: -------------------------------------------------------------------------------- 1 | # Running Descheduler 2 | 3 | Descheduler helps keeping the cluster clean and balanced between nodes. 4 | 5 | Look through the descheduler-values.yaml file for the plugin settings, [following the user guide](https://github.com/kubernetes-sigs/descheduler?tab=readme-ov-file#user-guide), then deploy with helm. 6 | 7 | ```bash 8 | helm repo add descheduler https://kubernetes-sigs.github.io/descheduler/ 9 | helm repo update 10 | helm upgrade -i descheduler -n kube-system descheduler/descheduler -f descheduler-values.yaml 11 | ``` 12 | -------------------------------------------------------------------------------- /Kubernetes/Drawio/readme.md: -------------------------------------------------------------------------------- 1 | # Install Draw.io 2 | 3 | Run the yaml file and go to drawio.domain.com 4 | 5 | ```bash 6 | kubectl apply -f drawio.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/FireflyIII/readme.md: -------------------------------------------------------------------------------- 1 | # Firefly III installation with data importer 2 | 3 | After having deployed postgresql with a dedicated db (eventually called firefly), create a pv and pvc, size 10Gi 4 | 5 | Generate a random 32 character key to set as APP_KEY: 6 | 7 | ```bash 8 | head /dev/urandom | LC_ALL=C tr -dc 'A-Za-z0-9' | head -c 32 && echo 9 | ``` 10 | 11 | Deploy the yaml file setting the APP_KEY value and the Postgresql db values. 12 | 13 | ```bash 14 | kubectl apply -f fireflyIII.yaml 15 | ``` 16 | 17 | Login and create a Personal Access Token, and set the variables in fireflyIIIimporter.yaml, then deploy. 18 | 19 | ```bash 20 | kubectl apply -f fireflyIIIimporter.yaml 21 | ``` 22 | -------------------------------------------------------------------------------- /Kubernetes/FreshRSS/readme.md: -------------------------------------------------------------------------------- 1 | # FreshRSS Installation 2 | 3 | Apply the deployment 4 | 5 | ```bash 6 | kubectl apply -f freshrss.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Groundcover/yamls/groundcover-values.yaml: -------------------------------------------------------------------------------- 1 | agent: 2 | # Obfuscation 3 | alligator: 4 | obfuscateData: true 5 | 6 | clickhouse: 7 | # logs storage 8 | persistence: 9 | storageClass: longhorn-nvme 10 | size: 128Gi 11 | 12 | victoria-metrics-single: 13 | # metrics storage 14 | server: 15 | persistentVolume: 16 | storageClass: longhorn-nvme 17 | size: 100Gi 18 | -------------------------------------------------------------------------------- /Kubernetes/Helm/readme.md: -------------------------------------------------------------------------------- 1 | # Helm 2 | 3 | ## From Apt (Debian/Ubuntu) 4 | 5 | Members of the Helm community have contributed a Helm package for Apt. This package is generally up to date. 
6 | 7 | > Beware: this repo is not usable in unattended-upgrades, it's best to use snap even if the package is generally less up to date 8 | 9 | ```bash 10 | curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null 11 | sudo apt-get install apt-transport-https --yes 12 | echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list 13 | sudo apt-get update 14 | sudo apt-get install helm 15 | ``` 16 | 17 | ## From Snap 18 | 19 | The Snapcrafters community maintains the Snap version of the Helm package: 20 | 21 | ```bash 22 | sudo snap install helm --classic 23 | ``` 24 | -------------------------------------------------------------------------------- /Kubernetes/Homepage/images/gethomepage.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homepage/images/gethomepage.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/datree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/datree.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/dell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/dell.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/haproxy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/haproxy.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/longhorn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/longhorn.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/portainer.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/prometheus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/prometheus.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/proxmox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Kubernetes/Homer/assets/tools/proxmox.png -------------------------------------------------------------------------------- /Kubernetes/Homer/assets/tools/uptimekuma.svg: 
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /Kubernetes/Homer/readme.md: -------------------------------------------------------------------------------- 1 | # Homer dashboard installation 2 | 3 | ## Preparation 4 | 5 | Create a Longhorn Volume, and the required homer.domain.com DNS entry both internally (we use pfSense) and externally (we use cloudflare) 6 | ## Deploy 7 | 8 | Run the yaml file to deploy Homer. 9 | 10 | ```bash 11 | kubectl apply -f homer.yaml 12 | ``` 13 | 14 | ## Config 15 | 16 | Run `kubectl cp` to put logos into the assets/tools directory inside the pod 17 | 18 | ```bash 19 | kubectl cp logo.png namespace/pod:/www/assets/tools 20 | ``` 21 | 22 | Run kubectl exec to get inside the pod console and edit the config, to add apps and whatever. 23 | 24 | ```bash 25 | kubectl exec -it -n namespace pod -- /bin/sh 26 | /www $ vi assets/config.yml 27 | ``` 28 | 29 | You can also use `kubectl cp` to extract the config.yml file, edit it outside of the pod, and copy it back in. 30 | 31 | ```bash 32 | kubectl cp namespace/pod:/www/assets/config.yml /full/local/path 33 | ``` 34 | -------------------------------------------------------------------------------- /Kubernetes/Hosting/readme.md: -------------------------------------------------------------------------------- 1 | # Using Traefik in front of virtualmin 2 | 3 | Apply the yaml after having customized the domains in it. 4 | 5 | ```bash 6 | kubectl apply -f ig-hosting.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Intel-GPU/readme.md: -------------------------------------------------------------------------------- 1 | # Install and use Intel GPUs 2 | 3 | Prerequisites: 4 | 5 | - Cert manager 6 | - Node Feature Discovery 7 | 8 | **Note:** do not use the microk8s kubectl command, as it's not compliant with the version management of the git needed by the following apply commands. 
9 | 10 | Install NFD rules: 11 | 12 | ```bash 13 | kubectl apply -k 'https://github.com/intel/intel-device-plugins-for-kubernetes/deployments/nfd/overlays/node-feature-rules?ref=v0.31.0' 14 | ``` 15 | 16 | Deploy the plugin operator and the GPU custom resource (sample below): 17 | 18 | ```bash 19 | kubectl apply -k 'https://github.com/intel/intel-device-plugins-for-kubernetes/deployments/operator/default?ref=v0.31.0' 20 | kubectl apply -f https://raw.githubusercontent.com/intel/intel-device-plugins-for-kubernetes/main/deployments/operator/samples/deviceplugin_v1_gpudeviceplugin.yaml 21 | ``` 22 | 23 | ```yaml 24 | apiVersion: deviceplugin.intel.com/v1 25 | kind: GpuDevicePlugin 26 | metadata: 27 | name: gpudeviceplugin-sample 28 | spec: 29 | image: intel/intel-gpu-plugin:0.31.0 30 | sharedDevNum: 10 31 | logLevel: 4 32 | enableMonitoring: true 33 | nodeSelector: 34 | intel.feature.node.kubernetes.io/gpu: "true" 35 | ``` 36 | -------------------------------------------------------------------------------- /Kubernetes/It-tools/readme.md: -------------------------------------------------------------------------------- 1 | # It-tools self-hosting 2 | 3 | Deploy the yaml file 4 | 5 | ```bash 6 | kubectl apply -f it-tools.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Karakeep/readme.md: -------------------------------------------------------------------------------- 1 | # Karakeep installation 2 | 3 | Generate two random strings and encode them in base64 with 4 | 5 | ```bash 6 | openssl rand -base64 36 | base64 7 | ``` 8 | 9 | Put them in the `NEXTAUTH_SECRET` and `MEILI_MASTER_KEY` env values in the secret, then apply the deployment 10 | 11 | ```bash 12 | kubectl apply -f karakeep.yaml 13 | ``` 14 | -------------------------------------------------------------------------------- /Kubernetes/Kubectl/readme.md: -------------------------------------------------------------------------------- 1 | # Kubectl tips 2 | 3 | Get all pods in a namespace with a given state 4 | 5 | ```bash 6 | kubectl get pod -n --field-selector status.phase= 7 | ``` 8 | 9 | Delete all pods in a namespace with a given state 10 | 11 | ```bash 12 | kubectl delete pod -n --field-selector status.phase= 13 | ``` 14 | 15 | Get pods not ready in a namespace 16 | 17 | ```bash 18 | kubectl get pods -n | grep "0/" 19 | ``` 20 | 21 | Add a counter to the previous commands 22 | 23 | ```bash 24 | | wc -l 25 | ``` 26 | -------------------------------------------------------------------------------- /Kubernetes/Kubetail/readme.md: -------------------------------------------------------------------------------- 1 | # Kubetail installation 2 | 3 | ```bash 4 | helm repo add kubetail https://kubetail-org.github.io/helm-charts/ 5 | helm repo update 6 | helm upgrade -i kubetail kubetail/kubetail --namespace kubetail --create-namespace 7 | kubectl apply -f ig-kubetail.yaml 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/LocalAI/readme.md: -------------------------------------------------------------------------------- 1 | # Install Local-ai 2 | 3 | Select the image (no GPU, Nvidia, Intel, ...) 
from here: 4 | 5 | - [All in one images - with pre-installed models](https://localai.io/basics/container/#all-in-one-images) 6 | - [Standard images - without pre-installed models](https://localai.io/basics/container/#standard-container-images) 7 | 8 | ## Deploy the helm chart 9 | 10 | ```bash 11 | helm repo add go-skynet https://go-skynet.github.io/helm-charts/ 12 | helm repo update 13 | helm show values go-skynet/local-ai > localai-values.yaml 14 | ``` 15 | 16 | Define the image, persistence, resources, node-selector, or anything else, then deploy the helm chart and the ig 17 | 18 | ```bash 19 | vi localai-values.yaml 20 | helm upgrade -i -n local-ai --create-namespace local-ai go-skynet/local-ai -f localai-values.yaml 21 | kubectl apply -f ig-localai.yaml 22 | ``` 23 | -------------------------------------------------------------------------------- /Kubernetes/Metallb/yamls/metallb-pool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: metallb-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 10.0.50.100-10.0.50.199 9 | --- 10 | apiVersion: metallb.io/v1beta1 11 | kind: L2Advertisement 12 | metadata: 13 | name: metallb-l2-advertisement 14 | namespace: metallb-system 15 | -------------------------------------------------------------------------------- /Kubernetes/Node-Feature-Discovery/readme.md: -------------------------------------------------------------------------------- 1 | # Node feature discovery installation 2 | 3 | ## Install node feature discovery 4 | 5 | Get the repo 6 | 7 | ```bash 8 | helm repo add nfd https://kubernetes-sigs.github.io/node-feature-discovery/charts 9 | helm repo update 10 | ``` 11 | 12 | Eventually modify the values enabling the topology updater with its CRDs 13 | 14 | ```bash 15 | helm show values nfd/node-feature-discovery > nodefeaturediscovery.yaml 16 | vi nodefeaturediscovery.yaml 17 | ``` 18 | 19 | Install 20 | 21 | ```bash 22 | helm upgrade -i nfd nfd/node-feature-discovery --namespace node-feature-discovery --create-namespace [-f nodefeaturediscovery.yaml] 23 | ``` 24 | 25 | ## Install GPU feature discovery 26 | 27 | Get the repo and install (nfd is a prerequisite) 28 | 29 | ```bash 30 | helm repo add nvgfd https://nvidia.github.io/gpu-feature-discovery 31 | helm repo update 32 | helm upgrade -i nvgfd nvgfd/gpu-feature-discovery --namespace gpu-feature-discovery --create-namespace 33 | ``` 34 | 35 | ## Check the discovery 36 | 37 | ```bash 38 | kubectl get no -o json | jq .items[].metadata.labels 39 | ``` 40 | -------------------------------------------------------------------------------- /Kubernetes/Openproject/readme.md: -------------------------------------------------------------------------------- 1 | # Install Openproject 2 | 3 | Create a postgresql db and user. 4 | 5 | ```bash 6 | helm repo add openproject https://charts.openproject.org 7 | helm repo update 8 | helm shaw values openproject/openproject > openproject-values.yaml 9 | vi openproject-values.yaml 10 | ``` 11 | 12 | Fill the values with your custom settings (SMTP, postgresql, memcached, ...) 
13 | 14 | ```bash 15 | helm upgrade -i openproject -n openproject --create-namespace openproject/openproject -f openproject-values.yaml 16 | kubectl apply -f ig-openproject 17 | ``` 18 | -------------------------------------------------------------------------------- /Kubernetes/Paperless-ngx/readme.md: -------------------------------------------------------------------------------- 1 | # Install Paperless-ngx 2 | 3 | Create a paperless db in postgresql (or mysql/mariadb, changing the environment variables) with user paperless, define the env variables in the secret, then apply the deployment 4 | 5 | ```bash 6 | kubectl apply -f paperless-ngx.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Portainer/readme.md: -------------------------------------------------------------------------------- 1 | # Portainer 2 | 3 | To deploy Portainer, add the helm repo 4 | 5 | ```bash 6 | helm repo add portainer https://portainer.github.io/k8s/ 7 | helm repo update 8 | helm show values portainer/portainer > portainer-values.yaml 9 | ``` 10 | 11 | Set the service.type to ClusterIP, the tls.force=true and the persistence.storageclass to rook-ceph-nvme2tb (or whatever storageclass you're using) 12 | 13 | ```bash 14 | helm upgrade -i portainer portainer/portainer --create-namespace --namespace portainer -f portainer-values.yaml 15 | ``` 16 | 17 | Or set the values directly 18 | 19 | ```bash 20 | helm upgrade -i portainer portainer/portainer --create-namespace --namespace portainer --set service.type=ClusterIP --set tls.force=true --set persistence.storageclass=rook-ceph-nvme2tb 21 | ``` 22 | 23 | Go directly to the dashboard to create a user and login, or the instance will need to be restarted. 24 | 25 | ```bash 26 | kubectl port-forward svc/portainer -n portainer 9443 27 | ``` 28 | 29 | ```bash 30 | http://localhost:9443 31 | ``` 32 | 33 | Inside the dashboard, select the local environment, and go to to the Cluster/Setup page. 34 | 35 | Here, define a "traefik" ingress class of type traefik, and enable "Enable features using the metrics API" (only if you installed the metrics server!). Save. 
36 | 37 | ## Traefik exposure (admin portaineradmin) 38 | 39 | ```bash 40 | kubectl apply -f ig-portainer.yaml 41 | ``` 42 | -------------------------------------------------------------------------------- /Kubernetes/Privatebin/readme.md: -------------------------------------------------------------------------------- 1 | # Install Privatebin 2 | 3 | Deploy 4 | 5 | ```bash 6 | kubectl apply -f privatebin.yaml 7 | ``` 8 | 9 | Go to the config PVC or to the /srv/cfg directory of the pod to create the `conf.php` file, then modify the configuration at will 10 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Authentik/readme.md: -------------------------------------------------------------------------------- 1 | # Authentik monitoring 2 | 3 | Set metrics and servicemonitor to true in the helm chart values, then add the grafana dashboard 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-authentik -n monitoring --from-file=grafana-authentik.json 7 | kubectl label configmap grafana-dashboard-authentik -n monitoring grafana_dashboard="1" 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Automation/ArgoCD/readme.md: -------------------------------------------------------------------------------- 1 | # ArgoCD Monitoring 2 | 3 | Enable metrics and serviceMonitors. 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-argocd -n monitoring --from-file=grafana-argocd.json 7 | kubectl label configmap grafana-dashboard-argocd -n monitoring grafana_dashboard="1" 8 | kubectl create configmap grafana-dashboard-argocd-application-overview -n monitoring --from-file=grafana-argocd-application-overview.json 9 | kubectl label configmap grafana-dashboard-argocd-application-overview -n monitoring grafana_dashboard="1" 10 | kubectl create configmap grafana-dashboard-argocd-notifications-overview -n monitoring --from-file=grafana-argocd-notifications-overview.json 11 | kubectl label configmap grafana-dashboard-argocd-notifications-overview -n monitoring grafana_dashboard="1" 12 | kubectl create configmap grafana-dashboard-argocd-operational-overview -n monitoring --from-file=grafana-argocd-operational-overview.json 13 | kubectl label configmap grafana-dashboard-argocd-operational-overview -n monitoring grafana_dashboard="1" 14 | ``` 15 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Automation/Vault/readme.md: -------------------------------------------------------------------------------- 1 | # Vault Monitoring 2 | 3 | Enable metrics and serviceMonitors. 
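With the official hashicorp/vault chart this is typically done through the `serverTelemetry` values; a minimal sketch (key names may vary between chart versions - confirm with `helm show values hashicorp/vault`):

```yaml
# vault-values.yaml (excerpt) - illustrative only
serverTelemetry:
  serviceMonitor:
    enabled: true
    selectors:
      release: kube-prometheus-stack  # label the ServiceMonitor so Prometheus picks it up
```

Vault itself also needs a `telemetry` stanza (with a non-zero `prometheus_retention_time`) in its server configuration, otherwise the metrics endpoint returns no Prometheus data.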
4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-vault -n monitoring --from-file=grafana-vault.json 7 | kubectl label configmap grafana-dashboard-vault -n monitoring grafana_dashboard="1" 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Cert-manager/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring Cert Manager 2 | 3 | Deploy the PodMonitor with the `podmonitor-certmanager.yaml` file, after having added the cert-manager namespace to the kube-prometheus-stack-operator deployment 4 | 5 | Search and add the cert-manager grafana dashboard 6 | 7 | ```bash 8 | kubectl create configmap grafana-dashboard-cert-manager -n monitoring --from-file=grafana-certmanager.json 9 | kubectl label configmap grafana-dashboard-cert-manager -n monitoring grafana_dashboard="1" 10 | ``` 11 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Cert-manager/yamls/podmonitor-certmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: cert-manager 5 | namespace: cert-manager 6 | labels: 7 | app: cert-manager 8 | app.kubernetes.io/name: cert-manager 9 | app.kubernetes.io/instance: cert-manager 10 | app.kubernetes.io/component: "controller" 11 | release: kube-prometheus-stack 12 | spec: 13 | jobLabel: app.kubernetes.io/name 14 | selector: 15 | matchLabels: 16 | app: cert-manager 17 | app.kubernetes.io/name: cert-manager 18 | app.kubernetes.io/instance: cert-manager 19 | app.kubernetes.io/component: "controller" 20 | podMetricsEndpoints: 21 | - port: http-metrics 22 | honorLabels: true -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Crowdsec/readme.md: -------------------------------------------------------------------------------- 1 | # Crowdsec monitoring 2 | 3 | Remember to deploy Crowdsec with metrics = true and serviceMonitor = true, then add a label `release: prometheus-kube-stack` to the service monitors. 
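A quick way to add the label after the fact (the `crowdsec` namespace is an assumption - adjust to wherever the chart is installed); the value must match the kube-prometheus-stack release name used throughout this repo:

```bash
# list the ServiceMonitors created by the crowdsec chart, then label them all
kubectl get servicemonitor -n crowdsec
kubectl label servicemonitor -n crowdsec --all release=kube-prometheus-stack --overwrite
```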
4 | 5 | Create the dashboards (now in v5): 6 | 7 | ```bash 8 | kubectl create configmap grafana-dashboard-crowdsec-dpm -n monitoring --from-file=crowdsec-details-per-machine.json 9 | kubectl label configmap grafana-dashboard-crowdsec-dpm -n monitoring grafana_dashboard="1" 10 | kubectl create configmap grafana-dashboard-crowdsec-overview -n monitoring --from-file=crowdsec-overview.json 11 | kubectl label configmap grafana-dashboard-crowdsec-overview -n monitoring grafana_dashboard="1" 12 | kubectl create configmap grafana-dashboard-crowdsec-insight -n monitoring --from-file=crowdsec-insight.json 13 | kubectl label configmap grafana-dashboard-crowdsec-insight -n monitoring grafana_dashboard="1" 14 | kubectl create configmap grafana-dashboard-crowdsec-lapi -n monitoring --from-file=crowdsec-lapi.json 15 | kubectl label configmap grafana-dashboard-crowdsec-lapi -n monitoring grafana_dashboard="1" 16 | ``` 17 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Couchdb/readme.md: -------------------------------------------------------------------------------- 1 | # Couchdb monitoring 2 | 3 | Deploy the couchdb exporter 4 | 5 | ```bash 6 | helm upgrade -i -n couchdb prometheus-couchdb-exporter prometheus-community/prometheus-couchdb-exporter --set couchdb.uri=http://couchdb-couchdb.couchdb.svc:5984 --set couchdb.username=admin --set couchdb.password= --set rbac.pspEnabled=false 7 | ```bash 8 | 9 | Deploy a servicemonitor 10 | 11 | ```bash 12 | kubectl apply -f prom-couchdb.yaml 13 | ``` 14 | 15 | ## Add the grafana dashboard (not yet found) 16 | 17 | ```bash 18 | kubectl create configmap grafana-dashboard-couchdb -n monitoring --from-file=grafana-couchdb.json 19 | kubectl label configmap grafana-dashboard-couchdb -n monitoring grafana_dashboard="1" 20 | kubectl create configmap grafana-dashboard-couchdb2 -n monitoring --from-file=grafana-couchdb2.json 21 | kubectl label configmap grafana-dashboard-couchdb2 -n monitoring grafana_dashboard="1" 22 | ``` 23 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Couchdb/yamls/prom-couchdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: couchdb 5 | namespace: couchdb 6 | labels: 7 | app: prometheus-couchdb-exporter 8 | release: kube-prometheus-stack 9 | spec: 10 | endpoints: 11 | - interval: 30s 12 | port: http 13 | scheme: http 14 | relabelings: 15 | - action: replace 16 | sourceLabels: [node_name] 17 | regex: "couchdb@couchdb-couchdb-0.couchdb-couchdb.couchdb.svc.cluster.local" 18 | targetLabel: "node_name" 19 | replacement: "couchdb-couchdb-0" 20 | - action: replace 21 | sourceLabels: [node_name] 22 | regex: "couchdb@couchdb-couchdb-1.couchdb-couchdb.couchdb.svc.cluster.local" 23 | targetLabel: "node_name" 24 | replacement: "couchdb-couchdb-1" 25 | - action: replace 26 | sourceLabels: [node_name] 27 | regex: "couchdb@couchdb-couchdb-2.couchdb-couchdb.couchdb.svc.cluster.local" 28 | targetLabel: "node_name" 29 | replacement: "couchdb-couchdb-2" 30 | jobLabel: couchdb 31 | selector: 32 | matchLabels: 33 | app: prometheus-couchdb-exporter -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Mariadb/readme.md: -------------------------------------------------------------------------------- 1 | # Mariadb Monitoring 2 | 3 | Add 
the grafana Dashboard (see installation for metrics exposure) 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-mysql -n monitoring --from-file=grafana-mysql.json 7 | kubectl label configmap grafana-dashboard-mysql -n monitoring grafana_dashboard="1" 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Memcached/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring Memcached 2 | 3 | Remember to activate the servicemonitor in the helm values, and add the namespace to the prometheus operator, then add the dashboards 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-memcached-pods -n monitoring --from-file=memcached_pods.json 7 | kubectl label configmap grafana-dashboard-memcached-pods -n monitoring grafana_dashboard="1" 8 | kubectl create configmap grafana-dashboard-memcached -n monitoring --from-file=Memcached.json 9 | kubectl label configmap grafana-dashboard-memcached -n monitoring grafana_dashboard="1" 10 | kubectl create configmap grafana-dashboard-memcached2 -n monitoring --from-file=Memcached2.json 11 | kubectl label configmap grafana-dashboard-memcached2 -n monitoring grafana_dashboard="1" 12 | kubectl create configmap grafana-dashboard-memcached3 -n monitoring --from-file=Memcached3.json 13 | kubectl label configmap grafana-dashboard-memcached3 -n monitoring grafana_dashboard="1" 14 | ``` 15 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Mongodb/readme.md: -------------------------------------------------------------------------------- 1 | # Mongodb monitoring 2 | 3 | Remember to activate the servicemonitor in the helm values, and add the namespace to the prometheus operator, then add the dashboards 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-mongodb -n monitoring --from-file=grafana-mongodb.json 7 | kubectl label configmap grafana-dashboard-mongodb -n monitoring grafana_dashboard="1" 8 | kubectl create configmap grafana-dashboard-mongodb2 -n monitoring --from-file=grafana-mongodb2.json 9 | kubectl label configmap grafana-dashboard-mongodb2 -n monitoring grafana_dashboard="1" 10 | ``` 11 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Mysql/readme.md: -------------------------------------------------------------------------------- 1 | # Add the grafana Dashboard (see installation for metrics exposure) 2 | 3 | ```bash 4 | kubectl create configmap grafana-dashboard-mysql -n monitoring --from-file=grafana-mysql.json 5 | kubectl label configmap grafana-dashboard-mysql -n monitoring grafana_dashboard="1" 6 | ``` 7 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Postgresql/readme.md: -------------------------------------------------------------------------------- 1 | # Add the grafana Dashboard (see installation for metrics exposure) 2 | 3 | ```bash 4 | kubectl create configmap grafana-dashboard-postgresql -n monitoring --from-file=grafana-postgresql.json 5 | kubectl label configmap grafana-dashboard-postgresql -n monitoring grafana_dashboard="1" 6 | ``` 7 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Database/Redis/readme.md: -------------------------------------------------------------------------------- 1 | # Redis monitoring 2 | 
3 | See redis deployment for metrics and servicemonitor enablement, then create the dashboard 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-redis -n monitoring --from-file=grafana-redis.json 7 | kubectl label configmap grafana-dashboard-redis -n monitoring grafana_dashboard="1" 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/ExternalEtcd/yamls/prom-etcd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: etcd-cluster 5 | labels: 6 | app: etcd 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.64 11 | nodeName: etcd 12 | ports: 13 | - name: metrics 14 | port: 2379 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: etcd-cluster 21 | labels: 22 | app: etcd 23 | namespace: monitoring 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: metrics 29 | port: 2379 30 | targetPort: 2379 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | name: etcd-cluster 36 | namespace: monitoring 37 | labels: 38 | app: etcd 39 | release: kube-prometheus-stack 40 | spec: 41 | endpoints: 42 | - interval: 30s 43 | port: metrics 44 | scheme: https 45 | tlsConfig: 46 | caFile: /etc/prometheus/secrets/etcd-client-cert/ca.crt 47 | certFile: /etc/prometheus/secrets/etcd-client-cert/apiserver-etcd-client.crt 48 | keyFile: /etc/prometheus/secrets/etcd-client-cert/apiserver-etcd-client.key 49 | #serverName: etcdclstr 50 | jobLabel: etcd-cluster 51 | selector: 52 | matchLabels: 53 | app: etcd 54 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Fritzbox-Eporter/readme.md: -------------------------------------------------------------------------------- 1 | # Fritzbox exporter for Prometheus Monitoring 2 | 3 | ## Preparation 4 | 5 | 1. Add a user to the Fritzbox with admin access. 6 | 2. Activate UPnP in Home Network settings 7 | 3. 
Make sure the k8s nodes can ping the host fritz.box (just the domanin without any subdomain/host) to the Fritbox IP (add it to your internal DNS server or to the hosts file of the nodes) 8 | 9 | ## Apply the prom-fritzbox.yaml 10 | 11 | Customize the environment variables in the prom-fritzbox.yaml matching your environment (use the user previously added to te Fritzbox) 12 | 13 | ```bash 14 | kubectl apply -f prom-fritzbox.yaml 15 | ``` 16 | 17 | ## Create the Grafana dashboard 18 | 19 | ```bash 20 | kubectl create configmap grafana-dashboard-fritzbox --from-file=grafana-fritzbox.json 21 | kubectl label configmap grafana-dashboard-fritzbox grafana_dashboard="1" 22 | ``` 23 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/readme.md: -------------------------------------------------------------------------------- 1 | # Haproxy monitoring with prometheus and grafana 2 | 3 | ## Haproxy prometheus exporter 4 | 5 | Be sure to enable the native prometheus exporter in Haproxy (see [Haproxy Loadbalance](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Cluster/03-High-Availability)) 6 | 7 | ## Prometheus 8 | 9 | Apply Endpoint, Servce and ServiceMonitor 10 | 11 | ```bash 12 | kubectl apply -f prom-haproxy1.yaml -f prom-haproxy2.yaml -f prom-haproxy3.yaml 13 | ``` 14 | 15 | Then, add Grafana dashboard 16 | 17 | ```bash 18 | kubectl create configmap grafana-dashboard-haproxy -n monitoring --from-file=grafana-haproxy.json 19 | kubectl label configmap grafana-dashboard-haproxy -n monitoring grafana_dashboard="1" 20 | ``` 21 | 22 | ## Haproxy stats through traefik 23 | 24 | ```bash 25 | kubectl apply -f traefik-haproxy1-svc.yaml -f traefik-haproxy1.yaml -f traefik-haproxy2-svc.yaml -f traefik-haproxy2.yaml -f traefik-haproxy3-svc.yaml -f traefik-haproxy3.yaml 26 | ``` 27 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/prom-haproxy1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy1 5 | labels: 6 | app: haproxy1 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.61 11 | nodeName: haproxy1 12 | ports: 13 | - name: haproxy1 14 | port: 8404 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy1 21 | labels: 22 | app: haproxy1 23 | namespace: monitoring 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: haproxy1 29 | port: 8404 30 | targetPort: 8404 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | name: haproxy1 36 | namespace: monitoring 37 | labels: 38 | app: haproxy1 39 | release: kube-prometheus-stack 40 | spec: 41 | endpoints: 42 | - interval: 30s 43 | port: haproxy1 44 | scheme: http 45 | path: "/metrics" 46 | jobLabel: haproxy1 47 | selector: 48 | matchLabels: 49 | app: haproxy1 50 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/prom-haproxy2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy2 5 | labels: 6 | app: haproxy2 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.62 11 | nodeName: haproxy2 12 | ports: 13 | - name: haproxy2 14 | port: 8404 15 | protocol: 
TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy2 21 | labels: 22 | app: haproxy2 23 | namespace: monitoring 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: haproxy2 29 | port: 8404 30 | targetPort: 8404 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | name: haproxy2 36 | namespace: monitoring 37 | labels: 38 | app: haproxy2 39 | release: kube-prometheus-stack 40 | spec: 41 | endpoints: 42 | - interval: 30s 43 | port: haproxy2 44 | scheme: http 45 | path: "/metrics" 46 | jobLabel: haproxy2 47 | selector: 48 | matchLabels: 49 | app: haproxy2 50 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/prom-haproxy3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy3 5 | labels: 6 | app: haproxy3 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.63 11 | nodeName: haproxy3 12 | ports: 13 | - name: haproxy3 14 | port: 8404 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy3 21 | labels: 22 | app: haproxy3 23 | namespace: monitoring 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: haproxy3 29 | port: 8404 30 | targetPort: 8404 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | name: haproxy3 36 | namespace: monitoring 37 | labels: 38 | app: haproxy3 39 | release: kube-prometheus-stack 40 | spec: 41 | endpoints: 42 | - interval: 30s 43 | port: haproxy3 44 | scheme: http 45 | path: "/metrics" 46 | jobLabel: haproxy3 47 | selector: 48 | matchLabels: 49 | app: haproxy3 50 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/traefik-haproxy1-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy1-service 5 | labels: 6 | app: haproxy1-service 7 | namespace: traefik-external 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.61 11 | nodeName: haproxy1-service 12 | ports: 13 | - name: stats 14 | port: 9000 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy1-service 21 | labels: 22 | app: haproxy1-service 23 | namespace: traefik-external 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: stats 29 | port: 9000 30 | targetPort: 9000 31 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/traefik-haproxy2-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy2-service 5 | labels: 6 | app: haproxy2-service 7 | namespace: traefik-external 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.62 11 | nodeName: haproxy2-service 12 | ports: 13 | - name: stats 14 | port: 9000 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy2-service 21 | labels: 22 | app: haproxy2-service 23 | namespace: traefik-external 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: stats 29 | port: 9000 30 | targetPort: 9000 31 | 
-------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Haproxy-Monitoring/yamls/traefik-haproxy3-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: haproxy3-service 5 | labels: 6 | app: haproxy3-service 7 | namespace: traefik-external 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.50.63 11 | nodeName: haproxy3-service 12 | ports: 13 | - name: stats 14 | port: 9000 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: haproxy3-service 21 | labels: 22 | app: haproxy3-service 23 | namespace: traefik-external 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: stats 29 | port: 9000 30 | targetPort: 9000 31 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/K8sBackup/Velero/readme.md: -------------------------------------------------------------------------------- 1 | # Grafana Dashboard for Velero 2 | 3 | Deploy the dashboard 4 | 5 | ```bash 6 | kubectl create configmap grafana-dashboard-velero -n monitoring --from-file=grafana-velero.json 7 | kubectl label configmap grafana-dashboard-velero -n monitoring grafana_dashboard="1" 8 | ``` 9 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Kubescape/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring Kubescape 2 | 3 | Remember to install kubescape with the serviceMonitor enabled: [Kubescape for observability and security scan](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Kubescape) 4 | 5 | Edit the serviceMonitor adding the `release: kube-prometheus-stack` label, then add the `kubescape` namespace to the `kube-prometheus-stack-operator` deployment and rollout restart it. 6 | 7 | Finally, add the Grafana dashboard 8 | 9 | ```bash 10 | kubectl create configmap grafana-dashboard-kubescape --from-file=grafana-kubescape.json 11 | kubectl label configmap grafana-dashboard-kubescape grafana_dashboard="1" 12 | ``` 13 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Kured/readme.md: -------------------------------------------------------------------------------- 1 | # Kured monitoring 2 | 3 | If you deploy with kubectl, also deploy the servicemonitor. 4 | 5 | ```bash 6 | kubectl apply -f servicemonitor-kured.yaml 7 | ``` 8 | 9 | If you deploy with helm, set metrics and servicemonitor to true and set the release label. 10 | 11 | Import the kured Grafana dashboard, fix the datasource id to match "prometheus" and the namespace variable to match "kube-system", then copy the json content to a grafana-kured.json file. 
12 | 13 | ```bash 14 | kubectl create configmap grafana-dashboard-kured -n monitoring --from-file=grafana-kured.json 15 | kubectl label configmap grafana-dashboard-kured -n monitoring grafana_dashboard="1" 16 | ``` 17 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Kured/yamls/servicemonitor-kured.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kured 5 | namespace: kube-system 6 | labels: 7 | name: kured 8 | spec: 9 | ports: 10 | - name: metrics 11 | port: 8088 12 | protocol: TCP 13 | targetPort: metrics 14 | selector: 15 | name: kured 16 | sessionAffinity: None 17 | type: ClusterIP 18 | --- 19 | apiVersion: monitoring.coreos.com/v1 20 | kind: ServiceMonitor 21 | metadata: 22 | name: kured-sm 23 | namespace: monitoring 24 | labels: 25 | app: kured 26 | name: kured 27 | release: kube-prometheus-stack 28 | spec: 29 | endpoints: 30 | - interval: 60s 31 | targetPort: 8080 32 | path: /metrics 33 | scrapeTimeout: 30s 34 | jobLabel: kured 35 | namespaceSelector: 36 | matchNames: 37 | - kube-system 38 | selector: 39 | matchLabels: 40 | name: kured -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Linux/readme.md: -------------------------------------------------------------------------------- 1 | # Node Exporter on Linux 2 | 3 | ## On the node to monitor 4 | 5 | ```bash 6 | curl -s https://api.github.com/repos/prometheus/node_exporter/releases/latest| grep browser_download_url|grep linux-amd64|cut -d '"' -f 4|wget -qi - 7 | tar -xvf node_exporter*.tar.gz 8 | cd node_exporter*/ 9 | sudo cp node_exporter /usr/local/bin 10 | cd 11 | node_exporter --version 12 | rm -rf node* 13 | ``` 14 | 15 | ```bash 16 | sudo tee /etc/systemd/system/node_exporter.service <<EOF 17 | [Unit] 18 | Description=Prometheus Node Exporter 19 | Wants=network-online.target 20 | After=network-online.target 21 | 22 | [Service] 23 | Type=simple 24 | ExecStart=/usr/local/bin/node_exporter 25 | 26 | [Install] 27 | WantedBy=default.target 28 | EOF 29 | ``` 30 | 31 | ```bash 32 | sudo systemctl daemon-reload 33 | sudo systemctl start node_exporter 34 | sudo systemctl enable node_exporter 35 | ``` 36 | 37 | The service is online on port 9100. 
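Before wiring the node into Prometheus, you can optionally sanity-check the exporter from the node itself (hostname and port taken from the steps above; `curl` is assumed to be available):

```bash
# Should print the first few Prometheus metric lines exposed by node_exporter
curl -s http://localhost:9100/metrics | head
```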
38 | 39 | ## Kubernetes ServiceMonitor and Grafana Dashboards 40 | 41 | ```bash 42 | kubectl apply -f sm-node-exporter.yaml 43 | kubectl create configmap grafana-dashboard-node-exporter -n monitoring --from-file=grafana-node-exporter.json 44 | kubectl label configmap grafana-dashboard-node-exporter -n monitoring grafana_dashboard="1" 45 | ``` 46 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Linux/yamls/sm-node-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: node-exporter 5 | labels: 6 | app: node-exporter 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.100.11 11 | nodeName: pve1 12 | - ip: 10.0.100.12 13 | nodeName: pve2 14 | - ip: 10.0.100.13 15 | nodeName: pve3 16 | ports: 17 | - name: node-exporter 18 | port: 9100 19 | protocol: TCP 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: node-exporter 25 | labels: 26 | app: node-exporter 27 | namespace: monitoring 28 | spec: 29 | type: ClusterIP 30 | clusterIP: None 31 | ports: 32 | - name: node-exporter 33 | port: 9100 34 | targetPort: 9100 35 | --- 36 | apiVersion: monitoring.coreos.com/v1 37 | kind: ServiceMonitor 38 | metadata: 39 | name: node-exporter 40 | namespace: monitoring 41 | labels: 42 | app: node-exporter 43 | release: kube-prometheus-stack 44 | spec: 45 | endpoints: 46 | - interval: 30s 47 | port: node-exporter 48 | scheme: http 49 | path: "/metrics" 50 | jobLabel: pve-cluster 51 | selector: 52 | matchLabels: 53 | app: node-exporter 54 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Metallb/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring metallb 2 | 3 | After implementing the prometheus-stack, get the prom-metallb.yaml file from the github project, add the label ```release: kube-prometheus-stack``` to both the Pod Monitors and apply (see the given yamls) 4 | 5 | Install the grafana dashboard, download the json from grafana, create the configmap in the monitoring namespace and label it. 
6 | 7 | ```bash 8 | kubectl create configmap grafana-dashboard-metallb -n monitoring --from-file=grafana-metallb.json 9 | kubectl label configmap grafana-dashboard-metallb -n monitoring grafana_dashboard="1" 10 | ``` 11 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Metallb/yamls/prom-metallb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PodMonitor 3 | metadata: 4 | name: metallb-controller 5 | namespace: metallb-system 6 | labels: 7 | release: kube-prometheus-stack 8 | spec: 9 | jobLabel: metallb-controller 10 | selector: 11 | matchLabels: 12 | component: controller 13 | namespaceSelector: 14 | matchNames: 15 | - metallb-system 16 | podMetricsEndpoints: 17 | - port: monitoring 18 | path: /metrics 19 | --- 20 | apiVersion: monitoring.coreos.com/v1 21 | kind: PodMonitor 22 | metadata: 23 | name: metallb-speaker 24 | namespace: metallb-system 25 | labels: 26 | release: kube-prometheus-stack 27 | spec: 28 | jobLabel: metallb-speaker 29 | selector: 30 | matchLabels: 31 | component: speaker 32 | namespaceSelector: 33 | matchNames: 34 | - metallb-system 35 | podMetricsEndpoints: 36 | - port: monitoring 37 | path: /metrics 38 | --- 39 | apiVersion: rbac.authorization.k8s.io/v1 40 | kind: Role 41 | metadata: 42 | name: prometheus-k8s 43 | namespace: metallb-system 44 | rules: 45 | - apiGroups: 46 | - "" 47 | resources: 48 | - pods 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | --- 54 | apiVersion: rbac.authorization.k8s.io/v1 55 | kind: RoleBinding 56 | metadata: 57 | name: prometheus-k8s 58 | namespace: metallb-system 59 | roleRef: 60 | apiGroup: rbac.authorization.k8s.io 61 | kind: Role 62 | name: prometheus-k8s 63 | subjects: 64 | - kind: ServiceAccount 65 | name: prometheus-k8s 66 | namespace: monitoring -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Nextcloud/readme.md: -------------------------------------------------------------------------------- 1 | # Nextcloud Monitoring 2 | 3 | Finally, we deploy the nextcloud exporter for monitoring, but we need another step before that: creating an auth token for nextcloud, using its occ function. 4 | 5 | ## Create the Credentials 6 | 7 | **Option 1**: best choice, create a token 8 | 9 | ```bash 10 | kubectl exec -it -n nextcloud -- bash 11 | TOKEN=$(openssl rand -hex 32) 12 | su -s /bin/bash www-data -c "php occ config:app:set serverinfo token --value "$TOKEN"" 13 | ``` 14 | 15 | **Option 2**: create an admin user, put it in a fake no-TFA group, access and create a web-app password, then exclude the user from the no-TFA group to re-activate the need for the TFA, use username/webapp-password in the exporter. 16 | 17 | ## Monitoring Deploy 18 | 19 | Deploy the `sm-nextcloud.yaml` file, changing the auth-token or the username and password with the one(s) just generated, commenting the variables not used. 20 | 21 | ```bash 22 | kubectl apply -f sm-nextcloud.yaml 23 | ``` 24 | 25 | Last step: the grafana dashboard. 
26 | 27 | ```bash 28 | kubectl create configmap grafana-dashboard-nextcloud -n monitoring --from-file=grafana-nextcloud.json 29 | kubectl label configmap grafana-dashboard-nextcloud -n monitoring grafana_dashboard="1" 30 | ``` 31 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/PfSense/readme.md: -------------------------------------------------------------------------------- 1 | # PfSense with node-exporter 2 | 3 | Install the node exporter in PfSense, then apply the endpoint, service and servicemonitor, and finally create the dashboard(s). 4 | 5 | ```bash 6 | kubectl apply -f sm-pfsense.yaml 7 | kubectl create configmap grafana-dashboard-pfsense-node-exporter -n monitoring --from-file=grafana-pfsense.json 8 | kubectl label configmap grafana-dashboard-pfsense-node-exporter -n monitoring grafana_dashboard="1" 9 | kubectl create configmap grafana-dashboard-pfsense-node-exporter2 -n monitoring --from-file=grafana-pfsense2.json 10 | kubectl label configmap grafana-dashboard-pfsense-node-exporter2 -n monitoring grafana_dashboard="1" 11 | ``` 12 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Prometheus-snmp/readme.md: -------------------------------------------------------------------------------- 1 | # Prometheus snmp exporter for Arista Switch and Dell IDRAC (valid for any other snmp monitored device) 2 | 3 | ## Generate your snmp.yml 4 | 5 | Use the prometheus snmp exporter generator to produce your custom snmp.yml file. 6 | See the given MIB files for Dell IDRAC and Arista Switch, or use your own. 7 | 8 | ## Create two configmaps from the snmp.yml file 9 | 10 | Set the username and passwords (both passwords) for both the modules in the `snmp.yml` file 11 | 12 | ```bash 13 | kubectl create configmap prometheus-snmp-exporter-idrac -n monitoring --from-file snmp.yml 14 | kubectl create configmap prometheus-snmp-exporter-arista-switch -n monitoring --from-file snmp.yml 15 | ``` 16 | 17 | ## Deploy the given snmp exporters (customizing the yaml files to your needs) 18 | 19 | Set the IPs of the devices in both files 20 | 21 | ```bash 22 | helm upgrade -i prometheus-snmp-exporter-idrac prometheus-community/prometheus-snmp-exporter -f snmp-exporter-idrac.yaml -n monitoring 23 | helm upgrade -i prometheus-snmp-exporter-arista-switch prometheus-community/prometheus-snmp-exporter -f snmp-exporter-arista-switch.yaml -n monitoring 24 | ``` 25 | 26 | ## Add the dashboards to Grafana 27 | 28 | ```bash 29 | kubectl create configmap grafana-dashboard-arista-switch -n monitoring --from-file=grafana-arista.json 30 | kubectl label configmap grafana-dashboard-arista-switch -n monitoring grafana_dashboard="1" 31 | kubectl create configmap grafana-dashboard-dell-idrac -n monitoring --from-file=grafana-idrac.json 32 | kubectl label configmap grafana-dashboard-dell-idrac -n monitoring grafana_dashboard="1" 33 | ``` 34 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Proxmox-Backup-Monitoring/readme.md: -------------------------------------------------------------------------------- 1 | # Proxmox Backup Monitoring with prometheus and pushgateway 2 | 3 | ## Prerequisites 4 | 5 | Install Pushgateway 6 | 7 | ## Exporter 8 | 9 | Install the Proxmox Backup Server Exporter from [here](https://github.com/rare-magma/pbs-exporter) 10 | 11 | **Note:** you'll probably need to tweak it a little bit to make it properly source the conf file. 
12 | {: .note} 13 | 14 | ## Add grafana Dashboard 15 | 16 | ```bash 17 | kubectl create configmap grafana-dashboard-proxmox-bs -n monitoring --from-file=grafana-proxmox-backup-server.json 18 | kubectl label configmap grafana-dashboard-proxmox-bs -n monitoring grafana_dashboard="1" 19 | ``` 20 | 21 | # PBS with PBS Exporter 22 | 23 | Create a user `exporter@pbs`, an associated API Token `exporter` in PBS, both with Audit Role Privileges, define the variables in the `exporter-pbs.yaml` 24 | 25 | ```bash 26 | kubectl apply -f exporter-pbs.yaml 27 | ``` 28 | 29 | See [here](https://github.com/natrontech/pbs-exporter) 30 | 31 | Add the dashboard 32 | 33 | ```bash 34 | kubectl create configmap grafana-dashboard-pbs -n monitoring --from-file=grafana-pbs-v2.json 35 | kubectl label configmap grafana-dashboard-pbs -n monitoring grafana_dashboard="1" 36 | ``` 37 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Proxmox-Monitoring/yamls/prom-proxmox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: pve-cluster 5 | labels: 6 | app: pve 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: 10.0.100.11 11 | nodeName: pvenode1 12 | - ip: 10.0.100.12 13 | nodeName: pvenode2 14 | - ip: 10.0.100.13 15 | nodeName: pvenode3 16 | ports: 17 | - name: pve 18 | port: 9221 19 | protocol: TCP 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: pve-cluster 25 | labels: 26 | app: pve 27 | namespace: monitoring 28 | spec: 29 | type: ClusterIP 30 | clusterIP: None 31 | ports: 32 | - name: pve 33 | port: 9221 34 | targetPort: 9221 35 | --- 36 | apiVersion: monitoring.coreos.com/v1 37 | kind: ServiceMonitor 38 | metadata: 39 | name: pve-cluster 40 | namespace: monitoring 41 | labels: 42 | app: pve 43 | release: kube-prometheus-stack 44 | spec: 45 | endpoints: 46 | - interval: 30s 47 | port: pve 48 | scheme: http 49 | path: "/pve" 50 | jobLabel: pve-cluster 51 | selector: 52 | matchLabels: 53 | app: pve 54 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Pushgateway/readme.md: -------------------------------------------------------------------------------- 1 | # Install prometheus pushgateway on kubernetes 2 | 3 | Add the repo if not present, extract the values file 4 | 5 | ```bash 6 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 7 | helm repo update 8 | helm show values prometheus-community/prometheus-pushgateway > pushgateway-values.yaml 9 | ``` 10 | 11 | Define the namespace to monitoring, the Service Monitor to true and add the `release: kube-prometheus-stack` additional label to the Service Monitor. 
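As a rough sketch of that edit (the key names may differ between chart versions, so double-check against the extracted `pushgateway-values.yaml`):

```yaml
# Hypothetical excerpt of pushgateway-values.yaml
serviceMonitor:
  enabled: true
  additionalLabels:
    release: kube-prometheus-stack
```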
12 | 13 | ```bash 14 | helm upgrade -i prometheus-pushgateway prometheus-community/prometheus-pushgateway --values pushgateway-values.yaml -n monitoring 15 | ``` 16 | 17 | Then, create the ingressroute from the yaml template. -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/Ceph/yamls/prom-ceph.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: ceph-cluster 5 | labels: 6 | app: ceph 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: xx.xx.xx.xx #IP of pvenode1 11 | nodeName: cephnode1 12 | - ip: xx.xx.xx.xx #IP of pvenode2 13 | nodeName: cephnode2 14 | - ip: xx.xx.xx.xx #IP of pvenode3 15 | nodeName: cephnode3 16 | ports: 17 | - name: ceph 18 | port: 9283 19 | protocol: TCP 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ceph-cluster 25 | labels: 26 | app: ceph 27 | namespace: monitoring 28 | spec: 29 | type: ClusterIP 30 | clusterIP: None 31 | ports: 32 | - name: ceph 33 | port: 9283 34 | targetPort: 9283 35 | --- 36 | apiVersion: monitoring.coreos.com/v1 37 | kind: ServiceMonitor 38 | metadata: 39 | name: ceph-cluster 40 | namespace: monitoring 41 | labels: 42 | app: ceph 43 | release: kube-prometheus-stack 44 | spec: 45 | endpoints: 46 | - interval: 15s 47 | port: ceph 48 | scheme: http 49 | path: "/metrics" 50 | jobLabel: ceph-cluster 51 | selector: 52 | matchLabels: 53 | app: ceph 54 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/Glusterfs/readme.md: -------------------------------------------------------------------------------- 1 | # Gluster monitoring 2 | 3 | ## Install the exporter 4 | 5 | On all of the Glusterfs nodes, do 6 | 7 | ```bash 8 | sudo curl -fsSL https://github.com/kadalu/gluster-metrics-exporter/releases/latest/download/install.sh | sudo bash -x 9 | sudo systemctl enable gluster-metrics-exporter 10 | sudo systemctl start gluster-metrics-exporter 11 | ``` 12 | 13 | ## Install the endpoints, service and servicemonitor 14 | 15 | In the yaml, we use relabeling to make the hostname value readable and manageable. Adjust to your situation. 16 | 17 | ```bash 18 | kubectl apply -f sm-gluster-exporter.yaml 19 | ``` 20 | 21 | ## Install the Grafana dashboard 22 | 23 | ```bash 24 | kubectl create configmap grafana-dashboard-glusterfs --from-file=grafana-glusterfs.json 25 | kubectl label configmap grafana-dashboard-glusterfs grafana_dashboard="1" 26 | ``` 27 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/Longhorn/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring Longhorn 2 | 3 | Deploy the serviceMonitor 4 | 5 | ```bash 6 | kubectl apply -f sm-longhorn.yaml 7 | ``` 8 | 9 | Import a suitable grafana dashboard (Longhorn Example is ok), get the json definition and save it to a grafana-longhorn.json file. 
10 | 11 | ```bash 12 | kubectl create configmap grafana-dashboard-longhorn --from-file=grafana-longhorn.json 13 | kubectl label configmap grafana-dashboard-longhorn grafana_dashboard="1" 14 | ``` 15 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/Longhorn/yamls/sm-longhorn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: longhorn-prometheus-servicemonitor 5 | namespace: monitoring 6 | labels: 7 | name: longhorn-prometheus-servicemonitor 8 | release: kube-prometheus-stack 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: longhorn-manager 13 | namespaceSelector: 14 | matchNames: 15 | - longhorn-system 16 | endpoints: 17 | - port: manager -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/NFS-server/readme.md: -------------------------------------------------------------------------------- 1 | # Monitoring the NFS cluster 2 | 3 | ## Install the node exporter 4 | 5 | ```bash 6 | sudo apt-get update 7 | sudo apt-get install prometheus-node-exporter -y 8 | sudo systemctl status prometheus-node-exporter 9 | ``` 10 | 11 | ## Install the endpoints, service and servicemonitor 12 | 13 | ```bash 14 | kubectl apply -f sm-nodeexporter.yaml 15 | ``` 16 | 17 | ## Install the grafana dashboard 18 | 19 | ```bash 20 | kubectl create configmap grafana-dashboard-nfs-cluster -n monitoring --from-file=grafana-nfs-cluster.json 21 | kubectl label configmap grafana-dashboard-nfs-cluster -n monitoring grafana_dashboard="1" 22 | ``` 23 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Storage/NFS-server/yamls/sm-nodeexporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: nfs-cluster 5 | labels: 6 | app: nfs-cluster 7 | namespace: monitoring 8 | subsets: 9 | - addresses: 10 | - ip: xx.xx.xx.xx #IP of the loadbalancer for the cluster 11 | nodeName: nfs-cluster 12 | ports: 13 | - name: nfs-cluster 14 | port: 9100 15 | protocol: TCP 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: nfs-cluster 21 | labels: 22 | app: nfs-cluster 23 | namespace: monitoring 24 | spec: 25 | type: ClusterIP 26 | clusterIP: None 27 | ports: 28 | - name: nfs-cluster 29 | port: 9100 30 | targetPort: 9100 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | name: nfs-cluster 36 | namespace: monitoring 37 | labels: 38 | app: nfs-cluster 39 | release: kube-prometheus-stack 40 | spec: 41 | endpoints: 42 | - interval: 30s 43 | port: nfs-cluster 44 | scheme: http 45 | path: "/metrics" 46 | jobLabel: nfs-cluster 47 | selector: 48 | matchLabels: 49 | app: nfs-cluster 50 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Teleport/readme.md: -------------------------------------------------------------------------------- 1 | # Teleport monitoring 2 | 3 | Remember to set podMonitors to True 4 | 5 | ## Create the Grafana Dashboard 6 | 7 | ```bash 8 | kubectl create configmap grafana-dashboard-teleport-cluster -n monitoring --from-file=grafana-teleport-cluster.json 9 | kubectl label configmap grafana-dashboard-teleport-cluster -n monitoring grafana_dashboard="1" 10 | ``` 11 | 
-------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Traefik/readme.md: -------------------------------------------------------------------------------- 1 | # Traefik monitoring 2 | 3 | Remember to set the metrics port expose setting to true when installing Traefik, and set metrics and servicemonitor to true, with the proper additional labels. 4 | 5 | To add a grafana dashboard, first install it in grafana, then download the json from grafana itself, and then create a configmap from it in the monitoring namespace 6 | 7 | ```bash 8 | kubectl create configmap grafana-dashboard-traefik -n monitoring --from-file=grafana-traefik.json 9 | kubectl label configmap grafana-dashboard-traefik -n monitoring grafana_dashboard="1" 10 | ``` 11 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Traefik/yamls/prom-traefik.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: traefik-sm 5 | namespace: monitoring 6 | labels: 7 | traefik: http 8 | release: kube-prometheus-stack 9 | spec: 10 | jobLabel: traefik 11 | selector: 12 | matchExpressions: 13 | - {key: app.kubernetes.io/name, operator: Exists} 14 | namespaceSelector: 15 | matchNames: 16 | - traefik 17 | endpoints: 18 | - port: metrics 19 | interval: 15s -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Uptime-kuma/readme.md: -------------------------------------------------------------------------------- 1 | # Uptime Kuma monitoring 2 | 3 | ## Create the Prometheus Service Monitor 4 | 5 | Create a secret with the basic auth base64 username and password for uptime-kuma, then the service monitor 6 | 7 | 8 | ```bash 9 | echo -n "" | base64 10 | echo -n "" | base64 11 | ``` 12 | 13 | ```bash 14 | kubectl apply -f sm-ukuma.yaml 15 | ``` 16 | 17 | Then, import and define the grafana dashboard. 
18 | 19 | ```bash 20 | kubectl create configmap grafana-dashboard-uptime-kuma -n monitoring --from-file=grafana-uk.json 21 | kubectl label configmap grafana-dashboard-uptime-kuma -n monitoring grafana_dashboard="1" 22 | ``` 23 | -------------------------------------------------------------------------------- /Kubernetes/Prometheus-Stack/Uptime-kuma/yamls/sm-ukuma.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: uptime-kuma-basic-auth 5 | namespace: monitoring 6 | data: 7 | username: 8 | password: 9 | --- 10 | apiVersion: monitoring.coreos.com/v1 11 | kind: ServiceMonitor 12 | metadata: 13 | name: uptimekuma-sm 14 | namespace: monitoring 15 | labels: 16 | release: kube-prometheus-stack 17 | spec: 18 | jobLabel: uptimekuma 19 | namespaceSelector: 20 | matchNames: 21 | - monitoring 22 | selector: 23 | matchLabels: 24 | app.kubernetes.io/instance: uptime-kuma 25 | app.kubernetes.io/name: uptime-kuma 26 | endpoints: 27 | - port: http 28 | interval: 15s 29 | path: '/metrics' 30 | basicAuth: # Only needed if authentication is enabled (default) 31 | username: 32 | name: uptime-kuma-basic-auth 33 | key: username 34 | password: 35 | name: uptime-kuma-basic-auth 36 | key: password 37 | -------------------------------------------------------------------------------- /Kubernetes/QRCodeGenerator/readme.md: -------------------------------------------------------------------------------- 1 | # QR Code Generator 2 | 3 | Set the required domain and eventual values, then deploy the yaml 4 | 5 | ```bash 6 | kubectl apply -f qr-code-generator.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Rancher/readme.md: -------------------------------------------------------------------------------- 1 | # Deploy Rancher K8s Dashboard 2 | 3 | Add the repo, create a values file and deploy in the cattle-system namespace (needed!) 4 | 5 | ```bash 6 | helm repo add rancher-latest https://releases.rancher.com/server-charts/latest 7 | helm repo update 8 | kubectl create namespace cattle-system 9 | helm show values rancher-latest/rancher > rancher-values.yaml 10 | vi rancher-values.yaml 11 | helm upgrade -i rancher -n cattle-system rancher-latest/rancher -f rancher-values.yaml 12 | kubectl apply -f ig-rancher.yaml 13 | ``` 14 | 15 | ## Monitoring 16 | 17 | You can install the kube-prometheus-stack from rancher (the monitoring app), following the helm values configurations from the [Prometheus-Stack section](https://github.com/urbaman/HomeLab/tree/main/Kubernetes/Prometheus-Stack). 18 | 19 | Remember to unset the resource limits and requests, to keep the stack working and not going OOM. 
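For reference, a minimal sketch of what "unsetting" the limits and requests can look like in the monitoring app's Helm values (the exact component keys depend on the rancher-monitoring chart version, so treat these as placeholders):

```yaml
# Hypothetical values excerpt: empty resources means no requests/limits are applied
prometheus:
  prometheusSpec:
    resources: {}
grafana:
  resources: {}
```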
20 | 21 | Also, remember to set the servicemonitor and podmonitor labels to `release: rancher-monitoring` and to create the dashboards in the cattle-dashboards namespace 22 | -------------------------------------------------------------------------------- /Kubernetes/Reflector/readme.md: -------------------------------------------------------------------------------- 1 | # Install reflector 2 | 3 | ```bash 4 | helm repo add emberstack https://emberstack.github.io/helm-charts 5 | helm repo update 6 | helm upgrade -i reflector emberstack/reflector --create-namespace -n reflector 7 | ``` 8 | -------------------------------------------------------------------------------- /Kubernetes/Roundcube/readme.md: -------------------------------------------------------------------------------- 1 | # Install roundcube webmail 2 | 3 | Create a roundcube db in postgresql (or in Mysql) and change the db settings in the deployment and vars in the secret, then deploy the yaml. 4 | 5 | ```bash 6 | echo -n '' | base64 7 | echo -n '' | base64 8 | vi roundcube.yaml 9 | kubectl apply -f roundcube.yaml 10 | ``` 11 | -------------------------------------------------------------------------------- /Kubernetes/Shlink/readme.md: -------------------------------------------------------------------------------- 1 | # Install Shlink 2 | 3 | Get a Geolite license key, set up a redis and a postgresql server, and a dedicated postgresql user and db; set the values in the secret (base64) and in the configmap. 4 | 5 | ```bash 6 | echo -n '' | base64 7 | echo -n '' | base64 8 | echo -n '' | base64 9 | vi shlink.yaml 10 | kubectl apply -f shlink.yaml 11 | ``` 12 | 13 | Generate an API key, put it in the shlink-web-client secret in base64, then deploy shlink-web-client. 14 | 15 | ```bash 16 | kubectl exec -ti -n shlink shlink-5846b5d888-jn6zw -- shlink api-key:generate 17 | echo -n '' | base64 18 | vi shlink-web-client.yaml 19 | kubectl apply -f shlink-web-client.yaml 20 | ``` 21 | -------------------------------------------------------------------------------- /Kubernetes/Stirling-PDF/readme.md: -------------------------------------------------------------------------------- 1 | # Stirling PDF installation 2 | 3 | ## Preparation 4 | 5 | Create the required s-pdf.domain.com DNS entry both internally (we use pfSense) and externally if you expose it (we use cloudflare). 6 | 7 | ## Deploy 8 | 9 | Run the yaml file to deploy Stirling PDF, after having changed the config file. 10 | 11 | ```bash 12 | kubectl apply -f s-pdf.yaml 13 | ``` 14 | 15 | The first username is `admin`, password `stirling`. You'll have to change them. 
16 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Glusterfs/yamls/01-endpoints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | name: glusterfs-cluster 5 | subsets: 6 | - addresses: 7 | - ip: 10.0.50.21 8 | hostname: truenas1 9 | - ip: 10.0.50.22 10 | hostname: truenas2 11 | - ip: 10.0.50.23 12 | hostname: truenas3 13 | ports: 14 | - port: 1 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: glusterfs-cluster 20 | spec: 21 | ports: 22 | - port: 1 -------------------------------------------------------------------------------- /Kubernetes/Storage/Glusterfs/yamls/02a-direct.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test 5 | spec: 6 | containers: 7 | - name: alpine 8 | image: alpine:latest 9 | command: 10 | - touch 11 | - /data/test 12 | volumeMounts: 13 | - name: glusterfs-volume 14 | mountPath: /data 15 | volumes: 16 | - name: glusterfs-volume 17 | glusterfs: 18 | endpoints: glusterfs-cluster 19 | path: HDD5T/path/... 20 | readOnly: no -------------------------------------------------------------------------------- /Kubernetes/Storage/Glusterfs/yamls/02b1-presistent-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: glusterfs-path 5 | namespace: project-namespace 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | capacity: 10 | storage: 10Gi 11 | storageClassName: "" 12 | volumeMode: Filesystem 13 | glusterfs: 14 | endpoints: glusterfs-cluster 15 | path: HDD5T/path/... 
16 | readOnly: false 17 | claimRef: 18 | name: glusterfs-path-pvc 19 | namespace: project-namespace 20 | persistentVolumeReclaimPolicy: Retain 21 | --- 22 | apiVersion: v1 23 | kind: PersistentVolumeClaim 24 | metadata: 25 | name: glusterfs-path-pvc 26 | namespace: project-namespace 27 | spec: 28 | accessModes: 29 | - ReadWriteMany 30 | resources: 31 | requests: 32 | storage: 10Gi 33 | storageClassName: "" 34 | volumeName: glusterfs-path -------------------------------------------------------------------------------- /Kubernetes/Storage/Glusterfs/yamls/02b2-persistent-volume-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mypod 5 | namespace: project-namespace 6 | spec: 7 | containers: 8 | - name: myfrontend 9 | image: nginx 10 | volumeMounts: 11 | - mountPath: "/var/www/html" 12 | name: mypd 13 | volumes: 14 | - name: mypd 15 | persistentVolumeClaim: 16 | claimName: glusterfs-path-pvc -------------------------------------------------------------------------------- /Kubernetes/Storage/Longhorn/yamls/longhorn-nvme-sc.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: longhorn-nvme 5 | provisioner: driver.longhorn.io 6 | allowVolumeExpansion: true 7 | reclaimPolicy: "Delete" 8 | volumeBindingMode: Immediate 9 | parameters: 10 | numberOfReplicas: "3" 11 | staleReplicaTimeout: "30" 12 | fromBackup: "" 13 | fsType: "ext4" 14 | dataLocality: "best-effort" 15 | diskSelector: "nvme" 16 | nodeSelector: "storage" -------------------------------------------------------------------------------- /Kubernetes/Storage/NFS/yamls/nfs-test.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: demo-claim-hdd5t 5 | spec: 6 | storageClassName: hdd5t-nfs-client 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 10Mi 12 | --- 13 | kind: Pod 14 | apiVersion: v1 15 | metadata: 16 | name: test-pod-hdd5t 17 | spec: 18 | containers: 19 | - name: test-pod-hdd5t 20 | image: busybox:latest 21 | command: 22 | - "/bin/sh" 23 | args: 24 | - "-c" 25 | - "touch /mnt/SUCCESS && sleep 600" 26 | volumeMounts: 27 | - name: nfs-pvc-hdd5t 28 | mountPath: "/mnt" 29 | restartPolicy: "Never" 30 | volumes: 31 | - name: nfs-pvc-hdd5t 32 | persistentVolumeClaim: 33 | claimName: demo-claim-hdd5t 34 | --- 35 | kind: PersistentVolumeClaim 36 | apiVersion: v1 37 | metadata: 38 | name: demo-claim-sdd2t 39 | spec: 40 | storageClassName: sdd2t-nfs-client 41 | accessModes: 42 | - ReadWriteMany 43 | resources: 44 | requests: 45 | storage: 10Mi 46 | --- 47 | kind: Pod 48 | apiVersion: v1 49 | metadata: 50 | name: test-pod-sdd2t 51 | spec: 52 | containers: 53 | - name: test-pod-sdd2t 54 | image: busybox:latest 55 | command: 56 | - "/bin/sh" 57 | args: 58 | - "-c" 59 | - "touch /mnt/SUCCESS && sleep 600" 60 | volumeMounts: 61 | - name: nfs-pvc-sdd2t 62 | mountPath: "/mnt" 63 | restartPolicy: "Never" 64 | volumes: 65 | - name: nfs-pvc-sdd2t 66 | persistentVolumeClaim: 67 | claimName: demo-claim-sdd2t -------------------------------------------------------------------------------- /Kubernetes/Storage/NFS/yamls/sc-csi-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: -nfs-csi 5 | 
provisioner: nfs.csi.k8s.io 6 | parameters: 7 | server: 8 | share: 9 | reclaimPolicy: Delete 10 | volumeBindingMode: Immediate 11 | mountOptions: 12 | - hard 13 | - nfsvers=4.2 -------------------------------------------------------------------------------- /Kubernetes/Storage/NFS/yamls/snapshotclass-csi-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapshot.storage.k8s.io/v1 2 | kind: VolumeSnapshotClass 3 | metadata: 4 | name: -csi-nfs-snapclass 5 | driver: nfs.csi.k8s.io 6 | deletionPolicy: Delete -------------------------------------------------------------------------------- /Kubernetes/Storage/OpenEBS/readme.md: -------------------------------------------------------------------------------- 1 | # Installing and configuring OpenEBS Maystore storage 2 | 3 | See [here](https://mayastor.gitbook.io/introduction/) 4 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Rook/yamls/rook-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress-mysql 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 3306 10 | selector: 11 | app: wordpress 12 | tier: mysql 13 | clusterIP: None 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: mysql-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-nvme2tb 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress-mysql 33 | labels: 34 | app: wordpress 35 | tier: mysql 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: mysql 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: mysql 48 | spec: 49 | containers: 50 | - image: mysql:5.6 51 | name: mysql 52 | env: 53 | - name: MYSQL_ROOT_PASSWORD 54 | value: changeme 55 | ports: 56 | - containerPort: 3306 57 | name: mysql 58 | volumeMounts: 59 | - name: mysql-persistent-storage 60 | mountPath: /var/lib/mysql 61 | volumes: 62 | - name: mysql-persistent-storage 63 | persistentVolumeClaim: 64 | claimName: mysql-pv-claim 65 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Rook/yamls/rook-sc-nvme1tb.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: rook-ceph-nvme1tb 6 | parameters: 7 | clusterID: rook-ceph-external 8 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner 9 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph-external 10 | csi.storage.k8s.io/fstype: ext4 11 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node 12 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-external 13 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner 14 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-external 15 | imageFeatures: layering 16 | imageFormat: "2" 17 | pool: Ceph-NVMe1TB 18 | provisioner: rook-ceph.rbd.csi.ceph.com 19 | reclaimPolicy: Delete 20 | volumeBindingMode: Immediate 21 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Rook/yamls/rook-sc-nvme2tb.yaml: 
-------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: rook-ceph-nvme2tb 6 | parameters: 7 | clusterID: rook-ceph-external 8 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner 9 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph-external 10 | csi.storage.k8s.io/fstype: ext4 11 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node 12 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-external 13 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner 14 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-external 15 | imageFeatures: layering 16 | imageFormat: "2" 17 | pool: Ceph-NVMe2TB 18 | provisioner: rook-ceph.rbd.csi.ceph.com 19 | reclaimPolicy: Delete 20 | volumeBindingMode: Immediate 21 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Rook/yamls/rook-sc-ssd2t.yaml: -------------------------------------------------------------------------------- 1 | allowVolumeExpansion: true 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: rook-cephfs-ssd2t 6 | parameters: 7 | clusterID: rook-ceph-external 8 | csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner 9 | csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph-external 10 | csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node 11 | csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph-external 12 | csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner 13 | csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph-external 14 | fsName: Cephfs-SSD2T 15 | pool: Cephfs-SSD2T_data 16 | provisioner: rook-ceph.cephfs.csi.ceph.com 17 | reclaimPolicy: Delete 18 | volumeBindingMode: Immediate 19 | -------------------------------------------------------------------------------- /Kubernetes/Storage/Rook/yamls/rook-wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: wordpress 12 | tier: frontend 13 | type: LoadBalancer 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: wp-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-nvme1tb 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress 33 | labels: 34 | app: wordpress 35 | tier: frontend 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: frontend 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: frontend 48 | spec: 49 | containers: 50 | - image: wordpress:4.6.1-apache 51 | name: wordpress 52 | env: 53 | - name: WORDPRESS_DB_HOST 54 | value: wordpress-mysql 55 | - name: WORDPRESS_DB_PASSWORD 56 | value: changeme 57 | ports: 58 | - containerPort: 80 59 | name: wordpress 60 | volumeMounts: 61 | - name: wordpress-persistent-storage 62 | mountPath: /var/www/html 63 | volumes: 64 | - name: wordpress-persistent-storage 65 | persistentVolumeClaim: 66 | claimName: wp-pv-claim 67 | -------------------------------------------------------------------------------- 
/Kubernetes/Teleport/yaml/teleport-ssl-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: teleport-domain-com 5 | namespace: teleport-cluster 6 | spec: 7 | # Certificate will be valid for these domain names 8 | dnsNames: 9 | - "teleport.domain.com" 10 | - "*.teleport.domain.com" 11 | # Reference our issuer 12 | # As it's a ClusterIssuer, it can be in a different namespace 13 | issuerRef: 14 | kind: ClusterIssuer 15 | name: cert-manager-acme-issuer 16 | # Secret that will be created with our certificate and private keys 17 | secretName: teleport-domain-com 18 | -------------------------------------------------------------------------------- /Kubernetes/Truenas/readme.md: -------------------------------------------------------------------------------- 1 | # Truenas behind Traefik 2 | 3 | ```bash 4 | kubectl apply -f ig-truenas.yaml 5 | ``` 6 | -------------------------------------------------------------------------------- /Kubernetes/Uptimekuma/readme.md: -------------------------------------------------------------------------------- 1 | # Uptime Kuma for service uptime monitoring 2 | 3 | ## Install Uptime Kuma through helm 4 | 5 | Once Helm has been set up correctly, add the repo as follows: 6 | 7 | ```bash 8 | helm repo add uptime-kuma https://dirsigler.github.io/uptime-kuma-helm 9 | helm repo update 10 | helm show values uptime-kuma/uptime-kuma > uptime-kuma-values.yaml 11 | ``` 12 | 13 | If you had already added this repo earlier, run helm repo update to retrieve the latest versions of the packages. You can then run helm search repo uptime-kuma to see the charts. 14 | 15 | Change the values to install the latest image, on the desired storageClass, with serviceMonitor enabled and with the proper serviceMonitor additionalLabels and basicAuth user/pass: 16 | 17 | ```bash 18 | helm upgrade -i uptime-kuma uptime-kuma/uptime-kuma -n monitoring -f uptime-kuma-values.yaml 19 | ``` 20 | 21 | ## Expose through IngressRoute 22 | 23 | Create and install the ssl cert and ingressroute 24 | 25 | ```bash 26 | kubectl apply -f ig-uptimekuma.yaml 27 | ``` 28 | -------------------------------------------------------------------------------- /Kubernetes/Wazuh/readme.md: -------------------------------------------------------------------------------- 1 | # Wazuh Security Scanner 2 | 3 | Follow their guide, but remember to change the following: 4 | 5 | - Properly define the storageClass, copying from an existing sc for the selected provider 6 | - Change all of the services to ClusterIP 7 | - Change the indexer and the wazuh api URLs in the dashboard deployment to properly point to the relative services: 8 | - indexer.wazuh.svc.cluster.local:9200 9 | - wazuh.wazuh.svc.cluster.local 10 | - Do your Traefik things to reverse proxy the dashboard, the indexer, the api and all of the other ports and services 11 | - Deploy agents 12 | -------------------------------------------------------------------------------- /Kubernetes/k8tz/readme.md: -------------------------------------------------------------------------------- 1 | # Install k8tz 2 | 3 | To make all the pods use the same timezone: 4 | 5 | ```bash 6 | helm repo add k8tz https://k8tz.github.io/k8tz/ 7 | helm repo update 8 | helm upgrade -i k8tz -n kube-system k8tz/k8tz --set timezone=Europe/Rome 9 | ``` 10 | 11 | From now on, all of the pods created will have the selected timezone. 
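A quick way to verify the injection (the pod name here is just an example; any freshly created pod should do):

```bash
# The date printed by a new pod should be in the configured timezone
kubectl run tz-check --image=busybox --restart=Never -- date
# once the pod has completed:
kubectl logs tz-check
kubectl delete pod tz-check
```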
12 | -------------------------------------------------------------------------------- /LinuxTips.md: -------------------------------------------------------------------------------- 1 | # Lots of Linux Tips 2 | 3 | ## Purge previously uninstalled packages 4 | 5 | ```bash 6 | sudo apt purge `dpkg --list | grep ^rc | awk '{ print $2; }'` 7 | ``` 8 | 9 | ## list disks 10 | 11 | ```bash 12 | ls -l /dev/disk/by-id 13 | ls -l /dev/disk/by-uuid 14 | sudo hwinfo --disk 15 | sudo hwinfo --disk --short 16 | sudo fdisk -l 17 | sudo lshw -class disk 18 | sudo lshw -class disk | grep -A 5 -B 19 | lsblk 20 | lsblk -f 21 | ``` 22 | 23 | ## Modify text with sed 24 | 25 | ```bash 26 | sed -i 's/old-text/new-text/g' input.txt 27 | ``` 28 | -------------------------------------------------------------------------------- /Nethserver/readme.md: -------------------------------------------------------------------------------- 1 | # Nethserver 2 | 3 | IP: 10.0.50.25 4 | 5 | Hostname: nethserver.urbaman.it 6 | 7 | AD 8 | 9 | IP: 10.0.50.26 10 | 11 | domain: AD.URBAMAN.IT 12 | -------------------------------------------------------------------------------- /Network/Switch/readme.md: -------------------------------------------------------------------------------- 1 | # Switch config 2 | -------------------------------------------------------------------------------- /Network/V2/readme.md: -------------------------------------------------------------------------------- 1 | # Network version 2 2 | 3 | Here's the version 2 of my Homelab Network, after the downscale due to power and noise efficiency of my rack. 4 | 5 | ![Network2 drawio](https://github.com/urbaman/HomeLab/assets/26753344/ddb161e1-9302-4a4d-9068-de205b9da6fb) 6 | -------------------------------------------------------------------------------- /Network/readme.md: -------------------------------------------------------------------------------- 1 | # Network 2 | 3 | ## VLANS 4 | 5 | WAN 192.168.1.0 6 | 7 | VLAN1 192.168.2.0 (switch management) 8 | 9 | VLAN10 10.0.10.0 (LAN) 10 | 11 | VLAN20 10.0.20.0 (PVE Cluster) 12 | 13 | VLAN30 10.0.30.0 (CephCluster - Storage) 14 | 15 | VLAN40 10.0.40.0 (CephPublic) 16 | 17 | VLAN50 10.0.50.0 (VMs) 18 | 19 | VLAN60 10.0.60.0 (pfSync) 20 | 21 | VLAN70 10.0.70.0 (Gluster Internal - Storage) 22 | 23 | VLAN100 10.0.100.0 (Management - IDRACs) 24 | 25 | ## VLANS (version2) 26 | 27 | WAN 192.168.1.0 28 | 29 | VLAN1 192.168.2.0 (switch management?) 30 | 31 | VLAN10 10.0.10.0 (LAN) 32 | 33 | VLAN20 10.0.20.0 (PVE Cluster) 34 | 35 | VLAN30 10.0.30.0 (CephCluster - Storage) 36 | 37 | VLAN40 10.0.40.0 (CephPublic) 38 | 39 | VLAN50 10.0.50.0 (VMs) 40 | 41 | VLAN60 10.0.60.0 (pfSync) 42 | 43 | VLAN70 10.0.70.0 (Gluster Internal - Storage) 44 | 45 | VLAN100 10.0.100.0 (Management - IDRACs,Pve,TrueNAS management,K8s management,...) only from VLAN10 PCs 46 | 47 | ## Network Diagram 48 | 49 | ![Network Diagram](network.drawio.svg) 50 | -------------------------------------------------------------------------------- /Proxmox/Clustering.md: -------------------------------------------------------------------------------- 1 | # Setup a Cluster 2 | 3 | ## Best practices 4 | 5 | 1. 
Define at least three subnets/vlans 6 | * Web gui subnet (the one with the gateway - vlan100, Management) 7 | * Cluster communication subnet (vlan20 - Proxmox Cluster) - Choosen during cluster setup 8 | * Migration network (vlan80 - Proxmox VM Migration network) - This is to separate the network through which Proxmox performs VM migration between nodes, set in Datacenter/Options 9 | 2. Optional (see Ceph): define another two subnets/vlans for Ceph 10 | * Ceph Storage (vlan30 - Ceph Storage - at least 10G) 11 | * Ceph Public (vlan40 - Ceph Public) 12 | -------------------------------------------------------------------------------- /Proxmox/Dell3rdPartyHardwareCheck.md: -------------------------------------------------------------------------------- 1 | # Quiet the Dell 3rd Party hardware check spinning the fans 2 | 3 | Access the iDRAC through ssh (same IP, same user/password), and ran the command: 4 | 5 | ```bash 6 | racadm set system.thermalsettings.ThirdPartyPCIFanResponse 0 7 | ``` 8 | -------------------------------------------------------------------------------- /Proxmox/Network.md: -------------------------------------------------------------------------------- 1 | # Network for Proxmox 2 | 3 | ## Interface check 4 | 5 | To check which interface is actually linked to which physical port, enable all of the interfaces. For each one: 6 | 7 | ```bash 8 | ip link set dev eth1 up 9 | ``` 10 | 11 | Then, check where the following script gives a "1" result with the cable connected, moving the cable through the physical ports and taking note of the port-interface link. 12 | 13 | ```bash 14 | for i in $( ls /sys/class/net ); do echo -n $i: ; cat /sys/class/net/$i/carrier; done 15 | ``` 16 | 17 | ## Set the network (bonds, bridges, vlans) 18 | 19 | See the example /etc/network/interfaces file [here](https://github.com/urbaman/HomeLab/tree/main/Proxmox/network/interfaces) -------------------------------------------------------------------------------- /Proxmox/VirtualMachines.md: -------------------------------------------------------------------------------- 1 | # Virtual Machines management 2 | 3 | ## Change VM ID on Ceph 4 | 5 | - Stop the VM 6 | - Check the storage on Ceph 7 | 8 | ```bash 9 | rbd -p ls | grep vm-OLDID 10 | ``` 11 | 12 | - Move all the devices (disks and cloudinit devices) 13 | 14 | ```bash 15 | rbd -p mv vm-OLDID-xxx vm-NEWID-xxx 16 | ``` 17 | 18 | - Move the vm config file, and change all internal reference to old id 19 | 20 | ```bash 21 | mv /etc/pve/nodes/NODE/qemu-server/OLDID.conf /etc/pve/nodes/NODE/qemu-server/NEWID.conf 22 | sed -i "s/vm-OLDID/vm-NEWID/g" /etc/pve/nodes/NODE/qemu-server/NEWID.conf 23 | ``` 24 | 25 | - Restart the VM 26 | -------------------------------------------------------------------------------- /Proxmox/iDRACservicemodule.md: -------------------------------------------------------------------------------- 1 | # iDRAC Service Module(iSM) Installation Procedure 2 | 3 | To properly install the iDRAC service module, if not installed by the Openmanage installation procedure: 4 | 5 | ```bash 6 | wget https://linux.dell.com/repo/community/openmanage/iSM/4300/focal/pool/main/d/dcism-osc/dcism-osc_7.0.1.0_amd64.deb 7 | wget https://linux.dell.com/repo/community/openmanage/iSM/4300/focal/pool/main/d/dcism/dcism_4.3.0.0-2781.ubuntu20_amd64.deb 8 | ``` 9 | 10 | ```bash 11 | dpkg -i dcism-osc_7.0.1.0_amd64.deb 12 | dpkg -i dcism_4.3.0.0-2781.ubuntu20_amd64.deb 13 | ``` 14 | -------------------------------------------------------------------------------- 
/Proxmox/readme.md: -------------------------------------------------------------------------------- 1 | # Proxmox installation and setup 2 | 3 | Here are the steps to recreate my own installation, with all of the tools to properly manage the Dell r730xd server: 4 | 5 | 1. [Quiet the fans for Dell 3rd party hardware check](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Dell3rdPartyHardwareCheck.md) 6 | 2. [Installation](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Installation.md) 7 | 3. [Optimizations](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Optimizations.md) 8 | 4. [Network management](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Network.md) 9 | 5. [Pcie Passthrough implementation](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Passtrhough.md) 10 | 6. [Dell Openmanage 10.3 installation](https://github.com/urbaman/HomeLab/blob/main/Proxmox/Openmanage.md) 11 | 7. [iDRAC Service Module installation](https://github.com/urbaman/HomeLab/blob/main/Proxmox/iDRACservicemodule.md) 12 | 8. [ipmtools installation, tips and tricks](https://github.com/urbaman/HomeLab/blob/main/Proxmox/ipmitool.md) 13 | -------------------------------------------------------------------------------- /Rack/RackDiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/urbaman/HomeLab/144f7a583a0d3e8669f0ebc6e05ef65e75aafd8d/Rack/RackDiagram.png -------------------------------------------------------------------------------- /homeassistant.md: -------------------------------------------------------------------------------- 1 | # Installare Homeassistant su Proxmox 2 | 3 | https://www.juanmtech.com/install-proxmox-and-virtualize-home-assistant/ 4 | 5 | https://community.home-assistant.io/t/home-assistant-os-on-proxmox-guide/262263 6 | 7 | https://jhartman.pl/2021/11/08/proxmox-7-cant-boot-to-uefi-home-assistant-and-other-uefi-images 8 | -------------------------------------------------------------------------------- /postgres_ha.md: -------------------------------------------------------------------------------- 1 | # Istruzioni per mettere postgres in HA 2 | 3 | ## Ubuntu 4 | 5 | https://arctype.com/blog/postgres-patroni/#main-components-of-postgresql-cluster 6 | 7 | ## RedHat/Centos 8 | 9 | https://digitalis.io/blog/technology/part1-postgresql-ha-patroni-etcd-haproxy/ 10 | 11 | https://digitalis.io/blog/postgresql/deploying-postgresql-for-high-availability-with-patroni-etcd-and-haproxy-part-2/ 12 | 13 | # Spostare la directory di data 14 | 15 | https://www.digitalocean.com/community/tutorials/how-to-move-a-postgresql-data-directory-to-a-new-location-on-ubuntu-16-04 16 | 17 | # PgBouncer 18 | 19 | https://www.pgbouncer.org/ 20 | -------------------------------------------------------------------------------- /zfs.md: -------------------------------------------------------------------------------- 1 | # ZFS: aggiungere un disco creando un mirror 2 | 3 | psSense 4 | 5 | ```bash 6 | #check the zpool and the available drives (freeBSD/pfSense) 7 | zpool status 8 | sysctl kern.disks 9 | 10 | #backup the partition table from da0 to restore to (new) da1 11 | gpart backup da0 > /tmp/da0.bak 12 | gpart restore -l da1 < /tmp/da0.bak 13 | 14 | #add new partitioned disk to zpool zpool1 15 | zpool attach zpool1 da0p3 da1p3 16 | 17 | #write bootloader to new disk 18 | gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 2 da1 19 | 20 | #write EFI partition to new disk 21 | dd if=/boot/boot1.efifat of=/dev/da1p1 bs=4m 22 | ``` 23 
| 24 | TrueNAS 25 | 26 | ```bash 27 | gdisk /dev/sda 28 | b (enter filename) 29 | 30 | #5: restore partition backup to sdb with gdisk 31 | gdisk /dev/sdb 32 | r (recover, then l to load the backup filename) 33 | 34 | #6: Check the boot-pool status 35 | It should display boot-pool and the largest partition of the sda drive like the following 36 | zpool status boot-pool 37 | pool: boot-pool 38 | state: ONLINE 39 | config: 40 | 41 | NAME STATE READ WRITE CKSUM 42 | boot-pool ONLINE 0 0 0 43 | sda3 ONLINE 0 0 0 44 | 45 | errors: No known data errors 46 | 47 | #7: Attach the sdb to the zpool 48 | Keep in mind to use the largest partition which should be the same as on sda 49 | zpool attach boot-pool sda3 sdb3 50 | 51 | #8: Copy the EFI Boot partition 52 | dd if=/dev/sda2 of=/dev/sdb2 53 | 54 | #9: Copy the BIOS Boot partition (not sure if needed) 55 | dd if=/dev/sda1 of=/dev/sdb1 56 | ``` 57 | --------------------------------------------------------------------------------