├── .github ├── FUNDING.yml ├── renovate-config.json └── workflows │ └── ci.yaml ├── minio ├── backups │ ├── testing │ │ ├── restores │ │ │ ├── .gitignore │ │ │ ├── .env_sample │ │ │ └── restore_files.yaml │ │ └── backups │ │ │ └── root_backup.yaml │ ├── external_secrets_argocd_app.yaml │ └── helm │ │ └── Chart.yaml └── vanilla │ ├── persistence │ └── minio_pvc.yaml │ ├── external_secrets │ └── values.yaml │ ├── minio_pvc_argocd_app.yaml │ └── README.md ├── demo ├── kepler │ └── README.md ├── test-app │ ├── nginx-hello-world │ │ ├── README.md │ │ ├── manifests │ │ │ ├── service.yaml │ │ │ ├── deployment.yaml │ │ │ └── ingress.yaml │ │ └── nginx_hello_world_argocd_app.yaml │ ├── nginx-hello-world-vouch │ │ ├── README.md │ │ ├── manifests │ │ │ ├── service.yaml │ │ │ └── deployment.yaml │ │ └── nginx_hello_world_vouch_argocd_app.yaml │ └── postgres-backup │ │ └── README.md ├── pixelfed │ ├── storage │ │ ├── seaweedfs │ │ │ └── README.md │ │ ├── seaweedfs_with_tolerations │ │ │ └── README.md │ │ └── pvc │ │ │ ├── templates │ │ │ ├── peertube_pvc.yaml │ │ │ └── k8up_pod_config.yaml │ │ │ └── values.yaml │ ├── README.md │ └── external_secrets │ │ └── templates │ │ └── bitwarden │ │ └── pixelfed_app_key.yaml ├── README.md ├── vault │ ├── README.md │ └── vault_argocd_app.yaml ├── keycloak │ ├── cli_config │ │ ├── kustomization.yaml │ │ └── config-templates │ │ │ └── user.json │ ├── persistence │ │ └── postgres_persistence.yaml │ └── external_secrets │ │ └── keycloak-admin-credentials.yaml ├── minio-certs-helm-chart │ ├── values.yaml │ └── templates │ │ └── cert_issuer.yaml ├── argo-workflows │ ├── external_secrets │ │ └── values.yaml │ ├── secrets │ │ ├── workflows-pgsql-credentials.yaml │ │ ├── workflows-s3-artifact-credentials.yaml │ │ └── workflows-s3-postgres-credentials.yaml │ ├── app-of-apps │ │ ├── event-bus.yaml │ │ └── service-account.yaml │ ├── example │ │ ├── vm-webhook-event-source.yaml │ │ └── webhook-event-source.yaml │ └── README.md ├── juicefs │ ├── README.md │ ├── external_secrets │ │ ├── values.yaml │ │ └── templates │ │ │ └── redis_credentials.yaml │ ├── juicefs_argocd_app_of_apps.yaml │ └── persistence │ │ └── seaweedfs_data_pvc.yaml ├── cozy │ ├── external_secrets │ │ └── values.yaml │ ├── README.md │ └── secret-couchdb.md ├── linkerd │ ├── README.md │ └── linkerd_crds_argocd_app.yaml ├── haproxy │ └── haproxy.sh ├── local-path-provisioner │ ├── fast-raid.yaml │ ├── slow-raid.yaml │ └── README.md ├── wg-access-server │ ├── manifests │ │ └── persistence │ │ │ └── wg-pvc.yaml │ ├── README.md │ └── external_secrets │ │ └── wg-private-key.yaml ├── appflowy │ ├── README.md │ ├── storage │ │ └── minio │ │ │ └── README.md │ └── external_secrets │ │ └── values.yaml ├── openbao │ ├── README.md │ └── openbao_argocd_app.yaml ├── infisical │ ├── README.md │ ├── secrets-operator │ │ └── infisical_secrets_operator_argocd_app.yaml │ └── infisical_secrets │ │ └── secret.yaml ├── garage │ ├── README.md │ ├── manifests │ │ └── init_rbac.yaml │ └── config.sh ├── artifactory │ ├── README.md │ └── artifactory_argocd_app.yaml ├── oauth2-proxy │ └── README.md ├── rancher │ ├── manifests │ │ └── rancher-ingress.yaml │ ├── README.md │ └── rancher-argo-app.yaml ├── longhorn │ └── helm │ │ └── longhorn-helm.yaml └── cilium │ └── README.md ├── zitadel ├── backups_and_restores │ ├── .gitignore │ └── .sample-restic-env ├── external_secrets │ ├── values.yaml │ └── templates │ │ └── bitwarden │ │ └── zitadel-core-key.yaml └── storage │ └── minio_tenant │ └── minio_setup_script_argocd_app.yaml ├── .gitignore 
├── grafana_stack ├── dashboards │ ├── values.yaml │ ├── Chart.yaml │ └── README.md ├── screenshots │ ├── prometheus-app-of-apps.png │ └── prometheus_stack_network.png ├── app_of_apps_with_matrix │ ├── README.md │ ├── alloy_argocd_app.yaml │ ├── s3_provider_argocd_app.yaml │ └── dashboards_argocd_app.yaml ├── scrape-configs │ └── additional-scrape-config-example.md ├── external_secrets │ ├── templates │ │ └── loki_valkey_credentials.yaml │ └── values.yaml └── README.md ├── matrix ├── backups_and_restores │ ├── .gitignore │ └── .sample-restic-env ├── app_of_apps_beta │ └── README.md ├── storage │ └── pvc │ │ ├── bridges_pvc.md │ │ └── templates │ │ ├── media_pvc.yaml │ │ ├── signingkey_pvc.yaml │ │ ├── synapse_config.yaml │ │ ├── mautrix_discord_bridge_pvc.yaml │ │ └── matrix_authentication_service_pvc.yaml └── external_secrets │ └── templates │ └── bitwarden │ ├── syncv3_env.yaml │ ├── registration.yaml │ └── trusted_key_servers.yaml ├── badargoart_small.png ├── mastodon ├── small-hack │ ├── backups_and_restores │ │ ├── .gitignore │ │ └── .sample-restic-env │ ├── app_of_apps │ │ └── migratedb_argocd_app.yaml │ ├── app_of_apps_with_tolerations │ │ └── migratedb_argocd_app.yaml │ ├── storage │ │ └── pvc │ │ │ ├── values.yaml │ │ │ └── templates │ │ │ ├── k8up_pod_config.yaml │ │ │ └── valkey_pvc.yaml │ └── external_secrets │ │ └── values.yaml ├── mastodon │ └── app_of_apps_with_tolerations │ │ └── migratedb_argocd_app.yaml └── README.md ├── netmaker ├── netmaker.png ├── netmaker-network.png ├── manifests │ └── persistence │ │ ├── mq_pvc.md │ │ ├── shared_data_pvc.md │ │ └── postgres_persistence.yaml ├── external_secrets │ └── values.yaml └── app_of_apps │ └── netmaker_persistsence.yaml ├── nextcloud ├── backups_and_restores │ ├── .gitignore │ ├── .sample-restic-env │ ├── root_backup.yaml │ └── restore_files.yaml ├── screenshots │ └── nextcloud_app.png ├── app_of_apps_with_tolerations │ ├── README.md │ └── install_apps │ │ ├── values.yaml │ │ └── templates │ │ └── before_starting_scripts_configmap.yaml ├── no-nginx │ ├── phpconfigmap.yaml │ ├── README.md │ └── fastcgi_configmap.yaml ├── storage │ └── pvc │ │ ├── .helmignore │ │ └── templates │ │ ├── nextcloud_files_pvc.yaml │ │ ├── nextcloud_config_pvc.yaml │ │ └── k8up_pod_config.yaml ├── app_of_apps │ └── before_starting_scripts_configmap.yaml ├── maintenance_mode_cronjobs │ └── values.yaml └── external_secrets │ └── templates │ └── bitwarden │ └── redis_credentials.yaml ├── cert-manager ├── cluster-issuers │ ├── values.yaml │ ├── Chart.yaml │ └── templates │ │ ├── production.yaml │ │ └── staging.yaml ├── external_secrets │ ├── values.yaml │ ├── templates │ │ └── bitwarden │ │ │ └── cloudflare_token.yaml │ └── README.md ├── README.md ├── cert-manager_argocd_app.yaml └── app_of_apps │ └── cert-manager_argocd_app.yaml ├── home-assistant ├── backups_and_restores │ ├── .gitignore │ └── .sample-restic-env ├── external_secrets │ ├── README.md │ └── values.yaml └── storage │ └── templates │ └── pvc.yaml ├── jellyfin ├── external_secrets │ ├── README.md │ └── values.yaml ├── storage │ └── pvc │ │ ├── .helmignore │ │ └── templates │ │ ├── jellyfin_web_pvc.yaml │ │ ├── jellyfin_media_pvc.yaml │ │ ├── jellyfin_config_pvc.yaml │ │ └── k8up_pod_config.yaml └── README.md ├── k8tz └── screenshots │ └── k8tz_app.png ├── seaweedfs ├── backups.drawio.png ├── external_secrets │ └── values.yaml ├── app_of_apps │ └── persistence_argocd_app.yaml ├── operator │ └── seaweedfs_argocd_app.yaml ├── README.md └── persistence │ └── seaweedfs_data_pvc.yaml ├── argocd 
├── docs │ ├── keycloak │ │ ├── step_3.png │ │ ├── step_4.png │ │ ├── step_3.1.png │ │ ├── mapper_details.png │ │ ├── step_1_create_client.png │ │ ├── step_5_create_groups.png │ │ └── step_2_create_client_scope.png │ └── screenshots │ │ ├── argo_ingress.png │ │ └── argocd_app.png ├── external_secrets │ ├── values.yaml │ └── .helmignore └── manifests │ ├── apps │ └── nextcloud_app.yaml │ └── projects │ ├── matrix.yaml │ ├── keycloak.yaml │ ├── social.yaml │ ├── zitadel.yaml │ ├── external-secrets.yaml │ ├── ingress_project.yaml │ ├── nextcloud.yaml │ └── monitoring-project.yaml ├── vouch-proxy ├── keycloak │ ├── step_3.png │ ├── step_4.png │ ├── step_3.1.png │ ├── mapper_details.png │ ├── step_1_create_client.png │ ├── step_5_create_groups.png │ └── step_2_create_client_scope.png ├── screenshots │ └── vouch_app_of_apps.png └── external_secrets │ └── values.yaml ├── coturn ├── README.md └── coturn_argocd_app.yaml ├── k8up ├── screenshots │ └── k8up-app-of-apps.png ├── README.md └── k8up_argocd_app.md ├── kyverno ├── README.md └── kyverno_argocd_app.yaml ├── peertube ├── storage │ ├── seaweedfs │ │ └── README.md │ ├── seaweedfs_with_tolerations │ │ └── README.md │ └── pvc │ │ ├── templates │ │ ├── peertube_pvc.yaml │ │ └── k8up_pod_config.yaml │ │ └── values.yaml ├── app_of_apps_with_tolerations │ └── README.md └── external_secrets │ └── templates │ └── bitwarden │ └── peertube-secret.yaml ├── metallb ├── crds │ ├── l2-advertisement.yaml │ └── ip-addr-pool.yaml ├── metallb_argocd_app.yaml └── README.md ├── external-secrets-operator ├── screenshots │ ├── eso.png │ └── bweso.png ├── app_of_apps │ ├── external-secrets-crds_argocd_app.yaml │ ├── with_metrics │ │ └── external-secrets-argocd_app.yaml │ └── external-secrets-argocd_app.yaml └── providers │ └── bitwarden │ └── bitwarden_argocd_app.yaml ├── ingress-nginx ├── screenshots │ ├── ingress-nginx.png │ └── ingress-nginx-namespace.png └── modsecurity_configmap │ ├── modsecurity_exception_files │ ├── vnc.conf │ ├── README.md │ ├── loki.conf │ └── jellyfin.conf │ ├── .helmignore │ ├── values.yaml │ └── templates │ └── modsecurity_plugins_configmap.yaml ├── prometheus ├── screenshots │ ├── prometheus-app-of-apps.png │ └── prometheus_stack_network.png ├── app_of_apps │ ├── loki-cluster-rules.yaml │ ├── secrets │ │ └── thanos-secret.yaml │ └── dashboards_argocd_app.yaml ├── app_of_apps_with_matrix │ ├── README.md │ └── dashboards_argocd_app.yaml ├── dashboards │ └── README.md ├── external_secrets │ ├── values.yaml │ └── README.md ├── scrape-configs │ └── additional-scrape-config-example.md ├── scrape-configs_argocd_app_example.md └── crds │ └── prometheus_crds_argocd_app.yaml ├── nvidia_device_plugin ├── README.md └── nvidia_device_plugin_argocd_app.yaml ├── postgres ├── operators │ ├── zalando │ │ ├── postgres_cluster │ │ │ ├── values.yaml │ │ │ └── .helmignore │ │ ├── README.md │ │ └── external_secrets │ │ │ └── values.yaml │ └── cloud-native-postgres │ │ ├── cnpg_operator_dashboard_app.yaml │ │ └── cnpg_operator_argocd_app.yaml ├── backups │ ├── examples │ │ ├── restore-target-pvc.yaml │ │ ├── operator-database.yaml │ │ └── backup-job.yaml │ └── k8up-test │ │ ├── restore-test │ │ └── target-pvc.yaml │ │ ├── test-database │ │ ├── database_argocd_app.yaml │ │ └── manifests │ │ │ └── test-db-operator.yaml │ │ └── k8up-job │ │ ├── backup-job-argocd_app.yaml │ │ ├── manifests │ │ ├── backup-as-root.yaml │ │ └── external-secrets_argocd_app.yaml │ │ └── external_secrets │ │ └── restic-repo-secret.yaml └── README.md ├── tempo ├── README.md ├── 
app_of_apps │ └── configmap-tempo-runtime.yaml └── external_secrets │ ├── values.yaml │ └── templates │ └── valkey_credentials.yaml ├── opa └── README.md ├── renovate └── external_secrets │ ├── templates │ ├── generator_ssh_key.yaml │ ├── renovate-ssh-key.yaml │ ├── renovate-config.yaml │ └── pat.yaml │ ├── values.yaml │ └── .helmignore ├── valkey ├── external_secrets │ ├── values.yaml │ ├── templates │ │ └── bitwarden │ │ │ └── valkey_credentials.yaml │ └── README.md └── README.md ├── valkey_cluster ├── external_secrets │ ├── values.yaml │ ├── templates │ │ └── bitwarden │ │ │ └── valkey_credentials.yaml │ └── README.md └── README.md ├── collabora_online ├── external_secrets │ └── values.yaml └── README.md ├── libretranslate └── external_secrets │ └── values.yaml ├── alloy └── templates │ ├── alloy-logs-configmap.yaml │ ├── alloy-traces-configmap.yaml │ └── alloy-metrics-configmap.yaml ├── s3_persistence_and_backups └── templates │ ├── minio_pvc.yaml │ ├── juicefs_valkey_pvc.yaml │ └── pod_config.yaml ├── gotosocial └── storage │ └── pvc │ ├── templates │ ├── gotosocial_pvc.yaml │ └── k8up_pod_config.yaml │ └── values.yaml ├── writefreely ├── README.md ├── storage │ └── templates │ │ ├── pvc.yaml │ │ ├── pvc_mysql.yaml │ │ └── prebackup_pod.yaml └── external_secrets │ ├── templates │ ├── writefreely_admin_credentials.yaml │ ├── smtp-credentials.yaml │ └── mysql_credentials.yaml │ └── values.yaml ├── forgejo └── storage │ └── pvc │ ├── postgres-certs.yaml │ └── templates │ ├── pvc.yaml │ └── pod_config.yaml ├── ghost ├── storage │ └── templates │ │ ├── pvc.yaml │ │ ├── pvc_mysql.yaml │ │ └── prebackup_pod_ghost.yaml └── external_secrets │ └── values.yaml ├── s3_bucket_ingresses ├── .helmignore ├── values.yaml └── templates │ └── ingress.yaml ├── generic-device-plugin └── README.md ├── kubevirt ├── examples │ ├── disks │ │ ├── windows-10-ro.yaml │ │ ├── jammy-cloud-rwo.yaml │ │ ├── lunar-cloud-rwo.yaml │ │ ├── windows-autoconfig.yaml │ │ ├── debian13-cloud-rwo.yaml │ │ ├── deian13-iso-rwo.yaml │ │ ├── fedora-cloud-rwo.yaml │ │ └── virtio-drivers.yaml │ └── machine-sizes │ │ ├── high-cpu.yaml │ │ ├── high-mem.yaml │ │ └── standard.yaml ├── kubevirt-disks-argo-app.yaml └── kubevirt-machine-sizes.yaml ├── generic-app └── README.md ├── scripts └── autoupdate-prometheus-crds.sh └── mysql └── percona-pxc-operator └── pxc_operator_argocd_app.yaml /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: [jessebot] 2 | -------------------------------------------------------------------------------- /minio/backups/testing/restores/.gitignore: -------------------------------------------------------------------------------- 1 | .env 2 | -------------------------------------------------------------------------------- /demo/kepler/README.md: -------------------------------------------------------------------------------- 1 | # Kepler Argo CD Application 2 | -------------------------------------------------------------------------------- /zitadel/backups_and_restores/.gitignore: -------------------------------------------------------------------------------- 1 | .zitadel-restic-password 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/.DS_Store 2 | **/.env 3 | **/*tls.yaml 4 | **/*tls.yml 5 | -------------------------------------------------------------------------------- /grafana_stack/dashboards/values.yaml: 
-------------------------------------------------------------------------------- 1 | dashboards: 2 | deploy_all: true 3 | -------------------------------------------------------------------------------- /matrix/backups_and_restores/.gitignore: -------------------------------------------------------------------------------- 1 | .matrix-restic-password 2 | .restic.env 3 | -------------------------------------------------------------------------------- /badargoart_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/badargoart_small.png -------------------------------------------------------------------------------- /mastodon/small-hack/backups_and_restores/.gitignore: -------------------------------------------------------------------------------- 1 | .mastodon-restic-password 2 | .restic-env 3 | -------------------------------------------------------------------------------- /netmaker/netmaker.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/netmaker/netmaker.png -------------------------------------------------------------------------------- /nextcloud/backups_and_restores/.gitignore: -------------------------------------------------------------------------------- 1 | .nextcloud-restic-password 2 | .restic.env 3 | tmp/** 4 | -------------------------------------------------------------------------------- /cert-manager/cluster-issuers/values.yaml: -------------------------------------------------------------------------------- 1 | # the email address to use for Issuers with acme type 2 | email: '' 3 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world/README.md: -------------------------------------------------------------------------------- 1 | # Hello World with Nginx 2 | 3 | A minimal testing application 4 | -------------------------------------------------------------------------------- /home-assistant/backups_and_restores/.gitignore: -------------------------------------------------------------------------------- 1 | .restic.env 2 | .restic-env 3 | .home-assistant-restic-password 4 | -------------------------------------------------------------------------------- /jellyfin/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # External Secrets for Jellyfin 2 | Includes: 3 | - admin credentials 4 | -------------------------------------------------------------------------------- /k8tz/screenshots/k8tz_app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/k8tz/screenshots/k8tz_app.png -------------------------------------------------------------------------------- /netmaker/netmaker-network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/netmaker/netmaker-network.png -------------------------------------------------------------------------------- /seaweedfs/backups.drawio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/seaweedfs/backups.drawio.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_3.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_3.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_4.png -------------------------------------------------------------------------------- /home-assistant/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # External Secrets For Home Assistant 2 | Includes: 3 | - admin credentials 4 | -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_3.png -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_4.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_3.1.png -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world-vouch/README.md: -------------------------------------------------------------------------------- 1 | # Hello World with Nginx and Vouch 2 | 3 | A minimal testing application 4 | -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_3.1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_3.1.png -------------------------------------------------------------------------------- /coturn/README.md: -------------------------------------------------------------------------------- 1 | # Coturn 2 | 3 | See our helm chart here: [small-hack/coturn-chart](https://github.com/small-hack/coturn-chart) 4 | -------------------------------------------------------------------------------- /k8up/screenshots/k8up-app-of-apps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/k8up/screenshots/k8up-app-of-apps.png -------------------------------------------------------------------------------- /kyverno/README.md: -------------------------------------------------------------------------------- 1 | # Kyverno ArgoCD app 2 | Using this helm chart: https://github.com/kyverno/kyverno/tree/main/charts/kyverno 3 | -------------------------------------------------------------------------------- /matrix/app_of_apps_beta/README.md: -------------------------------------------------------------------------------- 1 | This is the beta version of the matrix stack that includes the Matrix Authentication Service 2 | -------------------------------------------------------------------------------- /peertube/storage/seaweedfs/README.md: 
-------------------------------------------------------------------------------- 1 | Deploy a SeaweedFS cluster as well as k8s Ingresses for each bucket required by PeerTube. 2 | -------------------------------------------------------------------------------- /argocd/docs/keycloak/mapper_details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/mapper_details.png -------------------------------------------------------------------------------- /argocd/docs/screenshots/argo_ingress.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/screenshots/argo_ingress.png -------------------------------------------------------------------------------- /argocd/docs/screenshots/argocd_app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/screenshots/argocd_app.png -------------------------------------------------------------------------------- /demo/pixelfed/storage/seaweedfs/README.md: -------------------------------------------------------------------------------- 1 | Deploy a SeaweedFS cluster as well as k8s Ingresses for each bucket required by pixelfed. 2 | -------------------------------------------------------------------------------- /metallb/crds/l2-advertisement.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: L2Advertisement 4 | metadata: 5 | name: default 6 | -------------------------------------------------------------------------------- /nextcloud/screenshots/nextcloud_app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/nextcloud/screenshots/nextcloud_app.png -------------------------------------------------------------------------------- /vouch-proxy/keycloak/mapper_details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/mapper_details.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_1_create_client.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_1_create_client.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_5_create_groups.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_5_create_groups.png -------------------------------------------------------------------------------- /demo/README.md: -------------------------------------------------------------------------------- 1 | # Directory for Proof of Concept Apps 2 | 3 | Apps in this directory are in testing/development and not ready for deployment. 
4 | -------------------------------------------------------------------------------- /external-secrets-operator/screenshots/eso.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/external-secrets-operator/screenshots/eso.png -------------------------------------------------------------------------------- /ingress-nginx/screenshots/ingress-nginx.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/ingress-nginx/screenshots/ingress-nginx.png -------------------------------------------------------------------------------- /peertube/storage/seaweedfs_with_tolerations/README.md: -------------------------------------------------------------------------------- 1 | Deploy a SeaweedFS cluster as well as k8s Ingresses for each bucket required by PeerTube. 2 | -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_1_create_client.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_1_create_client.png -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_5_create_groups.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_5_create_groups.png -------------------------------------------------------------------------------- /vouch-proxy/screenshots/vouch_app_of_apps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/screenshots/vouch_app_of_apps.png -------------------------------------------------------------------------------- /demo/pixelfed/storage/seaweedfs_with_tolerations/README.md: -------------------------------------------------------------------------------- 1 | Deploy a SeaweedFS cluster as well as k8s Ingresses for each bucket required by pixelfed. 
2 | -------------------------------------------------------------------------------- /external-secrets-operator/screenshots/bweso.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/external-secrets-operator/screenshots/bweso.png -------------------------------------------------------------------------------- /demo/vault/README.md: -------------------------------------------------------------------------------- 1 | # Argo CD Application for Hashicorp Vault 2 | 3 | We're using this helm chart: https://github.com/hashicorp/vault-helm/tree/main 4 | -------------------------------------------------------------------------------- /prometheus/screenshots/prometheus-app-of-apps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/prometheus/screenshots/prometheus-app-of-apps.png -------------------------------------------------------------------------------- /argocd/docs/keycloak/step_2_create_client_scope.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/argocd/docs/keycloak/step_2_create_client_scope.png -------------------------------------------------------------------------------- /grafana_stack/screenshots/prometheus-app-of-apps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/grafana_stack/screenshots/prometheus-app-of-apps.png -------------------------------------------------------------------------------- /prometheus/screenshots/prometheus_stack_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/prometheus/screenshots/prometheus_stack_network.png -------------------------------------------------------------------------------- /vouch-proxy/keycloak/step_2_create_client_scope.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/vouch-proxy/keycloak/step_2_create_client_scope.png -------------------------------------------------------------------------------- /argocd/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | provider: "" 3 | 4 | bitwardenItemID: "" 5 | -------------------------------------------------------------------------------- /grafana_stack/screenshots/prometheus_stack_network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/grafana_stack/screenshots/prometheus_stack_network.png -------------------------------------------------------------------------------- /ingress-nginx/screenshots/ingress-nginx-namespace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/small-hack/argocd-apps/HEAD/ingress-nginx/screenshots/ingress-nginx-namespace.png -------------------------------------------------------------------------------- /nextcloud/app_of_apps_with_tolerations/README.md: -------------------------------------------------------------------------------- 1 | This directory is the same as 
`nextcloud/app_of_apps/`, however you can set affinity and tolerations via Argo CD secret keys 2 | -------------------------------------------------------------------------------- /nvidia_device_plugin/README.md: -------------------------------------------------------------------------------- 1 | The simplest helm chart Argo CD Application to deploy the [NVIDIA Kubernetes device plugin](https://github.com/NVIDIA/k8s-device-plugin). 2 | -------------------------------------------------------------------------------- /metallb/crds/ip-addr-pool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: IPAddressPool 4 | metadata: 5 | name: default 6 | spec: 7 | addresses: {{.address_pool}} 8 | -------------------------------------------------------------------------------- /demo/keycloak/cli_config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | configMapGenerator: 2 | - name: keycloak-config-cli 3 | namespace: keycloak 4 | files: 5 | - config.json=config.json 6 | -------------------------------------------------------------------------------- /demo/minio-certs-helm-chart/values.yaml: -------------------------------------------------------------------------------- 1 | # -- name of the tenant to create certs for 2 | tenant_name: "" 3 | 4 | # -- hostname of the minio console api we think 5 | hostname: "" 6 | -------------------------------------------------------------------------------- /grafana_stack/app_of_apps_with_matrix/README.md: -------------------------------------------------------------------------------- 1 | This grafana stack is just like the one in the directory above; however, Alertmanager uses a webhook to post alerts to a matrix channel. 2 | -------------------------------------------------------------------------------- /prometheus/app_of_apps/loki-cluster-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: loki-cluster-rules 5 | namespace: monitoring 6 | data: 7 | foo: "bar" 8 | -------------------------------------------------------------------------------- /prometheus/app_of_apps_with_matrix/README.md: -------------------------------------------------------------------------------- 1 | This prometheus stack is just like the one in the directory above; however, Alertmanager uses a webhook to post alerts to a matrix channel. 2 | -------------------------------------------------------------------------------- /demo/test-app/postgres-backup/README.md: -------------------------------------------------------------------------------- 1 | # Postgres Backups with K8up 2 | 3 | ## Requirements 4 | 5 | This requires a postgres database. See the [postgres](./postgres) directory for instructions. 
6 | -------------------------------------------------------------------------------- /postgres/operators/zalando/postgres_cluster/values.yaml: -------------------------------------------------------------------------------- 1 | cluster_name: "nextcloud" 2 | user: "nextcloud" 3 | databases: 4 | # nextcloud user is owner of nextcloud database 5 | nextcloud: nextcloud 6 | -------------------------------------------------------------------------------- /cert-manager/cluster-issuers/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | version: 0.0.1 3 | name: cluster-issuers 4 | appVersion: 0.0.1 5 | maintainers: 6 | - name: jessebot 7 | url: https://github.com/jessebot 8 | -------------------------------------------------------------------------------- /tempo/README.md: -------------------------------------------------------------------------------- 1 | # Tempo Argo CD ApplicationSet 2 | 3 | This is an experimental Argo CD ApplicationSet to deploy tempo. 4 | 5 | ⚠️ This is under heavy construction and may not work properly right now. 6 | -------------------------------------------------------------------------------- /nextcloud/no-nginx/phpconfigmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: nextcloud-php-fpm-conf 5 | data: 6 | www.conf: |- 7 | [www] 8 | security.limit_extensions = .php .css .js 9 | -------------------------------------------------------------------------------- /postgres/operators/zalando/README.md: -------------------------------------------------------------------------------- 1 | # Deploy the Zalando Postgresql Operator as an Argo CD Application 2 | 3 | We deploy the following charts: 4 | - https://github.com/zalando/postgres-operator/tree/master/charts 5 | -------------------------------------------------------------------------------- /peertube/app_of_apps_with_tolerations/README.md: -------------------------------------------------------------------------------- 1 | This is the same PeerTube Argo CD Application, but this one includes the option to set Tolerations and Affinity to make sure PeerTube related resources go to a specific node. 2 | -------------------------------------------------------------------------------- /opa/README.md: -------------------------------------------------------------------------------- 1 | # OPA Gatekeeper Argo CD ApplicationSet 2 | 3 | This is a small Argo CD ApplicationSet to install OPA Gatekeeper. 4 | 5 | We use this helm chart: 6 | https://github.com/open-policy-agent/gatekeeper/tree/master 7 | -------------------------------------------------------------------------------- /cert-manager/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | cloudflareBitwardenID: "" 6 | -------------------------------------------------------------------------------- /nextcloud/app_of_apps_with_tolerations/install_apps/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for install_apps. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | affinity: {} 5 | 6 | tolerations: [] 7 | -------------------------------------------------------------------------------- /nextcloud/no-nginx/README.md: -------------------------------------------------------------------------------- 1 | This is a directory for test files related to ditching the baked in nginx container in the nextcloud pod. 2 | 3 | You can see more info here on this saga here: 4 | https://github.com/nextcloud/helm/issues/367 5 | -------------------------------------------------------------------------------- /renovate/external_secrets/templates/generator_ssh_key.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: generators.external-secrets.io/v1alpha1 2 | kind: SSHKey 3 | metadata: 4 | name: renovate-ssh-key-generator 5 | spec: 6 | keyType: "ed25519" 7 | comment: "renovate@bot.fake.tld" 8 | -------------------------------------------------------------------------------- /minio/vanilla/persistence/minio_pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: minio-data 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | -------------------------------------------------------------------------------- /demo/argo-workflows/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # set to bitwarden to use bitwarden as an external secrets provider, or "" to not deploy secrets 2 | # other providers may be supported in the future 3 | provider: "bitwarden" 4 | 5 | workflowsPgsqlConfigBitwardenID: "" 6 | -------------------------------------------------------------------------------- /tempo/app_of_apps/configmap-tempo-runtime.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: tempo-runtime-config 6 | labels: 7 | app.kubernetes.io/name: tempo 8 | data: 9 | overrides.yaml: | 10 | overrides: 11 | null 12 | -------------------------------------------------------------------------------- /valkey/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- valkey Credentials 6 | valkeyCredentialsBitwardenID: "" 7 | 8 | -------------------------------------------------------------------------------- /minio/vanilla/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret stores in the future :) 3 | provider: "" 4 | 5 | rootCredentialsBitwardenID: "" 6 | oidcCredentialsBitwardenID: "" 7 | -------------------------------------------------------------------------------- /demo/argo-workflows/secrets/workflows-pgsql-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: workflows-pgsql-credentials 6 | namespace: argocd 7 | stringData: 8 | password: "your-password-here" 9 | username: "workflows" 10 | -------------------------------------------------------------------------------- 
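The `external_secrets/values.yaml` files throughout this repo all follow the same pattern: a `provider` key that gates whether any templates render at all, plus one or more `*BitwardenID` keys naming the Bitwarden items to pull from. As a rough, hypothetical sketch only (the store, secret, and key names below are placeholders, not one of this repo's actual templates), the kind of Helm-templated ExternalSecret those values typically drive looks something like this:

```yaml
{{- if eq .Values.provider "bitwarden" }}
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: example-credentials
spec:
  secretStoreRef:
    # assumes a ClusterSecretStore backed by the Bitwarden ESO provider
    name: bitwarden-fields
    kind: ClusterSecretStore
  target:
    # the Kubernetes Secret that gets created and kept in sync
    name: example-credentials
  data:
    - secretKey: password
      remoteRef:
        # Bitwarden item ID supplied via values.yaml
        key: {{ .Values.exampleCredentialsBitwardenID }}
        property: password
{{- end }}
```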
/valkey_cluster/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- valkey Credentials 6 | valkeyCredentialsBitwardenID: "" 7 | 8 | -------------------------------------------------------------------------------- /collabora_online/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- collabora admin Credentials 6 | adminCredentialsBitwardenID: "" 7 | -------------------------------------------------------------------------------- /libretranslate/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # other providers may be supported in the future 3 | provider: "true" 4 | 5 | # the item ID of the Bitwarden admin credentials 6 | apiCredentialsBitwardenID: "" 7 | -------------------------------------------------------------------------------- /netmaker/manifests/persistence/mq_pvc.md: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | namespace: netmaker 6 | name: netmaker-mq-pvc 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 8Gi 13 | -------------------------------------------------------------------------------- /vouch-proxy/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # set to bitwarden to use bitwarden as an external secrets provider, or "" to not deploy secrets 2 | # other providers may be supported in the future 3 | provider: "bitwarden" 4 | 5 | vouchOauthConfigBitwardenID: "" 6 | vouchConfigBitwardenID: "" 7 | -------------------------------------------------------------------------------- /demo/argo-workflows/secrets/workflows-s3-artifact-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: workflows-s3-artifact-credentials 6 | namespace: argocd 7 | stringData: 8 | accesskey: "artifacts" 9 | secretkey: "artifacts-key-goes-here" 10 | -------------------------------------------------------------------------------- /demo/juicefs/README.md: -------------------------------------------------------------------------------- 1 | # JuiceFS 2 | 3 | JuiceFS is a High-Performance, Cloud-Native, Distributed File System. It works by placing a fully POSIX-compatible interface in front of Redis and an S3 filesystem. This can be mounted like a drive or used as a PVC with a `ReadWriteMany` access mode. 
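As a minimal sketch of that last point (assuming the JuiceFS CSI driver is installed and exposes a storage class; `juicefs-sc` below is a placeholder name, not something defined in this repo), a `ReadWriteMany` claim would look roughly like:

```yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: shared-data
spec:
  # placeholder storage class served by the JuiceFS CSI driver
  storageClassName: juicefs-sc
  accessModes:
    - ReadWriteMany  # multiple pods on different nodes can mount it read-write
  resources:
    requests:
      storage: 10Gi
```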
4 | 5 | -------------------------------------------------------------------------------- /seaweedfs/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # Use external secrets if set to bitwarden, else don't deploy external secrets 2 | # other providers may be supported in the future 3 | provider: "" 4 | 5 | s3CredentialsBitwardenID: "" 6 | # adminCredentialsBitwardenID: "" 7 | pgsqlCredentialsBitwardenID: "" 8 | -------------------------------------------------------------------------------- /demo/cozy/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # Use external secrets if set to bitwarden, else don't deploy external secrets 2 | # other providers may be supported in the future 3 | provider: "" 4 | 5 | # s3CredentialsBitwardenID: "" 6 | 7 | adminCredentialsBitwardenID: "" 8 | couchDBCredentialsBitwardenID: "" 9 | -------------------------------------------------------------------------------- /grafana_stack/dashboards/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: dashboards 3 | description: Turn json dashboard files into yaml configmaps 4 | 5 | type: application 6 | version: 0.0.1 7 | appVersion: 0.0.0 8 | 9 | maintainers: 10 | - name: "cloudymax" 11 | url: "https://github.com/cloudymax/" 12 | -------------------------------------------------------------------------------- /grafana_stack/dashboards/README.md: -------------------------------------------------------------------------------- 1 | All of these dashboards are imported into grafana automatically. 2 | 3 | We've tested these as of grafana 11.0 and all of them work except nextcloud for some reason. Nextcloud's dashboard does still work on the last version of 10.x though and we'll get it working soonish. 4 | -------------------------------------------------------------------------------- /netmaker/manifests/persistence/shared_data_pvc.md: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | namespace: netmaker 6 | name: netmaker-shared-data-pvc 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 8Gi 13 | -------------------------------------------------------------------------------- /prometheus/dashboards/README.md: -------------------------------------------------------------------------------- 1 | All of these dashboards are imported into grafana automatically. 2 | 3 | We've tested these as of grafana 11.0 and all of them work except nextcloud for some reason. Nextcloud's dashboard does still work on the last version of 10.x though and we'll get it working soonish. 
4 | -------------------------------------------------------------------------------- /alloy/templates/alloy-logs-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: alloy-logs-cfg 6 | data: 7 | config.alloy: |- 8 | {{ range $path, $_ := .Files.Glob "alloy_files/logs-config.alloy" }} 9 | {{- $.Files.Get $path | nindent 4 }} 10 | {{ end }} 11 | -------------------------------------------------------------------------------- /jellyfin/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # other providers may be supported in the future 3 | provider: "true" 4 | 5 | # -- existing kubernetes secret with s3 credentials for the remote backups 6 | s3BackupCredentialsBitwardenID: "" 7 | -------------------------------------------------------------------------------- /alloy/templates/alloy-traces-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: alloy-traces-cfg 6 | data: 7 | config.alloy: |- 8 | {{ range $path, $_ := .Files.Glob "alloy_files/traces-config.alloy" }} 9 | {{- $.Files.Get $path | nindent 4 }} 10 | {{ end }} 11 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world/manifests/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: nginx-hello-service 6 | namespace: nginx-hello 7 | spec: 8 | selector: 9 | app: nginx-hello 10 | ports: 11 | - protocol: TCP 12 | port: 5000 13 | targetPort: 80 14 | -------------------------------------------------------------------------------- /alloy/templates/alloy-metrics-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: alloy-metrics-cfg 6 | data: 7 | config.alloy: |- 8 | {{ range $path, $_ := .Files.Glob "alloy_files/metrics-config.alloy" }} 9 | {{- $.Files.Get $path | nindent 4 }} 10 | {{ end }} 11 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world-vouch/manifests/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: nginx-hello-service 6 | namespace: nginx-hello 7 | spec: 8 | selector: 9 | app: nginx-hello 10 | ports: 11 | - protocol: TCP 12 | port: 5000 13 | targetPort: 80 14 | -------------------------------------------------------------------------------- /cert-manager/README.md: -------------------------------------------------------------------------------- 1 | ## Cert Manager Argo CD App 2 | 3 | If you just want cert manager, just deploy the app in this directory. 4 | 5 | ## App of apps 6 | 7 | If you want to deploy both a bitwarden backed external secret for your cloudflare api token AND the cert manager helm chart app, use the app_of_apps directory here. 
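The repo's actual issuer templates live under `cert-manager/cluster-issuers/templates/` (`production.yaml` and `staging.yaml`). Purely as an illustration of what the Cloudflare token ends up feeding (the secret and key names here are assumptions, not this repo's exact values), an ACME ClusterIssuer using a Cloudflare DNS01 solver generally looks like:

```yaml
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    # set via cluster-issuers/values.yaml
    email: you@example.com
    privateKeySecretRef:
      name: letsencrypt-prod-key
    solvers:
      - dns01:
          cloudflare:
            # assumed names for the secret created from the Bitwarden item
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: token
```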
8 | -------------------------------------------------------------------------------- /prometheus/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- zitadel OIDC Credentials 6 | oidcCredentialsBitwardenID: "" 7 | thanosObjstoreBitwardenID: "" 8 | lokiConfigBitwardenID: "" 9 | -------------------------------------------------------------------------------- /matrix/storage/pvc/bridges_pvc.md: -------------------------------------------------------------------------------- 1 | ```yaml 2 | --- 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: matrix 7 | name: matrix-bridges 8 | spec: 9 | storageClassName: local-path 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Mi 15 | ``` 16 | -------------------------------------------------------------------------------- /demo/linkerd/README.md: -------------------------------------------------------------------------------- 1 | ## Linkerd Argo CD App of Apps 2 | 3 | [Linkerd](https://linkerd.io/) is "the world's most advanced service mesh". The helm charts are hosted at [linkerd/linkerd2](https://github.com/linkerd/linkerd2). 4 | 5 | ## Sync Waves 6 | 7 | - Wave 1: Linkerd CRDs helm chart install 8 | - Wave 2: Linkerd control plane helm chart install 9 | -------------------------------------------------------------------------------- /demo/argo-workflows/secrets/workflows-s3-postgres-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: workflows-s3-postgres-credentials 6 | namespace: argocd 7 | stringData: 8 | accesskey: "workflows-postgres" 9 | secretkey: "postgres-key-goes-here" 10 | resticRepoPassword: "restic-repo-password-goes-here" 11 | -------------------------------------------------------------------------------- /demo/haproxy/haproxy.sh: -------------------------------------------------------------------------------- 1 | docker run -d --name haproxy --user root \ 2 | -v /etc/haproxy:/usr/local/etc/haproxy:ro \ 3 | --sysctl net.ipv4.ip_unprivileged_port_start=0 \ 4 | -p 178.162.171.22:80:80 \ 5 | -p 178.162.171.22:443:443 \ 6 | -p 178.162.171.22:4244:4244 \ 7 | -p 178.162.171.22:8404:8404 \ 8 | haproxy:latest 9 | -------------------------------------------------------------------------------- /grafana_stack/scrape-configs/additional-scrape-config-example.md: -------------------------------------------------------------------------------- 1 | examples for additional scrape configs: 2 | ```yaml 3 | - job_name: "nginx-ingress" 4 | static_configs: 5 | - targets: ["ingress-nginx-controller-metrics.ingress.svc.cluster.local:10254"] 6 | - job_name: "opnsense" 7 | static_configs: 8 | - targets: ["yourddnsdomain.tld:9100"] 9 | ``` 10 | -------------------------------------------------------------------------------- /prometheus/scrape-configs/additional-scrape-config-example.md: -------------------------------------------------------------------------------- 1 | examples for additional scrape configs: 2 | ```yaml 3 | - job_name: "nginx-ingress" 4 | static_configs: 5 | - targets: ["ingress-nginx-controller-metrics.ingress.svc.cluster.local:10254"] 6 | - job_name: "opnsense" 7 | static_configs: 8 | - targets: ["yourddnsdomain.tld:9100"] 9 | ``` 
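For context on where snippets like the one above usually end up: with the kube-prometheus-stack chart they are typically passed through the Prometheus spec's `additionalScrapeConfigs` field (a sketch against that chart's values layout, which can vary between chart versions):

```yaml
prometheus:
  prometheusSpec:
    additionalScrapeConfigs:
      - job_name: "nginx-ingress"
        static_configs:
          - targets: ["ingress-nginx-controller-metrics.ingress.svc.cluster.local:10254"]
```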
10 | -------------------------------------------------------------------------------- /netmaker/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # set to bitwarden to use bitwarden as an external secrets provider, or "" to not deploy secrets 2 | # other providers may be supported in the future 3 | provider: "bitwarden" 4 | 5 | netmakerMQConfigBitwardenID: "" 6 | netmakerOauthConfigBitwardenID: "" 7 | netmakerPgsqlConfigBitwardenID: "" 8 | netmakerAdminCredentialsBitwardenID: "" 9 | -------------------------------------------------------------------------------- /demo/minio-certs-helm-chart/templates/cert_issuer.yaml: -------------------------------------------------------------------------------- 1 | # taken directly from the example here: 2 | # https://github.com/minio/operator/blob/master/examples/kustomization/tenant-certmanager/certificates.yaml 3 | 4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | name: tenant-certmanager-issuer 8 | namespace: minio 9 | spec: 10 | selfSigned: { } 11 | -------------------------------------------------------------------------------- /tempo/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- existing kubernetes secret with s3 mimir credentials 6 | s3CredentialsBitwardenID: "" 7 | 8 | # -- valkey Credentials for loki 9 | valkeyCredentialsBitwardenID: "" 10 | -------------------------------------------------------------------------------- /k8up/README.md: -------------------------------------------------------------------------------- 1 | # K8up via ArgoCD 2 | This directory is used to deploy a [k8up](https://github.com/k8up-io/k8up/) app via Argo CD. [K8up](https://k8up.io) is a Kubernetes-native backup utility that uses [restic](https://restic.net/) under the hood to upload your data to an S3 endpoint. 3 | 4 | 5 | 6 | ## Sync Waves 7 | 1. 
k8up helm chart 8 | -------------------------------------------------------------------------------- /renovate/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- git server platform (github, gitea, forgejo, etc) 2 | platform: "forgejo" 3 | # -- git server endpoint to contact with renovate 4 | endpoint: "https://git.smallhack.org/api/v1" 5 | # -- username of the renovate user for that endpoint 6 | username: "renovate" 7 | 8 | # -- bitwarden id for your renovate personal access token 9 | patBitwardenID: "" 10 | -------------------------------------------------------------------------------- /demo/local-path-provisioner/fast-raid.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: fast-raid 5 | annotations: 6 | defaultVolumeType: local 7 | provisioner: rancher.io/local-path 8 | parameters: 9 | nodePath: /mnt/raid1 10 | pathPattern: "{{ .PVC.Namespace }}/{{ .PVC.Name }}" 11 | volumeBindingMode: WaitForFirstConsumer 12 | reclaimPolicy: Delete 13 | -------------------------------------------------------------------------------- /demo/local-path-provisioner/slow-raid.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: slow-raid 5 | annotations: 6 | defaultVolumeType: local 7 | provisioner: rancher.io/local-path 8 | parameters: 9 | nodePath: /mnt/raid0 10 | pathPattern: "{{ .PVC.Namespace }}/{{ .PVC.Name }}" 11 | volumeBindingMode: WaitForFirstConsumer 12 | reclaimPolicy: Delete 13 | -------------------------------------------------------------------------------- /s3_persistence_and_backups/templates/minio_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "minio" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: minio-data 7 | spec: 8 | storageClassName: {{ .Values.pvc_storageClassName }} 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: {{ .Values.pvc_capacity }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /demo/pixelfed/README.md: -------------------------------------------------------------------------------- 1 | # PixelFed Argo CD App of Apps 2 | 3 | This directory is meant to deploy the [Pixelfed](https://pixelfed.org/) app, which is a federated FOSS Instagram replacement app. 4 | 5 | This uses the [small-hack/pixelfed-chart](https://github.com/small-hack/pixelfed-chart/tree/main) helm chart. 
6 | 7 | PixelFed docs on config are here: https://docs.pixelfed.org/running-pixelfed/installation.html 8 | -------------------------------------------------------------------------------- /gotosocial/storage/pvc/templates/gotosocial_pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: gotosocial 6 | annotations: 7 | k8up.io/backup: 'true' 8 | spec: 9 | storageClassName: {{ .Values.pvc.storageClassName }} 10 | accessModes: 11 | - {{ .Values.pvc.accessMode }} 12 | resources: 13 | requests: 14 | storage: {{ .Values.pvc.storage }} 15 | -------------------------------------------------------------------------------- /writefreely/README.md: -------------------------------------------------------------------------------- 1 | # WriteFreely Argo CD Application 2 | 3 | This is an Argo CD Application to deploy [WriteFreely](https://writefreely.org/). 4 | 5 | We're currently using our own helm chart located at [small-hack/writefreely-helm-chart](https://github.com/small-hack/writefreely-helm-chart). 6 | 7 | ## Sync Waves 8 | 9 | 1. External Secrets and PVCs 10 | 2. MySQL 11 | 3. WriteFreely docker web app 12 | -------------------------------------------------------------------------------- /demo/argo-workflows/app-of-apps/event-bus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: EventBus 3 | metadata: 4 | name: default 5 | namespace: argocd 6 | spec: 7 | nats: 8 | native: 9 | # Optional, defaults to 3. If it is < 3, set it to 3, that is the minimal requirement. 10 | replicas: 3 11 | # Optional, authen strategy, "none" or "token", defaults to "none" 12 | auth: none 13 | -------------------------------------------------------------------------------- /demo/juicefs/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # Use external secrets. Set to bitwarden to use this chart. other providers may be supported in the future 2 | provider: "bitwarden" 3 | 4 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 5 | s3_provider: "seaweedfs" 6 | 7 | # -- existing kubernetes secret with s3 postgres credentials 8 | s3CredentialsBitwardenID: "juicefs-s3-credentials" 9 | 10 | -------------------------------------------------------------------------------- /home-assistant/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # other providers may be supported in the future 3 | provider: "true" 4 | 5 | # the item ID of the Bitwarden admin credentials 6 | bitwardenAdminCredentialsID: "" 7 | 8 | # -- existing kubernetes secret with s3 credentials for the remote backups 9 | s3BackupCredentialsBitwardenID: "" 10 | -------------------------------------------------------------------------------- /forgejo/storage/pvc/postgres-certs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Holds postgres certs 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: forgejo 7 | name: postgres-certs 8 | annotations: 9 | k8up.io/backup: 'true' 10 | spec: 11 | storageClassName: {{ .Values.pvc_storageClassName }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 64Mi 17 | -------------------------------------------------------------------------------- /.github/renovate-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "onboarding": false, 4 | "username": "renovate-release", 5 | "gitAuthor": "Renovate Bot ", 6 | "platform": "github", 7 | "repositories": [ 8 | "small-hack/argocd-apps" 9 | ], 10 | "allowedPostUpgradeCommands": ["^bash scripts/autoupdate-prometheus-crds.sh"] 11 | } 12 | -------------------------------------------------------------------------------- /demo/wg-access-server/manifests/persistence/wg-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic persistent volume claim for nexctcloud data (/var/www/html) to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: wireguard 7 | name: wireguard-pvc 8 | annotations: 9 | k8up.io/backup: "true" 10 | spec: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 8Gi 16 | -------------------------------------------------------------------------------- /demo/appflowy/README.md: -------------------------------------------------------------------------------- 1 | # Appflowy Argo CD ApplicationSet 2 | 3 | Currently testing the helm chart here: 4 | https://github.com/jessebot/appflowy-helm/pull/1 5 | 6 | docker images: 7 | https://hub.docker.com/r/appflowyinc/appflowy_cloud/tags 8 | https://hub.docker.com/r/appflowyinc/admin_frontend/tags 9 | 10 | ## Sync Waves 11 | 12 | 1. external secrets and PVCs 13 | 2. seaweedfs 14 | 3. postgres 15 | 4. gotrue 16 | 5. appflowy 17 | -------------------------------------------------------------------------------- /demo/cozy/README.md: -------------------------------------------------------------------------------- 1 | # Cozy Argo CD ApplicationSet 2 | 3 | We're demoing with this helm chart: https://github.com/jessebot/cozy-helm-chart which uses this docker image: https://github.com/cozy/cozy-stack/tree/0fe78134b2d09c73813be48274c66ed8582328e6/scripts/docker/production 4 | 5 | ## Sync Waves 6 | 1. 
couchdb: https://github.com/apache/couchdb-helm/tree/main/couchdb 7 | 2. cozy-stack helm chart: https://github.com/jessebot/cozy-helm-chart 8 | -------------------------------------------------------------------------------- /prometheus/app_of_apps/secrets/thanos-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: thanos-objstore-config 5 | namespace: monitoring 6 | type: opaque 7 | stringData: 8 | objstore.yml: |- 9 | type: s3 10 | config: 11 | bucket: buildstars-thanos 12 | endpoint: s3.us-west-004.backblazeb2.com 13 | region: us-west-004 14 | insecure: false 15 | access_key: "" 16 | secret_key: "" 17 | -------------------------------------------------------------------------------- /ghost/storage/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | name: ghost-data 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.pvc_storageClassName }} 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: {{ .Values.pvc_capacity }} 16 | -------------------------------------------------------------------------------- /nextcloud/no-nginx/fastcgi_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | annotations: 5 | argocd.argoproj.io/hook: PreSync 6 | name: nextcloud-fastcgi-cm 7 | # removed fastcgi options: 8 | data: 9 | modHeadersAvailable: "true" 10 | front_controller_active: "true" 11 | DOCUMENT_ROOT: "/var/www/html" 12 | SCRIPT_FILENAME: "$document_root$fastcgi_script_name" 13 | PATH_INFO: "$fastcgi_path_info" 14 | HTTPS: "1" 15 | -------------------------------------------------------------------------------- /renovate/external_secrets/templates/renovate-ssh-key.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: renovate-ssh-key 5 | spec: 6 | refreshInterval: "1d0m0s" 7 | target: 8 | name: renovate-key-secret 9 | dataFrom: 10 | - sourceRef: 11 | generatorRef: 12 | apiVersion: generators.external-secrets.io/v1alpha1 13 | kind: SSHKey 14 | name: renovate-ssh-key-generator 15 | -------------------------------------------------------------------------------- /peertube/storage/pvc/templates/peertube_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: peertube-data 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /demo/pixelfed/storage/pvc/templates/peertube_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: pixelfed-data 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ 
.Values.pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /forgejo/storage/pvc/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: forgejo 7 | name: forgejo 8 | labels: 9 | forgejo-pvc: "true" 10 | spec: 11 | storageClassName: {{ .Values.pvc_storageClassName }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.pvc_capacity }} 17 | -------------------------------------------------------------------------------- /jellyfin/storage/pvc/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /nextcloud/storage/pvc/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /s3_bucket_ingresses/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /argocd/external_secrets/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/modsecurity_exception_files/vnc.conf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------- 2 | # Restricted File Access Attempt /package.json 3 | # Rule ID #930130 4 | # ------------------------------------------------------------------------- 5 | SecRule REQUEST_URI "@streq /package.json" \ 6 | "id:180000,\ 7 | phase:1,\ 8 | ver:'vnc-rule-exclusions-plugin/1.0.0',\ 9 | allow,\ 10 | t:none,\ 11 | nolog" 12 | -------------------------------------------------------------------------------- /jellyfin/storage/pvc/templates/jellyfin_web_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.web_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: jellyfin-web 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.web_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.web_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.web_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /renovate/external_secrets/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /writefreely/storage/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: writefreely 7 | name: writefreely 8 | annotations: 9 | k8up.io/backup: 'true' 10 | spec: 11 | storageClassName: {{ .Values.pvc_storageClassName }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.pvc_capacity }} 17 | -------------------------------------------------------------------------------- /demo/openbao/README.md: -------------------------------------------------------------------------------- 1 | # Argo CD Application for OpenBao 2 | 3 | We're using our own helm chart, [jessebot/openbao-helm](https://github.com/jessebot/openbao-helm), until the official [openbao/openbao-helm](https://github.com/openbao/openbao-helm) is ready. 
4 | 5 | ## Status 6 | 7 | We're currently waiting on a PR or two before we can move forward with official support for openbao: 8 | 9 | - https://github.com/openbao/openbao-helm/issues/3 - release includes official docker hub images 10 | -------------------------------------------------------------------------------- /matrix/storage/pvc/templates/media_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.media_pvc.enabled }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | namespace: matrix 6 | name: matrix-media 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.media_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.media_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.media_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world-vouch/manifests/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-hello-web-server 6 | namespace: nginx-hello 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: nginx-hello 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx-hello 15 | spec: 16 | containers: 17 | - name: nginx-hello 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world/manifests/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: nginx-hello-web-server 6 | namespace: nginx-hello 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: nginx-hello 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx-hello 15 | spec: 16 | containers: 17 | - name: nginx-hello 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | 22 | -------------------------------------------------------------------------------- /home-assistant/storage/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: home-assistant 7 | name: home-assistant 8 | annotations: 9 | k8up.io/backup: 'true' 10 | spec: 11 | storageClassName: {{ .Values.pvc_storageClassName }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.pvc_capacity }} 17 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /jellyfin/storage/pvc/templates/jellyfin_media_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.media_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: jellyfin-media 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.media_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.media_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.media_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /writefreely/storage/templates/pvc_mysql.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: writefreely 7 | name: writefreely-mysql 8 | annotations: 9 | k8up.io/backup: 'true' 10 | spec: 11 | storageClassName: {{ .Values.pvc_storageClassName }} 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: {{ .Values.pvc_capacity }} 17 | -------------------------------------------------------------------------------- /jellyfin/storage/pvc/templates/jellyfin_config_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.config_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: jellyfin-config 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.config_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.config_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.config_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /nextcloud/storage/pvc/templates/nextcloud_files_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.files_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: nextcloud-files 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.files_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.files_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.files_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /nextcloud/storage/pvc/templates/nextcloud_config_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.config_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: nextcloud-config 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.config_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.config_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.config_pvc.storage }} 16 | {{- end }} 17 | 
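The jellyfin and nextcloud PVC templates above are all driven by the same value shape: an `enabled` toggle (compared against the string "true") plus a `storageClassName`, `accessMode`, and `storage` size per claim. A minimal values sketch for the nextcloud claims might look like the following; the storage class and sizes here are placeholder assumptions, not values taken from this repo:

```yaml
# hypothetical values.yaml sketch for the nextcloud PVC templates above
config_pvc:
  enabled: "true"              # the templates compare against the string "true"
  storageClassName: "local-path"
  accessMode: "ReadWriteOnce"
  storage: "10Gi"

files_pvc:
  enabled: "true"
  storageClassName: "local-path"
  accessMode: "ReadWriteOnce"
  storage: "100Gi"
```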
-------------------------------------------------------------------------------- /postgres/operators/zalando/postgres_cluster/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /demo/infisical/README.md: -------------------------------------------------------------------------------- 1 | # Infisical is a free Apache 2.0 licensed secrets manager and operator 2 | This is an app of apps that deploys both the Infisical k8s apps via their respective helm charts: 3 | app/ui/api AND the Infisical Kubernetes Secrets Operator. 4 | 5 | To deploy ONLY one or the other Infisical k8s applications, see the directories below this one. 6 | 7 | ⚠️ This is new and only kinda tested 🤷 8 | 9 | # sync waves 10 | 1. Infisical web application, db, and api 11 | 2. Infisical secrets operator 12 | -------------------------------------------------------------------------------- /minio/backups/testing/restores/.env_sample: -------------------------------------------------------------------------------- 1 | # nextcloud-backups-bucket is the name of YOUR b2 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/nextcloud-backups-bucket" 3 | 4 | # in this file, you need to have a single line with your restic repo password. Make sure it's `chmod`ed to 600 and has only your user as the group. 5 | export RESTIC_PASSWORD_FILE=/etc/restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="012x11f5584568299998888zz" 8 | export AWS_SECRET_ACCESS_KEY="K012eFG6971Sshi/qrSSS897QC1dBfd" 9 | -------------------------------------------------------------------------------- /matrix/storage/pvc/templates/signingkey_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.signing_key_pvc.enabled }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | namespace: matrix 6 | name: matrix-signing-key 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.signing_key_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.signing_key_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.signing_key_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /s3_bucket_ingresses/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for s3_bucket_ingresses. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | # -- Ingress class name. 
defaults to nginx 6 | className: "nginx" 7 | 8 | # -- any annotations you want on this Kubernetes Ingress 9 | annotations: {} 10 | 11 | # -- main s3 hostname in {bucket}.{s3_hostname} We will template the bucket names 12 | s3_hostname: "" 13 | 14 | # -- all bucket names to create ingresses and certs for 15 | buckets: [] 16 | -------------------------------------------------------------------------------- /generic-device-plugin/README.md: -------------------------------------------------------------------------------- 1 | # Generic Device Plugin 2 | 3 | This installs the [squat/generic-device-plugin](https://github.com/squat/generic-device-plugin/tree/main), which is recommended for exposing generic devices such as USB devices to your k8s pods. This can useful if you have an iot coordinator device such as the conbee 2 that you are using with deconz or home assistant. You can read more about device plugins in the [Kubernetes docs](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/). 4 | -------------------------------------------------------------------------------- /matrix/storage/pvc/templates/synapse_config.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.synapse_config_pvc.enabled }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | namespace: matrix 6 | name: matrix-synapse-config 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.synapse_config_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.synapse_config_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.synapse_config_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /argocd/manifests/apps/nextcloud_app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: nextcloud 5 | spec: 6 | destination: 7 | name: '' 8 | namespace: nextcloud 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | path: nextcloud/ 12 | repoURL: 'https://github.com/small-hack/argocd-apps.git' 13 | targetRevision: HEAD 14 | sources: [] 15 | project: nextcloud 16 | syncPolicy: 17 | syncOptions: 18 | - ApplyOutOfSyncOnly=true 19 | - CreateNamespace=true 20 | -------------------------------------------------------------------------------- /demo/keycloak/cli_config/config-templates/user.json: -------------------------------------------------------------------------------- 1 | { 2 | "username" : "example-user", 3 | "enabled" : true, 4 | "totp" : false, 5 | "emailVerified" : true, 6 | "firstName" : "example", 7 | "lastName" : "user", 8 | "email" : "example-user@example.com", 9 | "disableableCredentialTypes" : [ ], 10 | "requiredActions" : [ ], 11 | "notBefore" : 0, 12 | "access" : { 13 | "manageGroupMembership" : true, 14 | "view" : true, 15 | "mapRoles" : true, 16 | "impersonate" : true, 17 | "manage" : true 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/windows-10-ro.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "windows10-iso" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: 
"https://f004.backblazeb2.com/file/buildstar-public-share/windows10.iso" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 8Gi 18 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/jammy-cloud-rwo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "jammy" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 3Gi 18 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/lunar-cloud-rwo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "noble" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 4Gi 18 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/windows-autoconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "win10-answers" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://f004.backblazeb2.com/file/buildstar-public-share/config.iso" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | -------------------------------------------------------------------------------- /demo/keycloak/persistence/postgres_persistence.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: keycloak 7 | name: keycloak-postgresql 8 | annotations: 9 | k8up.io/backupcommand: sh -c 'PGDATABASE="$POSTGRES_DB" PGUSER="$POSTGRES_USER" PGPASSWORD="$POSTGRES_PASSWORD" pg_dump --clean' 10 | k8up.io/file-extension: .sql 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 15Gi 17 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/debian13-cloud-rwo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "debian13" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: 
"https://cloud.debian.org/images/cloud/trixie/latest/debian-13-generic-arm64.qcow2" 11 | spec: 12 | storageClassName: fast-raid 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 32Gi 18 | -------------------------------------------------------------------------------- /postgres/backups/examples/restore-target-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: restore-test-mfw 6 | namespace: default 7 | annotations: 8 | # set to "true" to include in future backups 9 | k8up.io/backup: "false" 10 | # Optional: 11 | #labels: 12 | # app: multi-file-writer 13 | spec: 14 | # Optional: 15 | storageClassName: local-path 16 | accessModes: 17 | - ReadWriteOnce 18 | resources: 19 | requests: 20 | # Must be sufficient to hold your data 21 | storage: 16Gi 22 | -------------------------------------------------------------------------------- /netmaker/manifests/persistence/postgres_persistence.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | namespace: netmaker 7 | name: netmaker-postgresql 8 | annotations: 9 | k8up.io/backupcommand: sh -c 'PGDATABASE="$POSTGRES_DB" PGUSER="$POSTGRES_USER" PGPASSWORD="$POSTGRES_PASSWORD" pg_dump --clean' 10 | k8up.io/file-extension: .sql 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 8Gi 17 | -------------------------------------------------------------------------------- /postgres/backups/k8up-test/restore-test/target-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: restore-test-mfw 6 | namespace: default 7 | annotations: 8 | # set to "true" to include in future backups 9 | k8up.io/backup: "false" 10 | # Optional: 11 | #labels: 12 | # app: multi-file-writer 13 | spec: 14 | # Optional: 15 | storageClassName: longhorn 16 | accessModes: 17 | - ReadWriteMany 18 | resources: 19 | requests: 20 | # Must be sufficient to hold your data 21 | storage: 16Gi 22 | -------------------------------------------------------------------------------- /zitadel/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # other providers may be supported in the future 3 | provider: "true" 4 | 5 | bitwardenCoreItemID: "" 6 | bitwardenDBItemID: "" 7 | 8 | # s3 related credentials 9 | s3_provider: "seaweedfs" 10 | s3AdminCredentialsBitwardenID: "" 11 | s3PostgresCredentialsBitwardenID: "" 12 | s3BackupsCredentialsBitwardenID: "" 13 | 14 | # smtp credentials 15 | smtpCredentialsBitwardenID: "" 16 | 17 | # fake postgresql credentials 18 | postgresqlBitwardenID: "" 19 | -------------------------------------------------------------------------------- /matrix/storage/pvc/templates/mautrix_discord_bridge_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.mautrix_discord_bridge_pvc.enabled }} 2 | --- 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | name: mautrix-discord-bridge-sqlite 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ 
.Values.mautrix_discord_bridge_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.mautrix_discord_bridge_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.mautrix_discord_bridge_pvc.storage }} 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/deian13-iso-rwo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "debian13-iso" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://cdimage.debian.org/cdimage/trixie_di_rc3/amd64/iso-dvd/debian-trixie-DI-rc3-amd64-DVD-1.iso" 11 | spec: 12 | storageClassName: fast-raid 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 5Gi 18 | -------------------------------------------------------------------------------- /demo/cozy/secret-couchdb.md: -------------------------------------------------------------------------------- 1 | Secret for testing this app with existing secrets: 2 | 3 | ```yaml 4 | --- 5 | # this secret is for testing only! 6 | apiVersion: v1 7 | kind: Secret 8 | metadata: 9 | name: couchdb-couchdb 10 | type: opaque 11 | stringData: 12 | host: couchdb-svc-couchdb 13 | port: "5984" 14 | protocol: http 15 | user: couchadmin 16 | password: ilovemyfriendstodayandtomorrowandthenextday 17 | adminUsername: couchadmin 18 | adminPassword: ilovemyfriendstodayandtomorrowandthenextday 19 | erlangCookie: kjlfedjfkladshjkghjakslghjklasfhjkldshfjkdlahjkf 20 | ``` 21 | -------------------------------------------------------------------------------- /demo/garage/README.md: -------------------------------------------------------------------------------- 1 | # Garage Argo CD Application 2 | 3 | [Garage](https://git.deuxfleurs.fr/Deuxfleurs/garage) is an S3 compatible Object Store. 4 | 5 | We're experimenting with using this [helm chart](https://git.deuxfleurs.fr/Deuxfleurs/garage/src/branch/main/script/helm/garage). 6 | 7 | So far we're inclined to not use this, as we'd need to maintain one of the following: 8 | - a special set of K8s RBAC config and K8s job with an Argo CD resource hook to execute all the commands in the config.sh in this directory. 
9 | - our own docker container and helm chart to provide shell access 10 | -------------------------------------------------------------------------------- /renovate/external_secrets/templates/renovate-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: renovate-config 6 | data: 7 | renovate.json: | 8 | { 9 | "platform": "{{ .Values.platform }}", 10 | "endpoint": "{{ .Values.endpoint }}", 11 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 12 | "username": "{{ .Values.username }}", 13 | "printConfig": true, 14 | "gitAuthor": "Renovate Bot ", 15 | "allowedPostUpgradeCommands": ["^scripts"], 16 | "autodiscover": true 17 | } 18 | -------------------------------------------------------------------------------- /valkey/README.md: -------------------------------------------------------------------------------- 1 | # Valkey Argo CD Application and Valkey Cluster Argo CD ApplicationSet 2 | 3 | [Valkey](https://valkey.io) is an open source (BSD) high-performance key/value datastore that supports a variety workloads such as caching, message queues, and can act as a primary database. 4 | 5 | This directory contains both an Application for deploying the [Bitnami Valkey helm chart](https://github.com/bitnami/charts/tree/main/bitnami/valkey). 6 | 7 | ## Sync Waves 8 | 9 | 1. External Secret for Valkey Admin Credentials from Bitwarden 10 | 2. Valkey Application - the helm chart application 11 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | workflow_dispatch: 5 | #pull_request: 6 | #paths-ignore: 7 | # - 'README.md' 8 | # - '**/README.md' 9 | # - '.github/**' 10 | 11 | jobs: 12 | lint: 13 | name: Lint YAML 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5 18 | with: 19 | fetch-depth: "0" 20 | 21 | - uses: ibiqlik/action-yamllint@v3 22 | with: 23 | config_data: "{extends: default, rules: {line-length: disable}}" 24 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/fedora-cloud-rwo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "fedora" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://download.fedoraproject.org/pub/fedora/linux/releases/39/Cloud/x86_64/images/Fedora-Cloud-Base-39-1.5.x86_64.qcow2" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 5Gi 18 | -------------------------------------------------------------------------------- /kubevirt/kubevirt-disks-argo-app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: kubevirt-disks 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "5" 9 | spec: 10 | project: kubevirt 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: kubevirt 14 | source: 15 | repoURL: https://github.com/small-hack/argocd-apps.git 16 | path: 
kubevirt/examples/disks 17 | syncPolicy: 18 | syncOptions: 19 | - ApplyOutOfSyncOnly=true 20 | automated: 21 | selfHeal: true 22 | -------------------------------------------------------------------------------- /valkey_cluster/README.md: -------------------------------------------------------------------------------- 1 | # Valkey Argo CD Application and Valkey Cluster Argo CD ApplicationSet 2 | 3 | [Valkey Cluster](https://valkey.io) is an open source (BSD) high-performance key/value and scalable datastore that supports a variety workloads such as caching, message queues, and can act as a primary database. 4 | 5 | Deploys the [bitnami valkey-cluster helm chart](https://github.com/bitnami/charts/tree/main/bitnami/valkey-cluster). 6 | 7 | ## Sync Waves 8 | 9 | 1. External Secret for Valkey Admin Credentials from Bitwarden 10 | 2. Valkey Cluster Application - the helm chart application 11 | -------------------------------------------------------------------------------- /kubevirt/examples/disks/virtio-drivers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: "virtio-drivers" 6 | labels: 7 | app: containerized-data-importer 8 | annotations: 9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true" 10 | cdi.kubevirt.io/storage.import.endpoint: "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.240-1/virtio-win-0.1.240.iso" 11 | spec: 12 | storageClassName: local-path 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 2Gi 18 | -------------------------------------------------------------------------------- /demo/juicefs/juicefs_argocd_app_of_apps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: juicef-app-of-apps 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: juicefs 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: juicefs 14 | source: 15 | repoURL: https://github.com/small-hack/argocd-apps.git 16 | path: demo/juicefs/app_of_apps/ 17 | syncPolicy: 18 | syncOptions: 19 | - ApplyOutOfSyncOnly=true 20 | automated: 21 | selfHeal: true 22 | -------------------------------------------------------------------------------- /matrix/storage/pvc/templates/matrix_authentication_service_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.matrix_authentication_service_config_pvc.enabled }} 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: mas-config 6 | annotations: 7 | k8up.io/backup: 'true' 8 | spec: 9 | storageClassName: {{ .Values.matrix_authentication_service_config_pvc.storageClassName }} 10 | accessModes: 11 | - {{ .Values.matrix_authentication_service_config_pvc.accessMode }} 12 | resources: 13 | requests: 14 | storage: {{ .Values.matrix_authentication_service_config_pvc.storage }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /kubevirt/kubevirt-machine-sizes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: kubevirt-machine-sizes 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "5" 9 | spec: 10 | project: 
kubevirt 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: kubevirt 14 | source: 15 | repoURL: https://github.com/small-hack/argocd-apps.git 16 | path: kubevirt/examples/machine-sizes/ 17 | syncPolicy: 18 | syncOptions: 19 | - ApplyOutOfSyncOnly=true 20 | automated: 21 | selfHeal: true 22 | -------------------------------------------------------------------------------- /demo/artifactory/README.md: -------------------------------------------------------------------------------- 1 | # Jfrog Artifactory 2 | 3 | JFrog Artifactory is the single solution for housing and managing all the artifacts, binaries, packages, files, containers, and components for use throughout your software supply chain. 4 | 5 | JFrog Artifactory serves as your central hub for DevOps, integrating with your tools and processes to improve automation, increase integrity, and incorporate best practices along the way. 6 | 7 | - https://jfrog.com/help/r/jfrog-installation-setup-documentation/install-artifactory-single-node-with-helm-charts 8 | - https://github.com/jfrog/charts/tree/master/stable/artifactory-oss 9 | -------------------------------------------------------------------------------- /demo/oauth2-proxy/README.md: -------------------------------------------------------------------------------- 1 | # Oauth2 Proxy Argo CD template 2 | 3 | This Argo CD app template will install external secrets (using the ExternalSecret CRD from the External Secrets Operator) from another private repo in sync wave 1. 4 | 5 | Then, in sync wave 2, it installs the oauth2-proxy helm chart from this repo here: 6 | [oauth2-proxy:/manifests/helm/oauth2-proxy](https://github.com/oauth2-proxy/manifests/tree/main/helm/oauth2-proxy) 7 | 8 | Currently trying to get it working with the Keycloak provider, which is in alpha support at time of writing. Out of the box, the helm chart already supports Google as the primary provider. 9 | -------------------------------------------------------------------------------- /jellyfin/README.md: -------------------------------------------------------------------------------- 1 | # Jellyfin Argo CD Application 2 | 3 | An Argo CD App of apps for deploying [Jellyfin](https://jellyfin.org/). 4 | 5 | Screenshot of the Argo CD web interface for the Jellyfin app of apps, showing 3 apps: external secrets app, helm chart app, and PVC app 6 | 7 | 8 | # Sync Waves 9 | 10 | 1. Jellyfin's config and media Persistent Volume Claims AND External Secrets for S3 backups 11 | 2. 
[Jellyfin helm chart](https://github.com/jellyfin/jellyfin-helm/tree/master/charts/jellyfin) 12 | -------------------------------------------------------------------------------- /cert-manager/cluster-issuers/templates/production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-prod 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: {{ .Values.email }} 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: letsencrypt-prod 15 | # Enable the HTTP-01 challenge provider 16 | solvers: 17 | - http01: 18 | ingress: 19 | class: nginx 20 | -------------------------------------------------------------------------------- /postgres/backups/examples/operator-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: acid.zalan.do/v1 2 | kind: postgresql 3 | metadata: 4 | labels: 5 | team: 6 | name: k8up-test 7 | namespace: default 8 | spec: 9 | allowedSourceRanges: null 10 | databases: 11 | : 12 | numberOfInstances: 1 13 | postgresql: 14 | version: '15' 15 | resources: 16 | limits: 17 | cpu: 500m 18 | memory: 500Mi 19 | requests: 20 | cpu: 100m 21 | memory: 100Mi 22 | teamId: 23 | users: 24 | k8up: [] 25 | volume: 26 | iops: 3000 27 | size: 10Gi 28 | -------------------------------------------------------------------------------- /postgres/backups/k8up-test/test-database/database_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: k8up-pg-backup-test-postgres 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "3" 9 | spec: 10 | destination: 11 | namespace: default 12 | server: 'https://kubernetes.default.svc' 13 | source: 14 | path: postgres/backups/k8up-test/test-database/manifests/ 15 | repoURL: 'https://github.com/small-hack/argocd-apps.git' 16 | targetRevision: HEAD 17 | sources: [] 18 | project: default 19 | syncPolicy: 20 | automated: null 21 | -------------------------------------------------------------------------------- /prometheus/app_of_apps_with_matrix/dashboards_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: grafana-dashboards 6 | namespace: argocd 7 | spec: 8 | project: prometheus 9 | source: 10 | repoURL: 'https://github.com/small-hack/argocd-apps' 11 | targetRevision: main 12 | path: prometheus/dashboards/ 13 | destination: 14 | server: "https://kubernetes.default.svc" 15 | namespace: prometheus 16 | syncPolicy: 17 | syncOptions: 18 | - Replace=true 19 | - CreateNamespace=true 20 | automated: 21 | prune: true 22 | selfHeal: true 23 | -------------------------------------------------------------------------------- /demo/argo-workflows/example/vm-webhook-event-source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: EventSource 3 | metadata: 4 | name: vm-webhook 5 | namespace: argocd 6 | spec: 7 | service: 8 | ports: 9 | - port: 8080 10 | targetPort: 8080 11 | webhook: 12 | # event-source can run multiple HTTP servers. 
Simply define a unique port to start a new HTTP server 13 | vm: 14 | # port to run HTTP server on 15 | port: "8080" 16 | # endpoint to listen to 17 | endpoint: /api 18 | # HTTP request method to allow. In this case, only POST requests are accepted 19 | method: POST 20 | 21 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world/nginx_hello_world_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: nginx-hello-world 5 | spec: 6 | destination: 7 | name: '' 8 | namespace: nginx-hello 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | path: demo/test-app/nginx-hello-world/manifests/ 12 | repoURL: 'https://github.com/small-hack/argocd-apps.git' 13 | targetRevision: HEAD 14 | sources: [] 15 | project: default 16 | syncPolicy: 17 | syncOptions: 18 | - CreateNamespace=true 19 | automated: 20 | prune: true 21 | selfHeal: true 22 | -------------------------------------------------------------------------------- /demo/wg-access-server/README.md: -------------------------------------------------------------------------------- 1 | # wireguard access server argo CD app 2 | Using [freifunkMUC/wg-access-server-chart](https://github.com/freifunkMUC/wg-access-server-chart/tree/main/charts/wg-access-server) (which is a fork of [Place1/wg-access-server](https://github.com/Place1/wg-access-server/tree/master/deploy/helm/wg-access-server)) we create a Wireguard based VPN server in k8s. 3 | 4 | 5 | ## Create a wireguard keypair 6 | 7 | - install wireguard 8 | 9 | ```bash 10 | sudo apt-get install wireguard 11 | ``` 12 | 13 | - create a key-pair 14 | 15 | ```bash 16 | wg genkey | tee privatekey | wg pubkey > publickey 17 | ``` 18 | -------------------------------------------------------------------------------- /cert-manager/cluster-issuers/templates/staging.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: {{ .Values.email }} 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: letsencrypt-staging 15 | # Enable the HTTP-01 challenge provider 16 | solvers: 17 | - http01: 18 | ingress: 19 | class: nginx 20 | -------------------------------------------------------------------------------- /demo/argo-workflows/example/webhook-event-source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: EventSource 3 | metadata: 4 | name: webhook 5 | namespace: argocd 6 | spec: 7 | service: 8 | ports: 9 | - port: 8080 10 | targetPort: 8080 11 | webhook: 12 | # event-source can run multiple HTTP servers. Simply define a unique port to start a new HTTP server 13 | cowsay: 14 | # port to run HTTP server on 15 | port: "8080" 16 | # endpoint to listen to 17 | endpoint: /example 18 | # HTTP request method to allow. 
In this case, only POST requests are accepted 19 | method: POST 20 | 21 | -------------------------------------------------------------------------------- /metallb/metallb_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: metallb-controller 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: metallb 11 | source: 12 | repoURL: 'https://github.com/metallb/metallb.git' 13 | targetRevision: v0.14.9 14 | path: config/manifests/ 15 | destination: 16 | server: "https://kubernetes.default.svc" 17 | namespace: metallb-system 18 | syncPolicy: 19 | syncOptions: 20 | - ApplyOutOfSyncOnly=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /prometheus/app_of_apps/dashboards_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: grafana-dashboards 6 | namespace: argocd 7 | spec: 8 | project: prometheus 9 | source: 10 | repoURL: 'https://github.com/small-hack/argocd-apps' 11 | targetRevision: monitoring-distributed 12 | path: prometheus/dashboards/ 13 | destination: 14 | server: "https://kubernetes.default.svc" 15 | namespace: monitoring 16 | syncPolicy: 17 | syncOptions: 18 | - Replace=true 19 | - CreateNamespace=true 20 | automated: 21 | prune: true 22 | selfHeal: true 23 | -------------------------------------------------------------------------------- /prometheus/scrape-configs_argocd_app_example.md: -------------------------------------------------------------------------------- 1 | ```yaml 2 | --- 3 | apiVersion: argoproj.io/v1alpha1 4 | kind: Application 5 | metadata: 6 | name: scrape-configs 7 | namespace: argocd 8 | spec: 9 | project: prometheus 10 | source: 11 | repoURL: 'https://github.com/small-hack/argocd-apps' 12 | targetRevision: main 13 | path: prometheus/scrape-configs/ 14 | destination: 15 | server: "https://kubernetes.default.svc" 16 | namespace: prometheus 17 | syncPolicy: 18 | syncOptions: 19 | - Replace=true 20 | - CreateNamespace=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | ``` 25 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world-vouch/nginx_hello_world_vouch_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: nginx-hello-world 5 | spec: 6 | destination: 7 | name: '' 8 | namespace: nginx-hello 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | path: test-app/nginx-hello-world-vouch/manifests/ 12 | repoURL: 'https://github.com/small-hack/argocd-apps.git' 13 | targetRevision: HEAD 14 | sources: [] 15 | project: default 16 | syncPolicy: 17 | syncOptions: 18 | - CreateNamespace=true 19 | automated: 20 | prune: true 21 | selfHeal: true 22 | -------------------------------------------------------------------------------- /grafana_stack/app_of_apps_with_matrix/alloy_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: alloy-opinionated 6 | namespace: argocd 7 | 8 | spec: 9 | project: monitoring 10 | 11 | syncPolicy: 12 | 
syncOptions: 13 | - Replace=true 14 | - CreateNamespace=true 15 | automated: 16 | prune: true 17 | selfHeal: true 18 | 19 | destination: 20 | server: "https://kubernetes.default.svc" 21 | namespace: monitoring 22 | 23 | source: 24 | repoURL: 'https://github.com/small-hack/argocd-apps' 25 | targetRevision: main 26 | path: alloy/ 27 | 28 | -------------------------------------------------------------------------------- /postgres/operators/zalando/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | ## if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret stores in the future :) 3 | provider: "" 4 | 5 | # -- if set to seaweedfs we deploy a policy secret. can also be minio 6 | s3_provider: "seaweedfs" 7 | 8 | # -- existing kubernetes secret with s3 admin credentials 9 | s3AdminCredentialsBitwardenID: "" 10 | 11 | # -- existing kubernetes secret with s3 admin credentials 12 | s3UserCredentialsBitwardenID: "" 13 | 14 | # -- existing kubernetes secret with s3 credentials for the remote backups 15 | s3BackupCredentialsBitwardenID: "" 16 | -------------------------------------------------------------------------------- /matrix/backups_and_restores/.sample-restic-env: -------------------------------------------------------------------------------- 1 | # this contains both the s3 endpoint (this example uses b2, but you can use any s3 compliant endpoint) AND the s3 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/my-matrix-bucket" 3 | 4 | # Create this file, with a single line with your restic repo password. Make sure it's `chmod`ed to 600 and has only your user as the owner 5 | export RESTIC_PASSWORD_FILE=./.matrix-restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="access key id goes here" 8 | export AWS_SECRET_ACCESS_KEY="secret key goes here" 9 | 10 | # after sourcing this file, you can do the following to test: restic snapshots 11 | -------------------------------------------------------------------------------- /minio/vanilla/minio_pvc_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: minio-persistent-volumes 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: minio 11 | source: 12 | repoURL: "https://github.com/small-hack/argocd-apps" 13 | path: "minio/vanilla/persistence/" 14 | targetRevision: main 15 | destination: 16 | server: "https://kubernetes.default.svc" 17 | namespace: minio 18 | syncPolicy: 19 | syncOptions: 20 | - CreateNamespace=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /argocd/manifests/projects/matrix.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | name: matrix 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: all things chat 12 | destinations: 13 | - name: in-cluster 14 | namespace: '*' 15 | server: https://kubernetes.default.svc 16 | namespaceResourceWhitelist: 17 | - group: '*' 18 | kind: '*' 19 | orphanedResources: {} 20 | sourceRepos: 21 | - https://jessebot.github.io/matrix-chart 22 | - registry-1.docker.io 23 | - https://github.com/small-hack/argocd-apps.git 24 | 
-------------------------------------------------------------------------------- /zitadel/backups_and_restores/.sample-restic-env: -------------------------------------------------------------------------------- 1 | # this contains both the s3 endpoint (this example uses b2, but you can use any s3 compliant endpoint) AND the s3 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/my-zitadel-bucket" 3 | 4 | # Create this file, with a single line with your restic repo password. Make sure it's `chmod`ed to 600 and has only your user as the owner 5 | export RESTIC_PASSWORD_FILE=./.zitadel-restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="access key id goes here" 8 | export AWS_SECRET_ACCESS_KEY="secret key goes here" 9 | 10 | # after sourcing this file, you can do the following to test: restic snapshots 11 | -------------------------------------------------------------------------------- /gotosocial/storage/pvc/values.yaml: -------------------------------------------------------------------------------- 1 | pvc: 2 | # -- default storage class name to use for config pvc 3 | storageClassName: "local-path" 4 | # -- default access mode for the config pvc 5 | accessMode: "ReadWriteOnce" 6 | # -- storage capacity for the gotosocial config pvc 7 | storage: "10Gi" 8 | 9 | # -- tolerate affinity 10 | affinity: {} 11 | 12 | # -- tolerate taints 13 | tolerations: [] 14 | 15 | # -- for enabling backups to a remote s3 provider or local disk backup 16 | k8up: 17 | # -- user to run the backups as 18 | securityContext: 19 | # this may need to be 33 (apache) or 82 (nginx) depending on how you run gotosocial 20 | runAsUser: 0 21 | -------------------------------------------------------------------------------- /minio/vanilla/README.md: -------------------------------------------------------------------------------- 1 | # Community maintained MinIO Argo CD ApplicationSet 2 | 3 | This is a Minio ApplicationSet that does not use the operator/tenant architecture helm charts. 4 | 5 | The main difference, beyond the lack of an operator, is that the helm chart the ApplicationSet is built around is community maintained. When we found out about it, it was referred to as the "[vanilla helm chart](https://github.com/minio/charts/blob/800de17ed357580ef8db8b191d7ff90a6724fecd/README.md#a-vanilla-helm-chart-is-available-here-helm-chart-vanilla-without-the-operator)". 6 | 7 | Here's the actual helm chart repo we're using: https://github.com/minio/minio/tree/master/helm/minio 8 | -------------------------------------------------------------------------------- /nextcloud/backups_and_restores/.sample-restic-env: -------------------------------------------------------------------------------- 1 | # this contains both the s3 endpoint (this example uses b2, but you can use any s3 compliant endpoint) AND the s3 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/my-nextcloud-bucket" 3 | 4 | # Create this file, with a single line with your restic repo password. 
Make sure it's `chmod`ed to 600 and has only your user as the owner 5 | export RESTIC_PASSWORD_FILE=./.nextcloud-restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="access key id goes here" 8 | export AWS_SECRET_ACCESS_KEY="secret key goes here" 9 | 10 | # after sourcing this file, you can do the following to test: restic snapshots 11 | -------------------------------------------------------------------------------- /seaweedfs/app_of_apps/persistence_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: seaweedfs-persistent-volumes 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: seaweedfs 11 | source: 12 | repoURL: https://github.com/small-hack/argocd-apps 13 | path: seaweedfs/persistence/ 14 | targetRevision: main 15 | destination: 16 | server: "https://kubernetes.default.svc" 17 | namespace: seaweedfs 18 | syncPolicy: 19 | syncOptions: 20 | - CreateNamespace=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /mastodon/small-hack/backups_and_restores/.sample-restic-env: -------------------------------------------------------------------------------- 1 | # this contains both the s3 endpoint (this example uses b2, but you can use any s3 compliant endpoint) AND the s3 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/my-mastodon-bucket" 3 | 4 | # Create this file, with a single line with your restic repo password. Make sure it's `chmod`ed to 600 and has only your user as the owner 5 | export RESTIC_PASSWORD_FILE=./.mastodon-restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="access key id goes here" 8 | export AWS_SECRET_ACCESS_KEY="secret key goes here" 9 | 10 | # after sourcing this file, you can do the following to test: restic snapshots 11 | -------------------------------------------------------------------------------- /writefreely/storage/templates/prebackup_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PreBackupPod 3 | metadata: 4 | name: mysqldump 5 | spec: 6 | backupCommand: sh -c 'mysqldump -u$USER -p$PW -h $DB_HOST --all-databases' 7 | pod: 8 | spec: 9 | containers: 10 | - env: 11 | - name: USER 12 | value: dumper 13 | - name: PW 14 | value: topsecret 15 | - name: DB_HOST 16 | value: mariadb.example.com 17 | image: mariadb:10.4 18 | command: 19 | - 'sleep' 20 | - 'infinity' 21 | imagePullPolicy: Always 22 | name: mysqldump 23 | -------------------------------------------------------------------------------- /generic-app/README.md: -------------------------------------------------------------------------------- 1 | # Generic App Argo CD App 2 | A generic Argo CD app using the [generic-app helm chart](https://github.com/small-hack/generic-app-helm) and the Argo CD Appset Secret Plugin. 3 | 4 | This just allows you specify your app name and image registry/repo/tag. 
5 | 6 | ## Deployment + Ingress 7 | For something with ingress, checkout the [deployment-ingress](./deployment-ingress) directory 8 | 9 | ## Job (no deployment) 10 | Sometimes you don't even need a deployment, you just need a job, checkout the [job](./job) directory 11 | 12 | ## Job and Deployment 13 | Sometimes you need a deployment and job, checkout the [deployment-and-job](./deployment-and-job) directory 14 | -------------------------------------------------------------------------------- /home-assistant/backups_and_restores/.sample-restic-env: -------------------------------------------------------------------------------- 1 | # this contains both the s3 endpoint (this example uses b2, but you can use any s3 compliant endpoint) AND the s3 bucket 2 | export RESTIC_REPOSITORY="s3:s3.eu-central-003.backblazeb2.com/my-home-assistant-bucket" 3 | 4 | # Create this file, with a single line with your restic repo password. Make sure it's `chmod`ed to 600 and has only your user as the owner 5 | export RESTIC_PASSWORD_FILE=./.home-assistant-restic-password 6 | 7 | export AWS_ACCESS_KEY_ID="access key id goes here" 8 | export AWS_SECRET_ACCESS_KEY="secret key goes here" 9 | 10 | # after sourcing this file, you can do the following to test: restic snapshots 11 | -------------------------------------------------------------------------------- /nextcloud/app_of_apps/before_starting_scripts_configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: before-starting-scripts 6 | data: 7 | install-apps.sh: | 8 | #!/bin/sh 9 | echo "Switching to the beta channel" 10 | php /var/www/html/occ config:system:set updater.release.channel --value=beta 11 | echo "Installing Nextcloud apps..." 12 | php /var/www/html/occ app:install oidc_login 13 | php /var/www/html/occ app:install notes 14 | php /var/www/html/occ app:install bookmarks 15 | php /var/www/html/occ app:install deck 16 | php /var/www/html/occ app:install side_menu 17 | echo "Nextcloud apps installation complete." 18 | -------------------------------------------------------------------------------- /scripts/autoupdate-prometheus-crds.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # tiny script to bump the version of the prometheus CRDs to match the helm chart version 3 | 4 | set -euo pipefail 5 | 6 | new_version="$1" 7 | 8 | version=$(grep "targetRevision:" "prometheus/crds/prometheus_crds_argocd_app.yaml" | awk '{print $2}' | tr -d "kube-prometheus-stack-") 9 | 10 | # this shouldn't happen 11 | if [[ ! 
$version ]]; then 12 | echo "No valid version was found" 13 | exit 1 14 | fi 15 | 16 | echo "Bumping version for promtheus CRDs from $version to $new_version to be in line with the helm chart version" 17 | sed -i "s/${version}/${new_version}/" "prometheus/crds/prometheus_crds_argocd_app.yaml" 18 | -------------------------------------------------------------------------------- /mastodon/small-hack/app_of_apps/migratedb_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: argoproj.io/v1alpha1 3 | # kind: Application 4 | # metadata: 5 | # name: mastodon-db-migrate-job 6 | # namespace: argocd 7 | # annotations: 8 | # argocd.argoproj.io/sync-wave: "4" 9 | # spec: 10 | # project: default 11 | # destination: 12 | # server: "https://kubernetes.default.svc" 13 | # namespace: mastodon 14 | # source: 15 | # repoURL: https://github.com/small-hack/argocd-apps.git 16 | # path: mastodon/manifests/migrate-job/ 17 | # syncPolicy: 18 | # syncOptions: 19 | # - Replace=true 20 | # automated: 21 | # prune: true 22 | # selfHeal: false 23 | -------------------------------------------------------------------------------- /argocd/manifests/projects/keycloak.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | name: keycloak 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: all keycloak apps 12 | destinations: 13 | - name: in-cluster 14 | namespace: keycloak 15 | server: https://kubernetes.default.svc 16 | - name: '*' 17 | namespace: argocd 18 | server: '*' 19 | namespaceResourceWhitelist: 20 | - group: '*' 21 | kind: '*' 22 | orphanedResources: {} 23 | sourceRepos: 24 | - registry-1.docker.io 25 | - https://github.com/small-hack/argocd-apps.git 26 | -------------------------------------------------------------------------------- /collabora_online/README.md: -------------------------------------------------------------------------------- 1 | # Argo CD Application for Collabora Online 2 | 3 | According to their website, [Collabora Online](https://www.collaboraonline.com/): 4 | 5 | > is a powerful online document editing suite which you can integrate into your own infrastructure or access via one of our trusted hosting partners. 6 | > Data protection is our priority. We provide you with the tools needed to keep your work secure, GDPR-compliant, without compromising on features. 7 | 8 | We use them to selfhost a document service to integrate with Nextcloud, but you can also use Collabora Online directly. 9 | 10 | ## Sync waves 11 | 12 | 1. externals secret for admin credentials 13 | 2. collabora web app 14 | -------------------------------------------------------------------------------- /demo/appflowy/storage/minio/README.md: -------------------------------------------------------------------------------- 1 | # Community maintained MinIO Argo CD ApplicationSet 2 | 3 | This is a Minio ApplicationSet for Appflowy-Cloud that does not use the operator/tenant architecture helm charts. 4 | 5 | The main difference, beyond the lack of an operator, is that the helm chart the ApplicationSet is built around is community maintained. When we found out about it, it was referred to as the "[vanilla helm chart](https://github.com/minio/charts/blob/800de17ed357580ef8db8b191d7ff90a6724fecd/README.md#a-vanilla-helm-chart-is-available-here-helm-chart-vanilla-without-the-operator)". 
6 | 7 | Here's the actual helm chart repo we're using: https://github.com/minio/minio/tree/master/helm/minio 8 | -------------------------------------------------------------------------------- /demo/juicefs/persistence/seaweedfs_data_pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: swfs-volume-data 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 100Gi 12 | --- 13 | kind: PersistentVolumeClaim 14 | apiVersion: v1 15 | metadata: 16 | name: swfs-master-data 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | requests: 22 | storage: 1Gi 23 | --- 24 | kind: PersistentVolumeClaim 25 | apiVersion: v1 26 | metadata: 27 | name: swfs-filer-data 28 | spec: 29 | accessModes: 30 | - ReadWriteOnce 31 | resources: 32 | requests: 33 | storage: 10Gi 34 | -------------------------------------------------------------------------------- /demo/rancher/manifests/rancher-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: rancher-ingress 6 | namespace: cattle-system 7 | annotations: 8 | kubernetes.io/ingress.class: nginx 9 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 10 | spec: 11 | tls: 12 | - hosts: 13 | - rancher.buildstar.online 14 | secretName: rancher-tls 15 | ingressClassName: nginx 16 | rules: 17 | - host: rancher.buildstar.online 18 | http: 19 | paths: 20 | - path: / 21 | pathType: Prefix 22 | backend: 23 | service: 24 | name: rancher 25 | port: 26 | number: 80 27 | -------------------------------------------------------------------------------- /nvidia_device_plugin/nvidia_device_plugin_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: nvidia-device-plugin-app 6 | namespace: argocd 7 | spec: 8 | project: nvidia-device-plugin 9 | destination: 10 | server: "https://kubernetes.default.svc" 11 | namespace: nvidia-device-plugin 12 | syncPolicy: 13 | syncOptions: 14 | - ApplyOutOfSyncOnly=true 15 | automated: 16 | prune: true 17 | selfHeal: true 18 | source: 19 | repoURL: 'https://nvidia.github.io/k8s-device-plugin' 20 | chart: nvidia-device-plugin 21 | targetRevision: 0.18.0 22 | helm: 23 | releaseName: nvidia-device-plugin 24 | -------------------------------------------------------------------------------- /demo/longhorn/helm/longhorn-helm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: longhorn-helm-chart 6 | namespace: argocd 7 | spec: 8 | destination: 9 | namespace: longhorn-system 10 | server: 'https://kubernetes.default.svc' 11 | source: 12 | repoURL: 'https://charts.longhorn.io' 13 | targetRevision: 1.10.0 14 | chart: longhorn 15 | helm: 16 | releaseName: longhorn 17 | valuesObject: 18 | preUpgradeChecker: 19 | jobEnabled: false 20 | project: longhorn 21 | syncPolicy: 22 | syncOptions: 23 | - CreateNamespace=true 24 | automated: 25 | prune: true 26 | selfHeal: true 27 | -------------------------------------------------------------------------------- /mastodon/mastodon/app_of_apps_with_tolerations/migratedb_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | 
# apiVersion: argoproj.io/v1alpha1 3 | # kind: Application 4 | # metadata: 5 | # name: mastodon-db-migrate-job 6 | # namespace: argocd 7 | # annotations: 8 | # argocd.argoproj.io/sync-wave: "4" 9 | # spec: 10 | # project: default 11 | # destination: 12 | # server: "https://kubernetes.default.svc" 13 | # namespace: mastodon 14 | # source: 15 | # repoURL: https://github.com/small-hack/argocd-apps.git 16 | # path: mastodon/manifests/migrate-job/ 17 | # syncPolicy: 18 | # syncOptions: 19 | # - Replace=true 20 | # automated: 21 | # prune: true 22 | # selfHeal: false 23 | -------------------------------------------------------------------------------- /mastodon/small-hack/app_of_apps_with_tolerations/migratedb_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: argoproj.io/v1alpha1 3 | # kind: Application 4 | # metadata: 5 | # name: mastodon-db-migrate-job 6 | # namespace: argocd 7 | # annotations: 8 | # argocd.argoproj.io/sync-wave: "4" 9 | # spec: 10 | # project: default 11 | # destination: 12 | # server: "https://kubernetes.default.svc" 13 | # namespace: mastodon 14 | # source: 15 | # repoURL: https://github.com/small-hack/argocd-apps.git 16 | # path: mastodon/manifests/migrate-job/ 17 | # syncPolicy: 18 | # syncOptions: 19 | # - Replace=true 20 | # automated: 21 | # prune: true 22 | # selfHeal: false 23 | -------------------------------------------------------------------------------- /postgres/backups/examples/backup-job.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Backup 4 | metadata: 5 | name: root-backup-to-b2 6 | namespace: default 7 | spec: 8 | podSecurityContext: 9 | runAsUser: 0 10 | failedJobsHistoryLimit: 10 11 | successfulJobsHistoryLimit: 10 12 | backend: 13 | repoPasswordSecretRef: 14 | name: 15 | key: password 16 | s3: 17 | endpoint: 18 | bucket: 19 | accessKeyIDSecretRef: 20 | name: 21 | key: applicationKeyId 22 | secretAccessKeySecretRef: 23 | name: 2 | Cilium on Argo CD 3 | 4 | 5 |
6 | 7 | 8 | 9 | 10 |
11 | Deploy Cilium to Kubernetes via an ArgoCD ApplicationSet. 12 |
13 | 14 | ___ 15 | 16 | Features: 17 | - Deploy hubble dashboard behind vouch 18 | - Enables use of ebfp-dependant apps like Keda and Kepler 19 | - Encryption enabled via Wireguard integration. See https://docs.cilium.io/en/stable/security/network/encryption-wireguard/#enable-wireguard-in-cilium for more details. 20 | -------------------------------------------------------------------------------- /demo/pixelfed/external_secrets/templates/bitwarden/pixelfed_app_key.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: pixelfed-app-key 7 | namespace: pixelfed 8 | spec: 9 | target: 10 | name: pixelfed-app-key 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | key: |- 16 | {{ `{{ .secretKey }}` }} 17 | data: 18 | - secretKey: secretKey 19 | sourceRef: 20 | storeRef: 21 | name: bitwarden-login 22 | kind: ClusterSecretStore 23 | remoteRef: 24 | key: {{ .Values.appKeyBitwardenID }} 25 | property: password 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/modsecurity_exception_files/README.md: -------------------------------------------------------------------------------- 1 | # Security Rule ID Ranges 2 | 3 | | App | Start | 4 | |:---------------|:-------| 5 | | postgres | 20000 | 6 | | Grafana | 30001 | 7 | | Matrix | 40001 | 8 | | Home Assistant | 50001 | 9 | | zitadel | 60001 | 10 | | ArgoCD | 70001 | 11 | | ActivityPub | 80000 | 12 | | Loki | 90001 | 13 | | tempo | 100000 | 14 | | Mimir | 110001 | 15 | | Harbor | 120001 | 16 | | Jellyfin | 130000 | 17 | | Forgejo | 140000 | 18 | | Ghost | 150000 | 19 | | AI | 160000 | 20 | | Banned | 170000 | 21 | | VNC | 180000 | 22 | | Nextcloud | 950000 | 23 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/values.yaml: -------------------------------------------------------------------------------- 1 | # -- enable the activity pub exceptions 2 | activitypub: true 3 | 4 | # -- enable the argocd exceptions 5 | argocd: true 6 | 7 | # -- enable the grafana exceptions 8 | grafana: true 9 | 10 | # -- enable the home assistant exceptions 11 | home-assistant: true 12 | 13 | # -- enable the loki exceptions 14 | loki: true 15 | 16 | # -- enable the matrix exceptions 17 | matrix: true 18 | 19 | # -- enable the mimir exceptions 20 | mimir: true 21 | 22 | # -- enable the nextcloud exceptions 23 | nextcloud: true 24 | 25 | # -- enable the postgresql exceptions 26 | postgresql: true 27 | 28 | # -- enable the tempo exceptions 29 | tempo: true 30 | 31 | # -- enable the zitadel exceptions 32 | zitadel: true 33 | -------------------------------------------------------------------------------- /mastodon/small-hack/storage/pvc/values.yaml: -------------------------------------------------------------------------------- 1 | valkey_pvc: 2 | # -- enable nextcloud configuration pvc 3 | enabled: true 4 | # -- default storage class name to use for config pvc 5 | storageClassName: "local-path" 6 | # -- default access mode for the config pvc 7 | accessMode: "ReadWriteOnce" 8 | # -- storage capacity for the nextcloud config pvc 9 | storage: "2Gi" 10 | 11 | # -- tolerate affinity 12 | affinity: {} 13 | 14 | # -- tolerate taints 15 | tolerations: [] 16 | 17 | # -- for enabling backups to a remote s3 provider or local disk backup 18 
| k8up: 19 | # -- user to run the backups as 20 | securityContext: 21 | # this may need to be 33 (apache) or 82 (nginx) depending on how you run nextcloud 22 | runAsUser: 0 23 | -------------------------------------------------------------------------------- /seaweedfs/operator/seaweedfs_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: seaweedfs-operator-app 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "2" 9 | spec: 10 | project: seaweedfs 11 | destination: 12 | server: https://kubernetes.default.svc 13 | namespace: seaweedfs 14 | syncPolicy: 15 | syncOptions: 16 | - CreateNamespace=true 17 | - ApplyOutOfSyncOnly=true 18 | automated: 19 | prune: true 20 | selfHeal: true 21 | source: 22 | repoURL: https://github.com/seaweedfs/seaweedfs-operator 23 | targetRevision: master 24 | path: deploy/helm/ 25 | helm: 26 | releaseName: seaweedfs-operator 27 | -------------------------------------------------------------------------------- /argocd/manifests/projects/social.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | name: mastodon 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: all mastodon apps 12 | destinations: 13 | - name: in-cluster 14 | namespace: mastodon 15 | server: https://kubernetes.default.svc 16 | - name: '*' 17 | namespace: argocd 18 | server: '*' 19 | namespaceResourceWhitelist: 20 | - group: '*' 21 | kind: '*' 22 | orphanedResources: {} 23 | sourceRepos: 24 | - registry-1.docker.io 25 | - https://github.com/small-hack/argocd-apps.git 26 | - https://github.com/jessebot/mastodon-helm-chart.git 27 | -------------------------------------------------------------------------------- /mastodon/README.md: -------------------------------------------------------------------------------- 1 | # Argo CD ApplicationSets for Mastodon 2 | 3 | **Stable directory**: [small-hack](./small-hack) 4 | **UnStable directory**: [mastodon](./mastodon) 5 | 6 | Example Argo CD tree view of Mastodon (small-hack) App: 7 | 8 | ![Screenshot of mastodon application in Argo CD's web interface. It shows tree view with a main mastodon app having the following children: mastodon-external-secrets-appset, mastodon-pvc-appset, mastodon-postgres-appset, valkey-appset, mastodon-web-app, mastodon-s3-pvc, and mastodon-seaweedfs. All apps show as healthy and successfully synced. 
The image is in dark mode so it used dark grey background, greyish blue tiles, and neon green pop colors.](https://github.com/user-attachments/assets/a6657495-02f0-41c9-b6e6-d6149549c7ab) 9 | -------------------------------------------------------------------------------- /postgres/backups/k8up-test/k8up-job/manifests/backup-as-root.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Backup 4 | metadata: 5 | name: root-backup-to-b2 6 | namespace: default 7 | spec: 8 | podSecurityContext: 9 | runAsUser: 0 10 | failedJobsHistoryLimit: 10 11 | successfulJobsHistoryLimit: 10 12 | backend: 13 | repoPasswordSecretRef: 14 | name: k8up-restic-b2-repo-pw-pg-backup 15 | key: password 16 | s3: 17 | endpoint: s3.eu-central-003.backblazeb2.com 18 | bucket: vmt-pg-backup-test 19 | accessKeyIDSecretRef: 20 | name: k8up-b2-creds-pg-backup 21 | key: applicationKeyId 22 | secretAccessKeySecretRef: 23 | name: k8up-b2-creds-pg-backup 24 | key: applicationKey 25 | -------------------------------------------------------------------------------- /demo/test-app/nginx-hello-world/manifests/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: nginx-hello-ingress 6 | namespace: nginx-hello 7 | annotations: 8 | kubernetes.io/ingress.class: nginx 9 | cert-manager.io/cluster-issuer: "letsencrypt-prod" 10 | nginx.ingress.kubernetes.io/rewrite-target: / 11 | spec: 12 | tls: 13 | - hosts: 14 | - {{ .testing_hostname }} 15 | secretName: "letsencrypt-prod" 16 | rules: 17 | - host: {{ .testing_hostname }} 18 | http: 19 | paths: 20 | - path: / 21 | pathType: Prefix 22 | backend: 23 | service: 24 | name: nginx-hello-service 25 | port: 26 | number: 5000 27 | -------------------------------------------------------------------------------- /external-secrets-operator/app_of_apps/external-secrets-crds_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: external-secrets-operator-crds 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: external-secrets-operator 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: external-secrets 14 | syncPolicy: 15 | syncOptions: 16 | - CreateNamespace=true 17 | - ServerSideApply=true 18 | automated: 19 | prune: true 20 | selfHeal: true 21 | source: 22 | repoURL: 'https://github.com/external-secrets/external-secrets' 23 | targetRevision: v0.20.4 24 | path: deploy/crds/ 25 | -------------------------------------------------------------------------------- /seaweedfs/README.md: -------------------------------------------------------------------------------- 1 | # Argo CD Applications for deploying SeaweedFS 2 | 3 | We are currently experimenting with [SeaweedFS](https://github.com/seaweedfs/seaweedfs) on Kubernetes. 4 | 5 | ## Sync Waves 6 | 7 | 1. persistent volumes for filer, volume server, and master server 8 | 2. SeaweedFS helm chart 9 | 10 | ## Persistence 11 | 12 | This is to deploy a pre-existing persistent volume. 13 | 14 | - [persistent volumes](./persistence/seaweedfs_data_pvc.yaml) 15 | 16 | ### Backups 17 | 18 | Docs on backing up SeaweedFS. 
19 | 20 | - [Regular docs](./backups/backups.md) 21 | - [S3 docs](./backups/s3-backups.md) 22 | 23 | ## Operator 24 | 25 | This is the new SeaweedFS operator helm chart for using the SeaweedFS CRDs. Still in experimental phase. 26 | 27 | -------------------------------------------------------------------------------- /grafana_stack/app_of_apps_with_matrix/dashboards_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: grafana-dashboards 6 | namespace: argocd 7 | 8 | spec: 9 | project: monitoring 10 | 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: monitoring 14 | 15 | syncPolicy: 16 | syncOptions: 17 | - Replace=true 18 | - CreateNamespace=true 19 | automated: 20 | prune: true 21 | selfHeal: true 22 | 23 | source: 24 | repoURL: 'https://github.com/small-hack/argocd-apps' 25 | targetRevision: main 26 | path: grafana_stack/dashboards/ 27 | helm: 28 | valuesObject: 29 | dashboards: 30 | deploy_all: true 31 | -------------------------------------------------------------------------------- /k8up/k8up_argocd_app.md: -------------------------------------------------------------------------------- 1 | this is what it would look like to deploy the CRDs separately: 2 | ```yaml 3 | --- 4 | # CRDs for backups 5 | apiVersion: argoproj.io/v1alpha1 6 | kind: Application 7 | metadata: 8 | name: k8up-crd 9 | namespace: argocd 10 | annotations: 11 | argocd.argoproj.io/sync-wave: "-1" 12 | spec: 13 | project: k8up 14 | source: 15 | repoURL: https://github.com/k8up-io/k8up.git 16 | path: config/crd/apiextensions.k8s.io/v1/ 17 | targetRevision: k8up-4.8.5 18 | destination: 19 | server: "https://kubernetes.default.svc" 20 | namespace: k8up 21 | syncPolicy: 22 | syncOptions: 23 | - CreateNamespace=true 24 | - Replace=true 25 | automated: 26 | prune: true 27 | selfHeal: true 28 | ``` 29 | -------------------------------------------------------------------------------- /minio/backups/external_secrets_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # First sync wave because we need secrets for nextcloud, so it has to be 3 | # before nextcloud so it has secrets for all its credentials. 
4 | apiVersion: argoproj.io/v1alpha1 5 | kind: Application 6 | metadata: 7 | name: minio-backups-external-secrets 8 | namespace: argocd 9 | annotations: 10 | argocd.argoproj.io/sync-wave: "1" 11 | spec: 12 | project: minio 13 | destination: 14 | server: "https://kubernetes.default.svc" 15 | namespace: minio 16 | source: 17 | repoURL: https://github.com/small-hack/argocd-apps.git 18 | path: minio/backups/external_secrets/ 19 | syncPolicy: 20 | syncOptions: 21 | - Replace=true 22 | automated: 23 | prune: true 24 | selfHeal: true 25 | -------------------------------------------------------------------------------- /ghost/storage/templates/pvc_mysql.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Dynamic Persistent volume claim for postgresql specifically to persist 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | name: ghost-mysql 7 | spec: 8 | storageClassName: {{ .Values.pvc_storageClassName }} 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: {{ .Values.pvc_capacity }} 14 | --- 15 | # Dynamic Persistent volume claim for postgresql specifically to persist 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: ghost-activitypub-mysql 20 | spec: 21 | storageClassName: {{ .Values.pvc_storageClassName }} 22 | accessModes: 23 | - ReadWriteOnce 24 | resources: 25 | requests: 26 | storage: {{ .Values.pvc_capacity }} 27 | -------------------------------------------------------------------------------- /minio/backups/testing/backups/root_backup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Backup 4 | metadata: 5 | name: root-backup-minio-s3 6 | namespace: nextcloud 7 | spec: 8 | promURL: http://push-gateway.prometheus:9091/ 9 | podSecurityContext: 10 | runAsUser: 0 11 | failedJobsHistoryLimit: 10 12 | successfulJobsHistoryLimit: 10 13 | backend: 14 | repoPasswordSecretRef: 15 | name: minio-backups-credentials 16 | key: resticRepoPassword 17 | s3: 18 | endpoint: s3.eu-central-003.backblazeb2.com 19 | bucket: testing-minio-backups 20 | accessKeyIDSecretRef: 21 | name: minio-backups-credentials 22 | key: applicationKeyId 23 | secretAccessKeySecretRef: 24 | name: minio-backups-credentials 25 | key: applicationKey 26 | -------------------------------------------------------------------------------- /zitadel/storage/minio_tenant/minio_setup_script_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: zitadel-minio-setup-script 6 | namespace: argocd 7 | annotations: 8 | # after the external secrets and minio tenant but before the postgres and zitadel apps 9 | argocd.argoproj.io/sync-wave: "3" 10 | spec: 11 | project: zitadel 12 | destination: 13 | server: "https://kubernetes.default.svc" 14 | namespace: zitadel 15 | syncPolicy: 16 | syncOptions: 17 | - ApplyOutOfSyncOnly=true 18 | automated: 19 | prune: true 20 | selfHeal: true 21 | source: 22 | repoURL: https://github.com/small-hack/argocd-apps.git 23 | path: zitadel/storage/minio_tenant/minio_setup_script/ 24 | targetRevision: main 25 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/templates/modsecurity_plugins_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
ConfigMap 3 | metadata: 4 | name: modsecurity-plugins 5 | labels: 6 | nginx-configmap: "1" 7 | data: 8 | empty-after.conf: | 9 | # no data 10 | empty-before.conf: | 11 | # no data 12 | empty-config.conf: | 13 | # no data 14 | {{/* range through all modsecurity exception files */}} 15 | {{ range $path, $_ := .Files.Glob "modsecurity_exception_files/**.conf" }} 16 | {{/* create a simple file name to template with */}} 17 | {{- $file_name := $path | trimPrefix "modsecurity_exception_files/" | lower | nospace | trimSuffix ".conf" -}} 18 | 19 | {{- $file_name | nindent 2 }}-rule-exclusions-before.conf: |- 20 | {{- $.Files.Get $path | nindent 4 }} 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /s3_bucket_ingresses/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.buckets }} 2 | --- 3 | apiVersion: networking.k8s.io/v1 4 | kind: Ingress 5 | metadata: 6 | name: s3-{{ . }}-bucket-ingress 7 | {{- with $.Values.annotations }} 8 | annotations: 9 | {{- toYaml . | nindent 4 }} 10 | {{- end }} 11 | spec: 12 | ingressClassName: nginx 13 | rules: 14 | - host: {{ . }}.{{ $.Values.s3_hostname }} 15 | http: 16 | paths: 17 | - path: / 18 | pathType: ImplementationSpecific 19 | backend: 20 | service: 21 | name: seaweedfs-s3 22 | port: 23 | number: 8333 24 | tls: 25 | - secretName: s3-{{ . }}-bucket-tls 26 | hosts: 27 | - {{ . }}.{{ $.Values.s3_hostname }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /argocd/manifests/projects/zitadel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: zitadel 5 | namespace: argocd 6 | spec: 7 | clusterResourceWhitelist: 8 | - group: '*' 9 | kind: '*' 10 | description: project for zitadel 11 | destinations: 12 | - name: in-cluster 13 | namespace: zitadel 14 | server: https://kubernetes.default.svc 15 | - name: in-cluster 16 | namespace: argocd 17 | server: https://kubernetes.default.svc 18 | namespaceResourceWhitelist: 19 | - group: '*' 20 | kind: '*' 21 | orphanedResources: {} 22 | sourceRepos: 23 | - https://charts.zitadel.com 24 | - https://charts.cockroachdb.com/ 25 | - https://zitadel.github.io/zitadel-charts 26 | - https://github.com/small-hack/argocd-apps 27 | -------------------------------------------------------------------------------- /demo/vault/vault_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: vault-app 6 | namespace: argocd 7 | spec: 8 | project: vault 9 | destination: 10 | server: https://kubernetes.default.svc 11 | namespace: vault 12 | 13 | syncPolicy: 14 | syncOptions: 15 | - CreateNamespace=true 16 | - ApplyOutOfSyncOnly=true 17 | automated: 18 | prune: true 19 | selfHeal: true 20 | 21 | ignoreDifferences: 22 | - group: apps 23 | kind: MutatingWebhookConfiguration 24 | name: vault-app-agent-injector-cfg 25 | jqPathExpressions: 26 | - '.webhooks[]?.clientConfig.caBundle' 27 | 28 | source: 29 | repoURL: 'https://helm.releases.hashicorp.com' 30 | targetRevision: 0.31.0 31 | chart: vault 32 | -------------------------------------------------------------------------------- /matrix/external_secrets/templates/bitwarden/syncv3_env.yaml: -------------------------------------------------------------------------------- 1 | {{- 
if and (eq .Values.external_secrets_provider "bitwarden") .Values.sliding_sync.sliding_sync_bitwarden_ID }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: syncv3-secret 7 | spec: 8 | target: 9 | name: syncv3-secret 10 | deletionPolicy: Delete 11 | template: 12 | type: Opaque 13 | data: 14 | SYNCV3_SECRET: |- 15 | {{ `{{ .secret }}` }} 16 | data: 17 | # SYNCV3_SECRET 18 | - secretKey: secret 19 | sourceRef: 20 | storeRef: 21 | name: bitwarden-login 22 | kind: ClusterSecretStore 23 | remoteRef: 24 | key: {{ .Values.sliding_sync.sliding_sync_bitwarden_ID }} 25 | property: password 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /nextcloud/maintenance_mode_cronjobs/values.yaml: -------------------------------------------------------------------------------- 1 | # -- timezone you want to use for TZ env var, must be identifier from this list: 2 | # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List 3 | # example could be "Europe/Amsterdam" 4 | timezone: "" 5 | 6 | image: 7 | # -- docker image repo 8 | repository: "nextcloud" 9 | # -- docker image tag 10 | tag: "31.0.10-fpm-alpine" 11 | 12 | maintenance_mode_on: 13 | # -- cron schedule to turn on maintenance mode for nextcloud 14 | schedule: "" 15 | 16 | maintenance_mode_off: 17 | # -- cron schedule to turn off maintenance mode for nextcloud 18 | schedule: "" 19 | 20 | # -- affinity to make the cronjobs use a specific node 21 | affinity: {} 22 | 23 | # -- tolerations to make the cronjobs tolerate the taints on a specific node 24 | tolerations: [] 25 | -------------------------------------------------------------------------------- /external-secrets-operator/providers/bitwarden/bitwarden_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: bitwarden-eso-provider 6 | namespace: argocd 7 | spec: 8 | project: external-secrets-operator 9 | source: 10 | repoURL: 'https://small-hack.github.io/bitwarden-eso-provider' 11 | targetRevision: 1.2.0 12 | chart: bitwarden-eso-provider 13 | helm: 14 | values: | 15 | network_policy: false 16 | bitwarden_eso_provider: 17 | auth: 18 | existingSecret: bweso-login 19 | destination: 20 | server: "https://kubernetes.default.svc" 21 | namespace: external-secrets 22 | syncPolicy: 23 | syncOptions: 24 | - CreateNamespace=true 25 | automated: 26 | prune: true 27 | selfHeal: true 28 | -------------------------------------------------------------------------------- /prometheus/crds/prometheus_crds_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: prometheus-crd 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: prometheus 11 | destination: 12 | name: in-cluster 13 | namespace: prometheus 14 | source: 15 | repoURL: https://github.com/prometheus-community/helm-charts.git 16 | path: charts/kube-prometheus-stack/charts/crds/crds/ 17 | targetRevision: kube-prometheus-stack-78.5.0 18 | directory: 19 | recurse: true 20 | syncPolicy: 21 | syncOptions: 22 | - CreateNamespace=true 23 | - ApplyOutOfSyncOnly=true 24 | - ServerSideApply=true 25 | - Retry=true 26 | automated: 27 | selfHeal: true 28 | prune: true 29 | -------------------------------------------------------------------------------- 
/s3_persistence_and_backups/templates/juicefs_valkey_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "juicefs" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: juicefs-valkey-primary 7 | annotations: 8 | k8up.io/backup: "true" 9 | spec: 10 | storageClassName: {{ .Values.pvc_storageClassName }} 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: {{ .Values.pvc_capacity }} 16 | --- 17 | kind: PersistentVolumeClaim 18 | apiVersion: v1 19 | metadata: 20 | name: juicefs-valkey-replica 21 | annotations: 22 | k8up.io/backup: "true" 23 | spec: 24 | storageClassName: {{ .Values.pvc_storageClassName }} 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: {{ .Values.pvc_capacity }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /argocd/manifests/projects/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | name: external-secrets-operator 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: all apps for kubernetes external secrets operator 12 | destinations: 13 | - name: in-cluster 14 | namespace: external-secrets 15 | server: https://kubernetes.default.svc 16 | - name: '*' 17 | namespace: argocd 18 | server: '*' 19 | namespaceResourceWhitelist: 20 | - group: '*' 21 | kind: '*' 22 | orphanedResources: {} 23 | sourceRepos: 24 | - https://github.com/small-hack/argocd-apps.git 25 | - https://charts.external-secrets.io 26 | - https://jessebot.github.io/bitwarden-eso-provider 27 | -------------------------------------------------------------------------------- /nextcloud/backups_and_restores/root_backup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Backup 4 | metadata: 5 | name: root-backup-nextcloud-s3 6 | namespace: nextcloud 7 | spec: 8 | promURL: http://push-gateway.monitoring:9091/ 9 | podSecurityContext: 10 | runAsUser: 0 11 | failedJobsHistoryLimit: 10 12 | successfulJobsHistoryLimit: 10 13 | backend: 14 | repoPasswordSecretRef: 15 | name: nextcloud-backups-credentials 16 | key: resticRepoPassword 17 | s3: 18 | endpoint: s3.eu-central-003.backblazeb2.com 19 | bucket: testing-ncloud-backups-september 20 | accessKeyIDSecretRef: 21 | name: nextcloud-backups-credentials 22 | key: applicationKeyId 23 | secretAccessKeySecretRef: 24 | name: nextcloud-backups-credentials 25 | key: applicationKey 26 | -------------------------------------------------------------------------------- /cert-manager/external_secrets/templates/bitwarden/cloudflare_token.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: cloudflare-api-token 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: cloudflare-api-token 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | token: |- 16 | {{ `{{ .token }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: token 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 
| key: {{ .Values.cloudflareBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /demo/juicefs/external_secrets/templates/redis_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: juicefs-redis-password 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: juicefs-redis-password 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | redis-password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: juicefs-redis-password 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /postgres/backups/k8up-test/k8up-job/external_secrets/restic-repo-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # repo secret for k8up, backups for persistent volumes using restic 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: k8up-restic-b2-repo-pw-pg-backup 7 | namespace: default 8 | spec: 9 | target: 10 | # Name for the secret to be created on the cluster 11 | name: k8up-restic-b2-repo-pw-pg-backup 12 | deletionPolicy: Delete 13 | template: 14 | type: Opaque 15 | data: 16 | password: |- 17 | {{ .password }} 18 | 19 | data: 20 | - secretKey: password 21 | sourceRef: 22 | storeRef: 23 | name: bitwarden-login 24 | kind: ClusterSecretStore 25 | remoteRef: 26 | key: pg-backup-test-b2-repo-password 27 | property: password 28 | -------------------------------------------------------------------------------- /tempo/external_secrets/templates/valkey_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: tempo-valkey-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: tempo-valkey-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.valkeyCredentialsBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/modsecurity_exception_files/loki.conf: -------------------------------------------------------------------------------- 1 | # allow "Request Containing Content, but Missing Content-Type header" rule 920340 2 | # rule 920340 3 | # Dont treat loki writes as an http smuggeling attack 4 | # Rule 921110 5 | # Remote Command Execution: Java process spawn (CVE-2017-9805) 6 | # 944110 7 | # Allow PUT method to /loki// rule 921110 8 | # this is for when loki tries to flush chunks 9 | # Suspicious Java class detected Event 944130 10 | SecRule REQUEST_URI "@beginsWith /loki" \ 11 | "id:90001,\ 12 | phase:1,\ 
13 | ver:'loki-exclusions-plugin/1.0.0',\ 14 | pass,\ 15 | nolog,\ 16 | t:none,\ 17 | ctl:ruleRemoveById=911100,\ 18 | ctl:ruleRemoveById=920340,\ 19 | ctl:ruleRemoveById=921110,\ 20 | ctl:ruleRemoveById=944100,\ 21 | ctl:ruleRemoveById=944110,\ 22 | ctl:ruleRemoveById=944130" 23 | -------------------------------------------------------------------------------- /valkey/external_secrets/templates/bitwarden/valkey_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: valkey-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: valkey-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | valkey_password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.valkeyCredentialsBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /demo/garage/manifests/init_rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # see: https://stackoverflow.com/questions/54196533/how-to-execute-command-from-one-pod-inside-another-pod-using-kubectl-exec-which 3 | kind: Role 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: garage-setup-role 7 | namespace: garage 8 | labels: 9 | app: garage 10 | rules: 11 | - apiGroups: [""] 12 | # this might need to also include pods/exec as an array item? 
13 | resources: ["pods"] 14 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "exec"] 15 | 16 | --- 17 | kind: RoleBinding 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | metadata: 20 | name: garage-rolebinding 21 | namespace: garage 22 | labels: 23 | app: garage 24 | subjects: 25 | - kind: Group 26 | name: system:serviceaccounts 27 | apiGroup: rbac.authorization.k8s.io 28 | -------------------------------------------------------------------------------- /writefreely/external_secrets/templates/writefreely_admin_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: writefreely-admin-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: writefreely-admin-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | writefreely-password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # writefreely admin password 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.adminCredentialsBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /grafana_stack/external_secrets/templates/loki_valkey_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: loki-valkey-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: loki-valkey-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.lokiValkeyCredentialsBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /nextcloud/external_secrets/templates/bitwarden/redis_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: nextcloud-redis-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: nextcloud-redis-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | redis_password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.redisBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /valkey_cluster/external_secrets/templates/bitwarden/valkey_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: 
ExternalSecret 5 | metadata: 6 | name: valkey-credentials 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: valkey-credentials 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | valkey_password: |- 16 | {{ `{{ .password }}` }} 17 | data: 18 | # Key given to the secret to be created on the cluster 19 | - secretKey: password 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.valkeyCredentialsBitwardenID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /demo/infisical/secrets-operator/infisical_secrets_operator_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: infisical 6 | namespace: argocd 7 | spec: 8 | project: infisical 9 | source: 10 | # Actual source code: https://github.com/Infisical/infisical/tree/main/helm-charts/secrets-operator 11 | repoURL: 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/' 12 | chart: secrets-operator 13 | # published versions: https://cloudsmith.io/~infisical/repos/helm-charts/packages/detail/helm/secrets-operator/#versions 14 | targetRevision: 0.10.9 15 | destination: 16 | server: "https://kubernetes.default.svc" 17 | namespace: infisical 18 | syncPolicy: 19 | syncOptions: 20 | - ApplyOutOfSyncOnly=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /forgejo/storage/pvc/templates/pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | {{- range .Values.k8up.tolerations }} 13 | tolerations: 14 | {{- toYaml . | nindent 8 }} 15 | {{- end }} 16 | {{- range .Values.k8up.tolerations }} 17 | affinity: 18 | {{- toYaml . 
| nindent 8 }} 19 | {{- end }} 20 | securityContext: 21 | fsGroup: {{ .Values.k8up.podSecurityContext.runAsUser }} 22 | runAsUser: {{ .Values.k8up.podSecurityContext.runAsUser }} 23 | containers: 24 | - name: test 25 | env: 26 | - name: FORGEJO 27 | value: 'true' 28 | -------------------------------------------------------------------------------- /matrix/external_secrets/templates/bitwarden/registration.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.external_secrets_provider "bitwarden" }} 2 | --- 3 | # secret for a matrix registration shared secret 4 | apiVersion: external-secrets.io/v1 5 | kind: ExternalSecret 6 | metadata: 7 | name: matrix-registration 8 | spec: 9 | target: 10 | # Name for the secret to be created on the cluster 11 | name: matrix-registration 12 | deletionPolicy: Delete 13 | template: 14 | type: Opaque 15 | data: 16 | registrationSharedSecret: |- 17 | {{ `{{ .sharedSecret }}` }} 18 | data: 19 | - secretKey: sharedSecret 20 | sourceRef: 21 | storeRef: 22 | name: bitwarden-login 23 | kind: ClusterSecretStore 24 | remoteRef: 25 | key: {{ .Values.synapse.registration_bitwarden_ID }} 26 | property: password 27 | {{- end }} 28 | -------------------------------------------------------------------------------- /postgres/backups/k8up-test/k8up-job/manifests/external-secrets_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Created during second sync wave because we need secrets to exist before 3 | # the job gets created, but after the "secret-store" is configured so we 4 | # have access to the external secrets operator 5 | --- 6 | apiVersion: argoproj.io/v1alpha1 7 | kind: Application 8 | metadata: 9 | name: k8up-pg-backup-test-secrets 10 | namespace: argocd 11 | annotations: 12 | argocd.argoproj.io/sync-wave: "2" 13 | spec: 14 | destination: 15 | name: '' 16 | namespace: default 17 | server: 'https://kubernetes.default.svc' 18 | source: 19 | path: postgres/backups/k8up-test/k8up-job/external_secrets/ 20 | repoURL: 'https://github.com/small-hack/argocd-apps.git' 21 | targetRevision: HEAD 22 | sources: [] 23 | project: default 24 | syncPolicy: 25 | automated: null 26 | -------------------------------------------------------------------------------- /demo/argo-workflows/README.md: -------------------------------------------------------------------------------- 1 | # Argo Workflows 2 | 3 | Argo Workflows is an open source container-native workflow engine for orchestrating parallel jobs on Kubernetes. 4 | 5 | Argo Workflows lets you define a YAML configuration with multiple steps, representing the steps in your CI/CD pipeline. Each of these steps runs in a separate container within your Kubernetes cluster. 6 | 7 | Argo uses a CRD called Workflows, which provides a generateName. This name becomes the prefix of all the pods the Workflow runs. As part of the Workflow, you can also define storage volumes, which will be accessible by the templates for your workflow steps. 8 | 9 | 10 | > Depends on Argo Server already existing, so make sure you install ArgoCD first. 
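For reference, a minimal Workflow using `generateName` and two steps might look something like the sketch below (namespace, names, and image are placeholders, not part of this repo):

```yaml
---
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  # every pod created by this run is prefixed with hello-steps-
  generateName: hello-steps-
  namespace: argo
spec:
  entrypoint: main
  templates:
    - name: main
      steps:
        # each step runs in its own container/pod
        - - name: step-one
            template: say
            arguments:
              parameters:
                - name: message
                  value: "step one"
        - - name: step-two
            template: say
            arguments:
              parameters:
                - name: message
                  value: "step two"
    - name: say
      inputs:
        parameters:
          - name: message
      container:
        image: busybox
        command: [echo]
        args: ["{{inputs.parameters.message}}"]
```

Each submission gets a unique name such as `hello-steps-abc12`, and both step pods share that prefix.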
11 | 12 | TODO: 13 | - replace secret manifests with external secrets 14 | - add workflows callback url to smol-k8s-lab provisioning process for argocd 15 | -------------------------------------------------------------------------------- /jellyfin/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: jellyfin-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: jellyfin 17 | value: "true" 18 | 19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /metallb/README.md: -------------------------------------------------------------------------------- 1 | # metallb manifests 2 | Installs [metallb](https://github.com/metallb/metallb/) and configures your ip pool and l2 advertisement. 3 | 4 | ## Sync Waves 5 | 1. metallb controller and custom resource definitions 6 | 7 | ## To Deploy 8 | you can paste this in the "Edit as YAML" section when creating a new Argo CD app: 9 | 10 | ```yaml 11 | --- 12 | apiVersion: argoproj.io/v1alpha1 13 | kind: Application 14 | metadata: 15 | name: metallb 16 | namespace: argocd 17 | spec: 18 | project: metallb 19 | source: 20 | repoURL: 'https://github.com/small-hack/argocd-apps' 21 | targetRevision: main 22 | path: metallb/ 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: metallb-system 26 | syncPolicy: 27 | syncOptions: 28 | - Replace=true 29 | automated: 30 | prune: true 31 | selfHeal: true 32 | ``` 33 | -------------------------------------------------------------------------------- /netmaker/app_of_apps/netmaker_persistsence.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # First sync wave done in parallel with creating secrets. Must be BEFORE 3 | # netmaker so that netmaker persists it's data between upgrades. Sync policy 4 | # is set to ApplyOutOfSyncOnly=true to create the volume initially only. 
5 | apiVersion: argoproj.io/v1alpha1 6 | kind: Application 7 | metadata: 8 | name: netmaker-persistence 9 | namespace: argocd 10 | annotations: 11 | argocd.argoproj.io/sync-wave: "1" 12 | spec: 13 | project: netmaker 14 | destination: 15 | server: "https://kubernetes.default.svc" 16 | namespace: netmaker 17 | source: 18 | repoURL: https://github.com/small-hack/argocd-apps.git 19 | path: netmaker/manifests/persistence/ 20 | targetRevision: main 21 | syncPolicy: 22 | syncOptions: 23 | - ApplyOutOfSyncOnly=true 24 | automated: 25 | selfHeal: true 26 | 27 | -------------------------------------------------------------------------------- /nextcloud/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: file-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: nextcloud 17 | value: "true" 18 | 19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /peertube/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: file-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: nextcloud 17 | value: "true" 18 | 19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /demo/appflowy/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 6 | s3_provider: "seaweedfs" 7 | 8 | # -- existing bitwarden vault item id with s3 admin credentials 9 | s3AdminCredentialsBitwardenID: "" 10 | 11 | # -- existing bitwarden vault item id with s3 appflowy credentials 12 | s3AppflowyCredentialsBitwardenID: "" 13 | 14 | # -- existing bitwarden vault item id with s3 db credentials for gotrue 15 | s3PostgresCredentialsBitwardenID: "" 16 | 17 | # -- existing bitwarden vault item id with s3 credentials for the remote backups 18 | s3BackupCredentialsBitwardenID: "" 19 | 20 | # -- existing bitwarden vault item id with gotrue credentials 21 | gotrueCredentialsBitwardenID: "" 22 | -------------------------------------------------------------------------------- /demo/pixelfed/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: file-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: nextcloud 17 | value: "true" 18 | 19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /gotosocial/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: file-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: gotosocial 17 | value: "true" 18 | 19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . 
| nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /demo/local-path-provisioner/README.md: -------------------------------------------------------------------------------- 1 | # Local Path Provisioner 2 |  3 | Custom ConfigMap and StorageClasses to add our storage defaults for nodes 4 |  5 | Storage Classes: 6 |  7 | - local-path (always /var/lib/rancher/k3s/storage) 8 | - fast-raid (always /mnt/raid1) 9 | - slow-raid (always /mnt/raid0) 10 |  11 | Performance Characteristics: 12 |  13 | ## Host: Bradley 14 |  15 | Fast-Raid: 16 | - 2x Crucial MX500 1TB SSD 17 | - RAID1 18 | (benchmark screenshot, 2024-10-20) 19 |  20 | Slow-Raid: 21 | - 3x Seagate HDD 3.5" 2TB ST2000DM008 Barracuda 22 | - RAID5 23 | (benchmark screenshot, 2024-10-20) 24 |  25 | ## Host: Node0 26 | Fast-Raid: 27 | - 2x Crucial P3 Plus 4TB 28 | - RAID1 29 | -------------------------------------------------------------------------------- /external-secrets-operator/app_of_apps/with_metrics/external-secrets-argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: external-secrets-operator-helm 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: external-secrets-operator 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: external-secrets 14 | syncPolicy: 15 | syncOptions: 16 | - CreateNamespace=true 17 | automated: 18 | prune: true 19 | selfHeal: true 20 | source: 21 | repoURL: 'https://charts.external-secrets.io' 22 | targetRevision: 0.20.4 23 | chart: external-secrets 24 | helm: 25 | releaseName: external-secrets 26 | valuesObject: 27 | fullnameOverride: external-secrets 28 | serviceMonitor: 29 | enabled: true 30 | -------------------------------------------------------------------------------- /mastodon/small-hack/storage/pvc/templates/k8up_pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: file-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 |  13 | containers: 14 | - name: test 15 | env: 16 | - name: nextcloud 17 | value: "true" 18 |  19 | {{- if .Values.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 |  29 | {{- with .Values.affinity}} 30 | affinity: 31 | {{- toYaml . 
| nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /mastodon/small-hack/storage/pvc/templates/valkey_pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.valkey_pvc.enabled "true" }} 2 | --- 3 | kind: PersistentVolumeClaim 4 | apiVersion: v1 5 | metadata: 6 | name: mastodon-valkey-primary 7 | annotations: 8 | k8up.io/backup: 'true' 9 | spec: 10 | storageClassName: {{ .Values.valkey_pvc.storageClassName }} 11 | accessModes: 12 | - {{ .Values.valkey_pvc.accessMode }} 13 | resources: 14 | requests: 15 | storage: {{ .Values.valkey_pvc.storage }} 16 | --- 17 | kind: PersistentVolumeClaim 18 | apiVersion: v1 19 | metadata: 20 | name: mastodon-valkey-replica 21 | annotations: 22 | k8up.io/backup: 'true' 23 | spec: 24 | storageClassName: {{ .Values.valkey_pvc.storageClassName }} 25 | accessModes: 26 | - {{ .Values.valkey_pvc.accessMode }} 27 | resources: 28 | requests: 29 | storage: {{ .Values.valkey_pvc.storage }} 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /zitadel/external_secrets/templates/bitwarden/zitadel-core-key.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | # secret zitadel core key 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: zitadel-core-key 7 | namespace: zitadel 8 | spec: 9 | target: 10 | # Name of the kubernetes secret 11 | name: zitadel-core-key 12 | deletionPolicy: Delete 13 | template: 14 | type: Opaque 15 | data: 16 | masterkey: |- 17 | {{ `{{ .password }}` }} 18 | 19 | data: 20 | - secretKey: password 21 | sourceRef: 22 | storeRef: 23 | name: bitwarden-login 24 | kind: ClusterSecretStore 25 | remoteRef: 26 | # id of the bitwarden secret 27 | key: {{ .Values.bitwardenCoreItemID }} 28 | # property within the bitwarden secret we want 29 | property: password 30 | {{- end }} 31 | -------------------------------------------------------------------------------- /demo/rancher/README.md: -------------------------------------------------------------------------------- 1 | # Rancher 2 | 3 | Rancher is a complete software stack for teams adopting containers. It addresses the operational and security challenges of managing multiple Kubernetes clusters, while providing DevOps teams with integrated tools for running containerized workloads. 
4 |  5 | ## Manually Deploy to Argocd 6 |  7 | ```yaml 8 | --- 9 | apiVersion: argoproj.io/v1alpha1 10 | kind: Application 11 | metadata: 12 | name: rancher-argo-app 13 | namespace: argocd 14 | annotations: 15 | argocd.argoproj.io/sync-wave: "1" 16 | spec: 17 | project: rancher 18 | destination: 19 | server: "https://kubernetes.default.svc" 20 | namespace: cattle-system 21 | source: 22 | repoURL: https://github.com/small-hack/argocd-apps.git 23 | path: demo/rancher/ 24 | syncPolicy: 25 | syncOptions: 26 | - ApplyOutOfSyncOnly=true 27 | automated: 28 | selfHeal: true 29 | ``` 30 | -------------------------------------------------------------------------------- /demo/garage/config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # https://garagehq.deuxfleurs.fr/documentation/quick-start/#creating-a-cluster-layout 3 |  4 | # check ALL garage's nodes 5 | # NODE_IDS=$(kubectl exec --stdin --tty -n garage garage-web-app-0 -- ./garage status | tail -n 3 | cut -d ' ' -f 1 | xargs) 6 | NODE_ID=$(kubectl exec --stdin --tty -n garage garage-web-app-0 -- ./garage status | tail -n 1 | cut -d ' ' -f 1) 7 |  8 | # assign location and capacity to node 9 | kubectl exec --stdin --tty -n garage garage-web-app-0 -- ./garage layout assign -z dc1 -c 1G $NODE_ID 10 |  11 | # get the version to apply 12 | VERSION=$(kubectl exec --stdin --tty -n garage garage-web-app-0 -- ./garage layout show | grep "layout apply" | cut -d ' ' -f 9) 13 |  14 | echo "Found version: $VERSION" 15 |  16 | # apply the new version of the layout we just created 17 | kubectl exec --stdin --tty -n garage garage-web-app-0 -- ./garage layout apply --version "$VERSION" 18 | -------------------------------------------------------------------------------- /seaweedfs/persistence/seaweedfs_data_pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: swfs-volume-data 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 100Gi 12 | --- 13 | kind: PersistentVolumeClaim 14 | apiVersion: v1 15 | metadata: 16 | name: swfs-volume-idx 17 | spec: 18 | accessModes: 19 | - ReadWriteOnce 20 | resources: 21 | requests: 22 | storage: 10Gi 23 | --- 24 | kind: PersistentVolumeClaim 25 | apiVersion: v1 26 | metadata: 27 | name: swfs-master-data 28 | spec: 29 | accessModes: 30 | - ReadWriteOnce 31 | resources: 32 | requests: 33 | storage: 10Gi 34 | --- 35 | kind: PersistentVolumeClaim 36 | apiVersion: v1 37 | metadata: 38 | name: swfs-filer-data 39 | spec: 40 | accessModes: 41 | - ReadWriteOnce 42 | resources: 43 | requests: 44 | storage: 10Gi 45 | -------------------------------------------------------------------------------- /demo/rancher/rancher-argo-app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: rancher 6 | namespace: argocd 7 | spec: 8 | project: default 9 | sources: 10 | - repoURL: 'https://releases.rancher.com/server-charts/latest' 11 | targetRevision: 2.12.3 12 | chart: rancher 13 | helm: 14 | values: | 15 | bootstrapPassword: password 16 | hostname: rancher.buildstar.online 17 | replicas: -1 18 | ingress: 19 | enabled: false 20 | - repoURL: https://github.com/small-hack/argocd-apps 21 | targetRevision: main 22 | path: demo/rancher/manifests 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: 
cattle-system 26 | syncPolicy: 27 | automated: 28 | prune: false 29 | selfHeal: true 30 | syncOptions: 31 | - CreateNamespace=true 32 | -------------------------------------------------------------------------------- /matrix/external_secrets/templates/bitwarden/trusted_key_servers.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.external_secrets_provider "bitwarden") (not (eq .Values.synapse.trusted_key_servers_bitwarden_ID "not applicable")) }} 2 | --- 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: trusted-key-servers 7 | spec: 8 | target: 9 | # Name for the secret to be created on the cluster 10 | name: trusted-key-servers 11 | deletionPolicy: Delete 12 | template: 13 | type: Opaque 14 | data: 15 | trustedKeyServers: |- 16 | {{ `{{ .trustedKeyServers }}` }} 17 | data: 18 | - secretKey: trustedKeyServers 19 | sourceRef: 20 | storeRef: 21 | name: bitwarden-login 22 | kind: ClusterSecretStore 23 | remoteRef: 24 | key: {{ .Values.synapse.trusted_key_servers_bitwarden_ID }} 25 | property: password 26 | {{- end }} 27 | -------------------------------------------------------------------------------- /minio/backups/helm/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: k8up_minio_backups 3 | description: A Helm chart for creating minio backups using k8up 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.0.0 19 | -------------------------------------------------------------------------------- /argocd/manifests/projects/ingress_project.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | name: ingress 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: Apps required for ingress to function like ingress-nginx, cert-manager, and vouch 12 | destinations: 13 | - name: in-cluster 14 | namespace: ingress 15 | server: https://kubernetes.default.svc 16 | - name: '*' 17 | namespace: argocd 18 | server: '*' 19 | - name: '*' 20 | namespace: ingress 21 | server: '*' 22 | namespaceResourceWhitelist: 23 | - group: '*' 24 | kind: '*' 25 | orphanedResources: {} 26 | sourceRepos: 27 | - https://github.com/small-hack/argocd-apps.git 28 | - https://charts.jetstack.io 29 | - https://kubernetes.github.io/ingress-nginx 30 | - https://vouch.github.io/helm-charts/ 31 | -------------------------------------------------------------------------------- /coturn/coturn_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # installs the coturn helm chart (TURN/STUN server) 3 | apiVersion: argoproj.io/v1alpha1 4 | kind: Application 5 | metadata: 6 | name: coturn-helm-chart 7 | namespace: argocd 8 | annotations: 9 | argocd.argoproj.io/sync-wave: "2" 10 | spec: 11 | project: default 12 | destination: 13 | server: "https://kubernetes.default.svc" 14 | namespace: coturn 15 | sources: 16 | # small-hack coturn helm repo 17 | - repoURL: https://small-hack.github.io/coturn-chart/ 18 | chart: coturn 19 | targetRevision: 9.1.0 20 | helm: 21 | valueFiles: 22 | - $values/coturn/values/values.yaml 23 | # our values.yaml file locally 24 | - repoURL: 'https://github.com/small-hack/argocd-apps.git' 25 | targetRevision: main 26 | ref: values 27 | syncPolicy: 28 | automated: 29 | prune: true 30 | selfHeal: true 31 | -------------------------------------------------------------------------------- /mysql/percona-pxc-operator/pxc_operator_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pxc-operator-application 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: pxc-operator 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: pxc-operator 14 | syncPolicy: 15 | syncOptions: 16 | - ApplyOutOfSyncOnly=true 17 | - ServerSideApply=true 18 | automated: 19 | prune: true 20 | selfHeal: true 21 | source: 22 | repoURL: 'https://percona.github.io/percona-helm-charts' 23 | chart: pxc-operator 24 | targetRevision: 1.18.0 25 | helm: 26 | releaseName: pxc-operator 27 | valuesObject: 28 | replicaCount: 1 29 | watchAllNamespaces: true 30 | logStructured: true 31 | logLevel: "INFO" 32 | disableTelemetry: false 33 | -------------------------------------------------------------------------------- /argocd/manifests/projects/nextcloud.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: AppProject 4 | metadata: 5 | labels: 6 | env: prod 7 | name: nextcloud 8 | namespace: argocd 9 | spec: 10 | 
clusterResourceWhitelist: 11 | - group: '*' 12 | kind: '*' 13 | description: all nextcloud apps 14 | destinations: 15 | - name: in-cluster 16 | namespace: nextcloud 17 | server: https://kubernetes.default.svc 18 | - name: '*' 19 | namespace: argocd 20 | server: '*' 21 | namespaceResourceWhitelist: 22 | - group: '*' 23 | kind: '*' 24 | orphanedResources: {} 25 | roles: 26 | - description: nextcloud admins 27 | name: nextcloud 28 | policies: 29 | - p, proj:nextcloud:nextcloud, applications, *, nextcloud/*, allow 30 | sourceRepos: 31 | - registry-1.docker.io 32 | - https://nextcloud.github.io/helm 33 | - https://github.com/small-hack/argocd-apps.git 34 | -------------------------------------------------------------------------------- /peertube/external_secrets/templates/bitwarden/peertube-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (eq .Values.provider "bitwarden") .Values.secretBitwardenID }} 2 | --- 3 | # secret peertube email stuff 4 | apiVersion: external-secrets.io/v1 5 | kind: ExternalSecret 6 | metadata: 7 | name: peertube-secret 8 | namespace: peertube 9 | spec: 10 | target: 11 | # Name of the kubernetes secret 12 | name: peertube-secret 13 | deletionPolicy: Delete 14 | template: 15 | type: Opaque 16 | data: 17 | password: |- 18 | {{ `{{ .password }}` }} 19 | 20 | data: 21 | - secretKey: password 22 | sourceRef: 23 | storeRef: 24 | name: bitwarden-login 25 | kind: ClusterSecretStore 26 | remoteRef: 27 | # key-id of the bitwarden secret 28 | key: {{ .Values.secretBitwardenID }} 29 | # property within the bitwarden secret we want 30 | property: password 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /s3_persistence_and_backups/templates/pod_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PodConfig 3 | metadata: 4 | name: s3-backups-podconfig 5 | spec: 6 | template: 7 | spec: 8 | {{- with .Values.k8up.securityContext }} 9 | securityContext: 10 | runAsUser: {{ .runAsUser }} 11 | {{- end }} 12 | 13 | containers: 14 | - name: test 15 | env: 16 | - name: {{ .Values.app }} 17 | value: "true" 18 | 19 | {{- if and .Values.tolerations .Values.k8up.tolerations }} 20 | tolerations: 21 | {{- range .Values.tolerations }} 22 | - effect: {{ .effect }} 23 | key: {{ .key }} 24 | operator: {{ .operator }} 25 | value: {{ .value }} 26 | {{- end }} 27 | {{- end }} 28 | 29 | {{- with and .Values.k8up.affinity .Values.affinity}} 30 | affinity: 31 | {{- toYaml . | nindent 8 }} 32 | {{- end }} 33 | -------------------------------------------------------------------------------- /ghost/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 6 | s3_provider: "seaweedfs" 7 | 8 | # -- existing kubernetes secret with s3 admin credentials 9 | s3AdminCredentialsBitwardenID: "" 10 | 11 | # -- existing kubernetes secret with s3 ghost credentials 12 | s3ghostCredentialsBitwardenID: "" 13 | 14 | # -- existing kubernetes secret with s3 credentials for the remote backups 15 | s3BackupCredentialsBitwardenID: "" 16 | 17 | # other ghost specific secrets 18 | 19 | # -- ghost admin Credentials 20 | adminCredentialsBitwardenID: "" 21 | 22 | # -- ghost smtp Credentials 23 | smtpCredentialsBitwardenID: "" 24 | 25 | # -- ghost mysql Credentials 26 | mysqlBitwardenID: "" 27 | 28 | # -- ghost OIDC Credentials 29 | oidcCredentialsBitwardenID: "" 30 | -------------------------------------------------------------------------------- /kubevirt/examples/machine-sizes/high-cpu.yaml: -------------------------------------------------------------------------------- 1 | # High CPU machines: Num Cores == GB RAM 2 | --- 3 | apiVersion: instancetype.kubevirt.io/v1beta1 4 | kind: VirtualMachineClusterInstancetype 5 | metadata: 6 | name: highcpu-micro 7 | spec: 8 | cpu: 9 | guest: 2 10 | memory: 11 | guest: 2Gi 12 | --- 13 | apiVersion: instancetype.kubevirt.io/v1beta1 14 | kind: VirtualMachineClusterInstancetype 15 | metadata: 16 | name: highcpu-small 17 | spec: 18 | cpu: 19 | guest: 4 20 | memory: 21 | guest: 4Gi 22 | --- 23 | apiVersion: instancetype.kubevirt.io/v1beta1 24 | kind: VirtualMachineClusterInstancetype 25 | metadata: 26 | name: highcpu-medium 27 | spec: 28 | cpu: 29 | guest: 8 30 | memory: 31 | guest: 8Gi 32 | --- 33 | apiVersion: instancetype.kubevirt.io/v1beta1 34 | kind: VirtualMachineClusterInstancetype 35 | metadata: 36 | name: highcpu-large 37 | spec: 38 | cpu: 39 | guest: 16 40 | memory: 41 | guest: 16Gi 42 | -------------------------------------------------------------------------------- /kubevirt/examples/machine-sizes/high-mem.yaml: -------------------------------------------------------------------------------- 1 | # High Memory machines: Num Cores == (GB RAM)/4 2 | --- 3 | apiVersion: instancetype.kubevirt.io/v1beta1 4 | kind: VirtualMachineClusterInstancetype 5 | metadata: 6 | name: highmem-micro 7 | spec: 8 | cpu: 9 | guest: 1 10 | memory: 11 | guest: 4Gi 12 | --- 13 | apiVersion: instancetype.kubevirt.io/v1beta1 14 | kind: VirtualMachineClusterInstancetype 15 | metadata: 16 | name: highmem-small 17 | spec: 18 | cpu: 19 | guest: 2 20 | memory: 21 | guest: 8Gi 22 | --- 23 | apiVersion: instancetype.kubevirt.io/v1beta1 24 | kind: VirtualMachineClusterInstancetype 25 | metadata: 26 | name: highmem-medium 27 | spec: 28 | cpu: 29 | guest: 4 30 | memory: 31 | guest: 16Gi 32 | --- 33 | apiVersion: instancetype.kubevirt.io/v1beta1 34 | kind: VirtualMachineClusterInstancetype 35 | metadata: 36 | name: highmem-large 37 | spec: 38 | cpu: 39 | guest: 8 40 | memory: 41 | guest: 32Gi 42 | -------------------------------------------------------------------------------- /kubevirt/examples/machine-sizes/standard.yaml: -------------------------------------------------------------------------------- 1 | # Standard machines: Num Cores == (GB RAM)/2 2 | --- 3 | apiVersion: instancetype.kubevirt.io/v1beta1 4 | kind: VirtualMachineClusterInstancetype 5 | metadata: 6 | name: standard-micro 7 | spec: 8 | cpu: 9 | guest: 1 10 | memory: 11 | guest: 2Gi 12 | --- 13 | apiVersion: instancetype.kubevirt.io/v1beta1 14 | kind: VirtualMachineClusterInstancetype 15 | metadata: 16 | name: standard-small 17 | spec: 18 | cpu: 19 | guest: 2 20 | memory: 21 
| guest: 4Gi 22 | --- 23 | apiVersion: instancetype.kubevirt.io/v1beta1 24 | kind: VirtualMachineClusterInstancetype 25 | metadata: 26 | name: standard-medium 27 | spec: 28 | cpu: 29 | guest: 4 30 | memory: 31 | guest: 8Gi 32 | --- 33 | apiVersion: instancetype.kubevirt.io/v1beta1 34 | kind: VirtualMachineClusterInstancetype 35 | metadata: 36 | name: standard-large 37 | spec: 38 | cpu: 39 | guest: 8 40 | memory: 41 | guest: 16Gi 42 | -------------------------------------------------------------------------------- /postgres/operators/cloud-native-postgres/cnpg_operator_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: cnpg-operator-application 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: cnpg-operator 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: cnpg-system 14 | syncPolicy: 15 | syncOptions: 16 | - ApplyOutOfSyncOnly=true 17 | - ServerSideApply=true 18 | automated: 19 | prune: true 20 | selfHeal: true 21 | source: 22 | repoURL: 'https://cloudnative-pg.github.io/charts' 23 | chart: cloudnative-pg 24 | targetRevision: 0.26.1 25 | helm: 26 | releaseName: cnpg-operator 27 | values: | 28 | monitoring: 29 | # -- Specifies whether the monitoring should be enabled. Requires Prometheus Operator CRDs. 30 | podMonitorEnabled: false 31 | -------------------------------------------------------------------------------- /demo/argo-workflows/app-of-apps/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: argocd 5 | name: operate-workflow-sa 6 | --- 7 | # Similarly you can use a ClusterRole and ClusterRoleBinding 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: Role 10 | metadata: 11 | name: operate-workflow-role 12 | namespace: argocd 13 | rules: 14 | - apiGroups: 15 | - argoproj.io 16 | verbs: 17 | - "*" 18 | resources: 19 | - workflows 20 | - workflowtemplates 21 | - cronworkflows 22 | - clusterworkflowtemplates 23 | - workflowtaskresults 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: RoleBinding 27 | metadata: 28 | name: operate-workflow-role-binding 29 | namespace: argocd 30 | roleRef: 31 | apiGroup: rbac.authorization.k8s.io 32 | kind: Role 33 | name: operate-workflow-role 34 | subjects: 35 | - kind: ServiceAccount 36 | name: operate-workflow-sa 37 | -------------------------------------------------------------------------------- /writefreely/external_secrets/templates/smtp-credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | # secret writefreely email stuff 4 | apiVersion: external-secrets.io/v1 5 | kind: ExternalSecret 6 | metadata: 7 | name: writefreely-smtp-credentials 8 | namespace: writefreely 9 | spec: 10 | target: 11 | # Name of the kubernetes secret to create 12 | name: writefreely-smtp-credentials 13 | deletionPolicy: Delete 14 | template: 15 | type: Opaque 16 | data: 17 | smtp-password: |- 18 | {{ `{{ .password }}` }} 19 | 20 | data: 21 | - secretKey: password 22 | sourceRef: 23 | storeRef: 24 | name: bitwarden-login 25 | kind: ClusterSecretStore 26 | remoteRef: 27 | # name of the bitwarden secret 28 | key: {{ .Values.smtpCredentialsBitwardenID }} 29 | # property within the bitwarden secret we want 30 | property: 
password 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /cert-manager/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # cert-manager-eso-bitwarden-chart 2 | 3 | ![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) 4 | 5 | A Helm chart for Cert Manager External Secrets using the Bitwarden ESO provider on Kubernetes 6 | 7 | ## Values 8 | 9 | | Key | Type | Default | Description | 10 | |-----|------|---------|-------------| 11 | | cloudflareBitwardenID | string | `""` | | 12 | | provider | string | `""` | if this is not set to "bitwarden", we will not actually deploy any templates we may support other secret providers in the future | 13 | 14 | ---------------------------------------------- 15 | Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) 16 | -------------------------------------------------------------------------------- /minio/backups/testing/restores/restore_files.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Restore 4 | metadata: 5 | name: nextcloud-files 6 | namespace: nextcloud 7 | spec: 8 | failedJobHistoryLimit: 5 9 | successfulJobHistoryLimit: 1 10 | podSecurityContext: 11 | # change to 33 for www-data 12 | runAsUser: 0 13 | # This is optional to specify a specific snapshot to restore from 14 | snapshot: REPLACE_ME 15 | restoreMethod: 16 | folder: 17 | claimName: nextcloud-files 18 | backend: 19 | repoPasswordSecretRef: 20 | name: nextcloud-backups-credentials 21 | key: resticRepoPassword 22 | s3: 23 | endpoint: s3.eu-central-003.backblazeb2.com 24 | bucket: testing-ncloud-backups-september 25 | accessKeyIDSecretRef: 26 | name: nextcloud-backups-credentials 27 | key: applicationKeyId 28 | secretAccessKeySecretRef: 29 | name: nextcloud-backups-credentials 30 | key: applicationKey 31 | -------------------------------------------------------------------------------- /nextcloud/backups_and_restores/restore_files.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: k8up.io/v1 3 | kind: Restore 4 | metadata: 5 | name: nextcloud-files 6 | namespace: nextcloud 7 | spec: 8 | failedJobHistoryLimit: 5 9 | successfulJobHistoryLimit: 1 10 | podSecurityContext: 11 | # change to 33 for www-data 12 | runAsUser: 0 13 | # This is optional to specify a specific snapshot to restore from 14 | snapshot: REPLACE_ME 15 | restoreMethod: 16 | folder: 17 | claimName: nextcloud-files 18 | backend: 19 | repoPasswordSecretRef: 20 | name: nextcloud-backups-credentials 21 | key: resticRepoPassword 22 | s3: 23 | endpoint: s3.eu-central-003.backblazeb2.com 24 | bucket: testing-ncloud-backups-september 25 | accessKeyIDSecretRef: 26 | name: nextcloud-backups-credentials 27 | key: applicationKeyId 28 | secretAccessKeySecretRef: 29 | name: nextcloud-backups-credentials 30 | key: applicationKey 31 | -------------------------------------------------------------------------------- /argocd/manifests/projects/monitoring-project.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 
| kind: AppProject 4 | metadata: 5 | name: monitoring 6 | namespace: argocd 7 | spec: 8 | clusterResourceWhitelist: 9 | - group: '*' 10 | kind: '*' 11 | description: Prometheus, Alert Manager, Grafana, Loki, Promtail and node exporter :D 12 | destinations: 13 | - name: in-cluster 14 | namespace: monitoring 15 | server: https://kubernetes.default.svc 16 | - name: '*' 17 | namespace: argocd 18 | server: '*' 19 | - name: '*' 20 | namespace: monitoring 21 | server: '*' 22 | - name: '*' 23 | namespace: '*' 24 | server: '*' 25 | namespaceResourceWhitelist: 26 | - group: '*' 27 | kind: '*' 28 | orphanedResources: {} 29 | sourceRepos: 30 | - https://prometheus-community.github.io/helm-charts 31 | - https://github.com/small-hack/argocd-apps.git 32 | - https://github.com/grafana/helm-charts 33 | -------------------------------------------------------------------------------- /prometheus/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # prometheus-eso-chart 2 | 3 | ![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) 4 | 5 | A Helm chart for Prometheus Stack External Secrets using the Bitwarden ESO provider on Kubernetes 6 | 7 | ## Values 8 | 9 | | Key | Type | Default | Description | 10 | |-----|------|---------|-------------| 11 | | oidcCredentialsBitwardenID | string | `""` | zitadel OIDC Credentials | 12 | | provider | string | `""` | if this is not set to "bitwarden", we will not actually deploy any templates we may support other secret providers in the future | 13 | 14 | ---------------------------------------------- 15 | Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) 16 | -------------------------------------------------------------------------------- /grafana_stack/README.md: -------------------------------------------------------------------------------- 1 | ### 🚧 This is under construction 🚧 2 | 3 | ## Grafana Monitoring Stack 4 | 5 | | Application | Description | 6 | |-------------|-------------| 7 | | alloy | collects logs and metrics on each cluster | 8 | | loki | receives logs and aggregates them before pushing to S3 | 9 | | mimir | prometheus replacement that does s3 storage (collects metrics) | 10 | | grafana | metics and logs query frontend and dashboards | 11 | 12 | ## Loki 13 | 14 | like Prometheus, but for logs 15 | 16 | - Loki is a horizontally-scalable, highly-available, multi-tenant log aggregation system inspired by Prometheus. 17 | - It is designed to be very cost effective and easy to operate. 18 | - It does not index the contents of the logs, but rather a set of labels for each log stream. 19 | 20 | See: https://github.com/grafana/loki 21 | 22 | ## Coming soon 23 | 24 | Soon you'll also be able to use [smol-k8s-lab](https://github.com/small-hack/smol-k8s-lab) to deploy this. 
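In the meantime, a hand-written Argo CD Application for this stack can follow the same pattern as the other app READMEs in this repo; the sketch below assumes the `monitoring` project and namespace and points at this directory (adjust `path` to match whichever app-of-apps layout you end up using):

```yaml
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: grafana-stack
  namespace: argocd
spec:
  project: monitoring
  source:
    repoURL: 'https://github.com/small-hack/argocd-apps.git'
    targetRevision: main
    # assumption: deploy everything under this directory
    path: grafana_stack/
  destination:
    server: "https://kubernetes.default.svc"
    namespace: monitoring
  syncPolicy:
    syncOptions:
      - ApplyOutOfSyncOnly=true
    automated:
      prune: true
      selfHeal: true
```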
25 | -------------------------------------------------------------------------------- /kyverno/kyverno_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # webapp is deployed 2nd because we need secrets and persistent volumes up 1st 3 | apiVersion: argoproj.io/v1alpha1 4 | kind: Application 5 | metadata: 6 | name: kyverno-app 7 | namespace: argocd 8 | annotations: 9 | argocd.argoproj.io/sync-wave: "1" 10 | spec: 11 | project: default 12 | destination: 13 | server: "https://kubernetes.default.svc" 14 | namespace: kyverno 15 | sources: 16 | # official kyverno helm repo 17 | - repoURL: 'https://kyverno.github.io/kyverno/' 18 | chart: kyverno 19 | targetRevision: 3.5.2 20 | helm: 21 | valueFiles: 22 | - $values/kyverno/values/values.yaml 23 | # our values.yaml file locally 24 | - repoURL: 'https://github.com/small-hack/argocd-apps.git' 25 | targetRevision: main 26 | ref: values 27 | syncPolicy: 28 | syncOptions: 29 | - ApplyOutOfSyncOnly=true 30 | automated: 31 | prune: true 32 | selfHeal: true 33 | -------------------------------------------------------------------------------- /valkey/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # external-secrets-valkey-chart 2 | 3 | ![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) 4 | 5 | A Helm chart for External Secrets using the Bitwarden ESO provider on Kubernetes 6 | 7 | ## Values 8 | 9 | | Key | Type | Default | Description | 10 | |-----|------|---------|-------------| 11 | | provider | string | `""` | if this is not set to "bitwarden", we will not actually deploy any templates we may support other secret providers in the future | 12 | | valkeyBitwardenID | string | `""` | nextcloud redis (actually for valkey) Credentials | 13 | 14 | ---------------------------------------------- 15 | Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) 16 | -------------------------------------------------------------------------------- /writefreely/external_secrets/templates/mysql_credentials.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.provider "bitwarden" }} 2 | --- 3 | # secret for a writefreely mysql DB 4 | apiVersion: external-secrets.io/v1 5 | kind: ExternalSecret 6 | metadata: 7 | name: writefreely-mysql-credentials 8 | spec: 9 | target: 10 | # Name for the secret to be created on the cluster 11 | name: writefreely-mysql-credentials 12 | deletionPolicy: Delete 13 | template: 14 | type: Opaque 15 | data: 16 | mysql-password: |- 17 | {{ `{{ .password }}` }} 18 | mysql-replication-password: |- 19 | {{ `{{ .password }}` }} 20 | mysql-root-password: |- 21 | {{ `{{ .password }}` }} 22 | data: 23 | - secretKey: password 24 | sourceRef: 25 | storeRef: 26 | name: bitwarden-login 27 | kind: ClusterSecretStore 28 | remoteRef: 29 | key: {{ .Values.mysqlBitwardenID }} 30 | property: password 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /demo/infisical/infisical_secrets/secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # example secret 3 | apiVersion: v1 4 | kind: Secret 
5 | metadata: 6 | name: infisical-backend-secrets 7 | namespace: infisical 8 | type: Opaque 9 | data: 10 | # these can be generated with: openssl rand -hex 16 | base64 11 | ENCRYPTION_KEY: OTBjNGI5YjY0MzBlOTQ2NTMyZTFhNmMxOWYwMWFkODM= 12 | JWT_AUTH_SECRET: YzljNWM4ZWFmZTMyMGM3YTdjYzQyOWNmMTY5M2ZhMjE= 13 | JWT_MFA_SECRET: YzEwZjMxMzUzYzE2YTI1ZmFlNDc4MGFjNGVmZGYxNTY= 14 | JWT_PROVIDER_AUTH_SECRET: OTNmYWNjZDg3ZWRkMzFkM2M3NTU2ZTMxMjYxNjdjNzk= 15 | JWT_REFRESH_SECRET: MDNlNDQ2ZWJlZGE5YjRkMmY2ZTMwNzJiYWM5MjQ2NDQ= 16 | JWT_SERVICE_SECRET: YTNkZWJkN2IzOWE2N2MyNTM1MTA2N2U0NmU2ZTNmYWQ= 17 | JWT_SIGNUP_SECRET: Mzg5NzExMzYyM2UxNGZhNGM0MjFlNTgzODM0MTEwODk= 18 | # these are not filled in but should be 19 | MONGO_URL: 20 | SMTP_FROM_ADDRESS: 21 | SMTP_FROM_NAME: 22 | SMTP_HOST: 23 | SMTP_PASSWORD: 24 | SMTP_PORT: NTg3 25 | SMTP_SECURE: dHJ1ZQ== 26 | SMTP_USERNAME: 27 | -------------------------------------------------------------------------------- /demo/keycloak/external_secrets/keycloak-admin-credentials.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # secret for the keycloak admin credentials 3 | apiVersion: external-secrets.io/v1 4 | kind: ExternalSecret 5 | metadata: 6 | name: keycloak-admin-credentials 7 | namespace: keycloak 8 | spec: 9 | target: 10 | # Name of the kubernetes secret 11 | name: keycloak-admin-credentials 12 | deletionPolicy: Delete 13 | template: 14 | type: Opaque 15 | data: 16 | # Key-names within the kubernetes secret 17 | password: |- 18 | {{ .password }} 19 |  20 | data: 21 | # `secretKey` relates to the key name defined within the kubernetes secret 22 | - secretKey: password 23 | sourceRef: 24 | storeRef: 25 | name: bitwarden-login 26 | kind: ClusterSecretStore 27 | remoteRef: 28 | # key-id of the bitwarden secret 29 | key: keycloak-admin-credentials 30 | # property within the secret we want 31 | property: password 32 | -------------------------------------------------------------------------------- /demo/artifactory/artifactory_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: artifactory 5 | spec: 6 | destination: 7 | name: '' 8 | namespace: artifactory 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | repoURL: 'https://charts.jfrog.io' 12 | targetRevision: 107.117.19 13 | chart: artifactory-oss 14 | helm: 15 | parameters: 16 | - name: artifactory.ingress.enabled 17 | value: 'true' 18 | - name: artifactory.ingress.tls 19 | value: 'true' 20 | - name: artifactory.databaseUpgradeReady 21 | value: 'yes' 22 | - name: artifactory.nginx.enabled 23 | value: 'false' 24 | - name: artifactory.nginx.tlsSecretName 25 | value: artifactory-tls 26 | sources: [] 27 | project: default 28 | syncPolicy: 29 | automated: 30 | prune: false 31 | selfHeal: false 32 | syncOptions: 33 | - CreateNamespace=true 34 | -------------------------------------------------------------------------------- /demo/openbao/openbao_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: openbao-app 6 | namespace: argocd 7 | spec: 8 | project: openbao 9 | destination: 10 | server: https://kubernetes.default.svc 11 | namespace: openbao 12 |  13 | syncPolicy: 14 | syncOptions: 15 | - CreateNamespace=true 16 | - ApplyOutOfSyncOnly=true 17 | automated: 18 | prune: true 19 | selfHeal: true 20 |  21 | 
ignoreDifferences: 22 | - group: apps 23 | kind: MutatingWebhookConfiguration 24 | name: openbao-app-agent-injector-cfg 25 | jqPathExpressions: 26 | - '.webhooks[]?.clientConfig.caBundle' 27 | source: 28 | # for testing 29 | # repoURL: 'https://github.com/jessebot/openbao-helm' 30 | # repoURL: 'https://github.com/openbao/openbao-helm' 31 | # path: charts/openbao/ 32 | repoURL: 'https://openbao.github.io/openbao-helm' 33 | chart: openbao 34 | targetRevision: 0.18.4 35 | -------------------------------------------------------------------------------- /valkey_cluster/external_secrets/README.md: -------------------------------------------------------------------------------- 1 | # external-secrets-valkey-chart 2 | 3 | ![Version: 0.1.0](https://img.shields.io/badge/Version-0.1.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.0.0](https://img.shields.io/badge/AppVersion-1.0.0-informational?style=flat-square) 4 | 5 | A Helm chart for External Secrets using the Bitwarden ESO provider on Kubernetes 6 | 7 | ## Values 8 | 9 | | Key | Type | Default | Description | 10 | |-----|------|---------|-------------| 11 | | provider | string | `""` | if this is not set to "bitwarden", we will not actually deploy any templates we may support other secret providers in the future | 12 | | valkeyBitwardenID | string | `""` | nextcloud redis (actually for valkey) Credentials | 13 | 14 | ---------------------------------------------- 15 | Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) 16 | -------------------------------------------------------------------------------- /cert-manager/cert-manager_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: cert-manager-helm-chart 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: cert-manager 11 | source: 12 | repoURL: 'https://charts.jetstack.io' 13 | chart: cert-manager 14 | targetRevision: v1.19.1 15 | helm: 16 | releaseName: cert-manager 17 | valuesObject: 18 | config: 19 | featureGates: 20 | # Disable the use of Exact PathType in Ingress resources, to work around a bug in ingress-nginx 21 | # https://github.com/kubernetes/ingress-nginx/issues/11176 22 | ACMEHTTP01IngressPathTypeExact: false 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: cert-manager 26 | syncPolicy: 27 | syncOptions: 28 | - ApplyOutOfSyncOnly=true 29 | automated: 30 | prune: true 31 | selfHeal: true 32 | -------------------------------------------------------------------------------- /writefreely/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 6 | s3_provider: "seaweedfs" 7 | 8 | # -- existing kubernetes secret with s3 admin credentials 9 | s3AdminCredentialsBitwardenID: "" 10 | 11 | # -- existing kubernetes secret with s3 writefreely credentials 12 | s3writefreelyCredentialsBitwardenID: "" 13 | 14 | # -- existing kubernetes secret with s3 credentials for the remote backups 15 | s3BackupCredentialsBitwardenID: "" 16 | 17 | # other writefreely specific secrets 18 | 19 | # -- writefreely admin Credentials 20 | adminCredentialsBitwardenID: "" 21 | 22 | # -- writefreely smtp Credentials 23 | smtpCredentialsBitwardenID: "" 24 | 25 | # -- writefreely mysql Credentials 26 | mysqlBitwardenID: "" 27 | 28 | # -- writefreely OIDC Credentials 29 | oidcCredentialsBitwardenID: "" 30 | -------------------------------------------------------------------------------- /ingress-nginx/modsecurity_configmap/modsecurity_exception_files/jellyfin.conf: -------------------------------------------------------------------------------- 1 | # ------------------------------------------------------------------------- 2 | # Allow uploading images to collections 3 | # Rule ID #920420 Request content type is not allowed by policy 4 | # ------------------------------------------------------------------------- 5 | SecRule REQUEST_URI "@rx ^\/(?i)items\/.*\/images\/.*" \ 6 | "id:130000,\ 7 | phase:1,\ 8 | ver:'jellyfin-rule-exclusions-plugin/1.0.0',\ 9 | pass,\ 10 | t:none,\ 11 | nolog,\ 12 | setvar:'tx.allowed_request_content_type=|image/jpeg|image/apng|image/gif|image/tiff|image/webp|image/png|'" 13 | 14 | # allow removing something from favorites 15 | # allow rule ID 911100 to include DELETE 16 | SecRule REQUEST_URI "@rx ^/Users/.*/FavoriteItems/.*$" \ 17 | "id:130001,\ 18 | phase:1,\ 19 | ver:'jellyfin-rule-exclusions-plugin/1.0.0',\ 20 | pass,\ 21 | nolog,\ 22 | t:none,\ 23 | setvar:'tx.allowed_methods=GET HEAD POST OPTIONS DELETE'" 24 | -------------------------------------------------------------------------------- /grafana_stack/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # -- if this is not set to "bitwarden", we will not actually deploy any templates 2 | # we may support other secret providers in the future 3 | provider: "" 4 | 5 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 6 | s3_provider: "seaweedfs" 7 | 8 | # -- existing kubernetes secret with s3 admin credentials 9 | s3AdminCredentialsBitwardenID: "" 10 | 11 | # -- existing kubernetes secret with s3 loki credentials 12 | s3LokiCredentialsBitwardenID: "" 13 | 14 | # -- existing kubernetes secret with s3 mimir credentials 15 | s3MimirCredentialsBitwardenID: "" 16 | 17 | # -- optional existing kubernetes secret with s3 tempo credentials 18 | s3TempoCredentialsBitwardenID: "" 19 | 20 | # -- existing kubernetes secret with s3 credentials for the remote backups 21 | s3BackupCredentialsBitwardenID: "" 22 | 23 | # -- grafana OIDC Credentials 24 | oidcCredentialsBitwardenID: "" 25 | 26 | # -- valkey Credentials for loki 27 | lokiValkeyCredentialsBitwardenID: "" 28 | -------------------------------------------------------------------------------- /cert-manager/app_of_apps/cert-manager_argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: cert-manager-helm-chart 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "2" 9 | spec: 10 | project: cert-manager 11 | source: 12 | repoURL: 'https://charts.jetstack.io' 13 | chart: cert-manager 14 | targetRevision: v1.19.1 15 | helm: 16 | releaseName: cert-manager 17 | valuesObject: 18 | config: 19 | featureGates: 20 | # Disable the use of Exact PathType in Ingress resources, to work around a bug in ingress-nginx 21 | # https://github.com/kubernetes/ingress-nginx/issues/11176 22 | ACMEHTTP01IngressPathTypeExact: false 23 | destination: 24 | server: "https://kubernetes.default.svc" 25 | namespace: cert-manager 26 | syncPolicy: 27 | syncOptions: 28 | - ApplyOutOfSyncOnly=true 29 | automated: 30 | prune: true 31 | selfHeal: true 32 | -------------------------------------------------------------------------------- /mastodon/small-hack/external_secrets/values.yaml: -------------------------------------------------------------------------------- 1 | # Use external secrets. Set to bitwarden to use this chart. other providers may be supported in the future 2 | provider: "" 3 | 4 | adminCredentialsBitwardenID: "" 5 | pgsqlCredentialsBitwardenID: "" 6 | valkeyCredentialsBitwardenID: "" 7 | mastodonSecretsBitwardenID: "" 8 | smtpCredentialsBitwardenID: "" 9 | 10 | # -- if set to seaweedfs we deploy a policy secret. 
can also be minio 11 | s3_provider: "seaweedfs" 12 | 13 | # -- existing kubernetes secret with s3 admin credentials 14 | s3AdminCredentialsBitwardenID: "" 15 | # -- existing kubernetes secret with s3 mastodon credentials 16 | s3MastodonCredentialsBitwardenID: "" 17 | # -- existing kubernetes secret with s3 postgres credentials 18 | s3PostgresCredentialsBitwardenID: "" 19 | 20 | # -- existing kubernetes secret with s3 credentials for the remote backups 21 | s3BackupCredentialsBitwardenID: "" 22 | 23 | # -- existing kubernetes secret with libretranslate API secret key 24 | libretranslateApiKeybitwardenID: "" 25 | -------------------------------------------------------------------------------- /ghost/storage/templates/prebackup_pod_ghost.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k8up.io/v1 2 | kind: PreBackupPod 3 | metadata: 4 | name: ghost-mysqldump 5 | spec: 6 | backupCommand: sh -c 'mysqldump -u$USER -p$PW -h $DB_HOST --all-databases' 7 | pod: 8 | spec: 9 | containers: 10 | - name: ghost-mysqldump 11 | image: mariadb:10.4 12 | command: 13 | - 'sleep' 14 | - 'infinity' 15 | imagePullPolicy: Always 16 | env: 17 | - name: USER 18 | valueFrom: 19 | secretKeyRef: 20 | key: username 21 | name: ghost-mysql-credentials 22 | - name: PW 23 | valueFrom: 24 | secretKeyRef: 25 | key: mysql-password 26 | name: ghost-mysql-credentials 27 | - name: DB_HOST 28 | valueFrom: 29 | secretKeyRef: 30 | key: host 31 | name: ghost-mysql-credentials 32 | -------------------------------------------------------------------------------- /external-secrets-operator/app_of_apps/external-secrets-argocd_app.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: external-secrets-operator-helm 6 | namespace: argocd 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | project: external-secrets-operator 11 | destination: 12 | server: "https://kubernetes.default.svc" 13 | namespace: external-secrets 14 | syncPolicy: 15 | syncOptions: 16 | - CreateNamespace=true 17 | automated: 18 | prune: true 19 | selfHeal: true 20 | source: 21 | repoURL: 'https://charts.external-secrets.io' 22 | targetRevision: 0.20.4 23 | chart: external-secrets 24 | helm: 25 | releaseName: external-secrets 26 | # -- https://github.com/external-secrets/external-secrets/tree/main/deploy/charts/external-secrets 27 | valuesObject: 28 | fullnameOverride: external-secrets 29 | # If set, install and upgrade CRDs through helm chart. 30 | installCRDs: false 31 | --------------------------------------------------------------------------------