├── .github
├── PULL_REQUEST_TEMPLATE.md
├── ct.yaml
├── dependabot.yml
├── kubeval.sh
└── workflows
│ ├── helm-lint.yaml
│ ├── kubeval.yaml
│ └── release.yaml
├── .gitignore
├── .helmdocsignore
├── .idea
├── bashsupport_project.xml
├── codeStyles
│ └── codeStyleConfig.xml
├── copilot.data.migration.agent.xml
├── encodings.xml
├── inspectionProfiles
│ └── Project_Default.xml
├── jsLibraryMappings.xml
├── jsonSchemas.xml
├── k8s-infrastructure.iml
├── misc.xml
├── modules.xml
├── php.xml
└── vcs.xml
├── .nvmrc
├── .pre-commit-config.yaml
├── LICENSE.md
├── README.md
├── _config.yml
├── apps
├── ActualBudget.yaml
├── AudioBookShelf.yaml
├── Bitwarden.yaml
├── Flaresolverr.yaml
├── Forecastle.yaml
├── Harbor.yaml
├── Headlamp.yaml
├── HomeAssistant.yaml
├── Jellyfin.yaml
├── Lidarr.yaml
├── MQTTBroker.yaml
├── Mealie.yaml
├── MosquitoMQTTBroker.yaml
├── Ollama.yaml
├── OpenAIWhisper.yaml
├── OpenWebUI.yaml
├── Overseerr.yaml
├── Paperless.yaml
├── Photoprism.yaml
├── Plex.yaml
├── Prowlarr.yaml
├── Qdrant.yaml
├── RDiffWeb.yaml
├── Radarr.yaml
├── Resilio.yaml
├── SFTPGo.yaml
├── Sharry.yaml
├── Sonarr.yaml
├── Tdarr.yaml
├── Transmission.yaml
├── ZWaveJS.yaml
├── Zigbee2mqtt.yaml
├── generic
│ ├── base
│ │ ├── deployment.yaml
│ │ ├── ingress.yaml
│ │ ├── kustomization.yaml
│ │ └── service.yaml
│ └── overlays
│ │ ├── audiobookshelf
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── service.yaml
│ │ ├── jellyfin
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── service.yaml
│ │ ├── lidarr
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── service.yaml
│ │ ├── qdrant
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── service.yaml
│ │ ├── rdiffweb
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ │ ├── deployment.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── service.yaml
│ │ └── transmission
│ │ ├── kustomization.yaml
│ │ └── patches
│ │ ├── deployment.yaml
│ │ ├── ingress.yaml
│ │ └── service.yaml
├── mealie
│ └── base
│ │ ├── deployment.yaml
│ │ ├── ingress.yaml
│ │ ├── kustomization.yaml
│ │ └── service.yaml
├── open-webui
│ ├── base
│ │ ├── kustomization.yaml
│ │ ├── ollama-service.yaml
│ │ ├── ollama-statefulset.yaml
│ │ ├── webui-deployment.yaml
│ │ ├── webui-ingress.yaml
│ │ └── webui-service.yaml
│ ├── gandazgul
│ │ ├── kustomization.yaml
│ │ └── webui-deployment-patch.yaml
│ └── rafag
│ │ ├── kustomization.yaml
│ │ └── webui-deployment-patch.yaml
└── tdarr
│ └── base
│ ├── deployment.yaml
│ ├── ingress.yaml
│ ├── kustomization.yaml
│ └── service.yaml
├── charts
├── hostpath-provisioner
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates
│ │ ├── DaemonSet.yaml
│ │ ├── NOTES.txt
│ │ ├── StorageClass.yaml
│ │ ├── _helpers.tpl
│ │ └── rbac.yaml
│ └── values.yaml
└── mosca
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── README.md
│ ├── templates
│ ├── NOTES.txt
│ ├── _helpers.tpl
│ ├── deployment.yaml
│ ├── ingress.yaml
│ └── service.yaml
│ └── values.yaml
├── clusters
├── gandazgul
│ ├── ClusterKustomization.yaml
│ ├── apps
│ │ ├── RsyncCronJobs.yaml
│ │ ├── kustomization.yaml
│ │ ├── secrets
│ │ │ ├── kustomization.yaml
│ │ │ ├── paperless-values.yaml
│ │ │ ├── plex-values.yaml
│ │ │ ├── radarr-values.yaml
│ │ │ ├── resilio-values.yaml
│ │ │ └── sonarr-values.yaml
│ │ └── values
│ │ │ ├── paperless-values.yaml
│ │ │ ├── plex-values.yaml
│ │ │ ├── radarr-values.yaml
│ │ │ ├── resilio-values.yaml
│ │ │ └── sonarr-values.yaml
│ └── sealed-secret
│ │ └── SealedSecret.yaml
├── rafag
│ ├── ClusterKustomization.yaml
│ ├── apps
│ │ ├── kustomization.yaml
│ │ ├── secrets
│ │ │ ├── kustomization.yaml
│ │ │ ├── paperless-values.yaml
│ │ │ ├── plex-values.yaml
│ │ │ ├── radarr-values.yaml
│ │ │ ├── resilio-values.yaml
│ │ │ └── sonarr-values.yaml
│ │ └── values
│ │ │ ├── paperless-values.yaml
│ │ │ ├── plex-values.yaml
│ │ │ ├── radarr-values.yaml
│ │ │ ├── resilio-values.yaml
│ │ │ └── sonarr-values.yaml
│ └── sealed-secret
│ │ └── SealedSecret.yaml
└── renepor
│ ├── ClusterKustomization.yaml
│ ├── apps
│ ├── kustomization.yaml
│ ├── secrets
│ │ ├── kustomization.yaml
│ │ ├── plex-values.yaml
│ │ └── resilio-values.yaml
│ └── values
│ │ ├── plex-values.yaml
│ │ └── resilio-values.yaml
│ └── sealed-secret
│ └── SealedSecret.yaml
├── containers
├── container-build.js
├── container-run.js
├── ddns-cloudflare
│ ├── Containerfile
│ ├── app.js
│ ├── package.json
│ └── yarn.lock
├── rdiff-backup
│ └── Dockerfile
└── transmission-pia-port-forward
│ ├── Dockerfile
│ └── port-forwarding.sh
├── index.md
├── index.yaml
├── infrastructure
├── cert-manager
│ ├── Certificate.yaml
│ ├── controller
│ │ ├── CertManager.yaml
│ │ └── Reflector.yaml
│ └── issuers
│ │ ├── LetsEncryptProdCloudflareIssuer.yaml
│ │ ├── LetsEncryptProdDNSIssuer.yaml
│ │ ├── LetsEncryptStgCloudflareIssuer.yaml
│ │ └── LetsEncryptStgDNSIssuer.yaml
├── cronjob
│ ├── cronjob.yaml
│ └── kustomization.yaml
├── kube-system
│ ├── BackupCronJobs.yaml
│ ├── CertManagerKustomization.yaml
│ ├── CloudflareDDNS.yaml
│ ├── HostpathProvisioner.yaml
│ ├── IngressNginx.yaml
│ ├── Monitoring.yaml
│ ├── SealedSecretsController.yaml
│ ├── Storage.yaml
│ └── repos
│ │ ├── GeekCookbook.yaml
│ │ ├── HelmGandazgul.yaml
│ │ ├── HelmK8sAtHome.yaml
│ │ ├── HelmKubernetes.yaml
│ │ ├── HelmNginx.yaml
│ │ ├── HelmSagikazarmark.yaml
│ │ ├── HelmStable.yaml
│ │ └── K8sHomeLab.yaml
├── monitoring
│ ├── alertmanager
│ │ ├── alertmanager-alertmanager.yaml
│ │ ├── alertmanager-networkPolicy.yaml
│ │ ├── alertmanager-podDisruptionBudget.yaml
│ │ ├── alertmanager-prometheusRule.yaml
│ │ ├── alertmanager-secret.yaml
│ │ ├── alertmanager-service.yaml
│ │ ├── alertmanager-serviceAccount.yaml
│ │ ├── alertmanager-serviceMonitor.yaml
│ │ └── ingress.yml
│ ├── grafana
│ │ ├── grafana-config.yaml
│ │ ├── grafana-deployment.yaml
│ │ ├── grafana-ingress.yml
│ │ ├── grafana-service.yaml
│ │ ├── grafana-serviceAccount.yaml
│ │ └── grafana-serviceMonitor.yaml
│ ├── kube-prometheus
│ │ ├── blackboxExporter-clusterRole.yaml
│ │ ├── blackboxExporter-clusterRoleBinding.yaml
│ │ ├── blackboxExporter-configuration.yaml
│ │ ├── blackboxExporter-deployment.yaml
│ │ ├── blackboxExporter-networkPolicy.yaml
│ │ ├── blackboxExporter-service.yaml
│ │ ├── blackboxExporter-serviceAccount.yaml
│ │ ├── blackboxExporter-serviceMonitor.yaml
│ │ ├── grafana-dashboardDatasources.yaml
│ │ ├── grafana-dashboardDefinitions.yaml
│ │ ├── grafana-dashboardSources.yaml
│ │ ├── grafana-networkPolicy.yaml
│ │ ├── grafana-prometheusRule.yaml
│ │ ├── kubePrometheus-prometheusRule.yaml
│ │ ├── kubeStateMetrics-clusterRole.yaml
│ │ ├── kubeStateMetrics-clusterRoleBinding.yaml
│ │ ├── kubeStateMetrics-deployment.yaml
│ │ ├── kubeStateMetrics-networkPolicy.yaml
│ │ ├── kubeStateMetrics-prometheusRule.yaml
│ │ ├── kubeStateMetrics-service.yaml
│ │ ├── kubeStateMetrics-serviceAccount.yaml
│ │ ├── kubeStateMetrics-serviceMonitor.yaml
│ │ ├── kubernetesControlPlane-prometheusRule.yaml
│ │ ├── kubernetesControlPlane-serviceMonitorApiserver.yaml
│ │ ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml
│ │ ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
│ │ ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
│ │ ├── kubernetesControlPlane-serviceMonitorKubelet.yaml
│ │ ├── nodeExporter-clusterRole.yaml
│ │ ├── nodeExporter-clusterRoleBinding.yaml
│ │ ├── nodeExporter-daemonset.yaml
│ │ ├── nodeExporter-networkPolicy.yaml
│ │ ├── nodeExporter-prometheusRule.yaml
│ │ ├── nodeExporter-service.yaml
│ │ ├── nodeExporter-serviceAccount.yaml
│ │ ├── nodeExporter-serviceMonitor.yaml
│ │ ├── prometheusAdapter-apiService.yaml
│ │ ├── prometheusAdapter-clusterRole.yaml
│ │ ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
│ │ ├── prometheusAdapter-clusterRoleBinding.yaml
│ │ ├── prometheusAdapter-clusterRoleBindingDelegator.yaml
│ │ ├── prometheusAdapter-clusterRoleServerResources.yaml
│ │ ├── prometheusAdapter-configMap.yaml
│ │ ├── prometheusAdapter-deployment.yaml
│ │ ├── prometheusAdapter-networkPolicy.yaml
│ │ ├── prometheusAdapter-podDisruptionBudget.yaml
│ │ ├── prometheusAdapter-roleBindingAuthReader.yaml
│ │ ├── prometheusAdapter-service.yaml
│ │ ├── prometheusAdapter-serviceAccount.yaml
│ │ ├── prometheusAdapter-serviceMonitor.yaml
│ │ ├── prometheusOperator-clusterRole.yaml
│ │ ├── prometheusOperator-clusterRoleBinding.yaml
│ │ ├── prometheusOperator-deployment.yaml
│ │ ├── prometheusOperator-networkPolicy.yaml
│ │ ├── prometheusOperator-prometheusRule.yaml
│ │ ├── prometheusOperator-service.yaml
│ │ ├── prometheusOperator-serviceAccount.yaml
│ │ └── prometheusOperator-serviceMonitor.yaml
│ ├── prometheus
│ │ ├── ingress.yml
│ │ ├── prometheus-clusterRole.yaml
│ │ ├── prometheus-clusterRoleBinding.yaml
│ │ ├── prometheus-networkPolicy.yaml
│ │ ├── prometheus-podDisruptionBudget.yaml
│ │ ├── prometheus-prometheus.yaml
│ │ ├── prometheus-prometheusRule.yaml
│ │ ├── prometheus-roleBindingConfig.yaml
│ │ ├── prometheus-roleBindingSpecificNamespaces.yaml
│ │ ├── prometheus-roleConfig.yaml
│ │ ├── prometheus-roleSpecificNamespaces.yaml
│ │ ├── prometheus-service.yaml
│ │ ├── prometheus-serviceAccount.yaml
│ │ └── prometheus-serviceMonitor.yaml
│ └── setup
│ │ ├── 0alertmanagerConfigCustomResourceDefinition.yaml
│ │ ├── 0alertmanagerCustomResourceDefinition.yaml
│ │ ├── 0podmonitorCustomResourceDefinition.yaml
│ │ ├── 0probeCustomResourceDefinition.yaml
│ │ ├── 0prometheusCustomResourceDefinition.yaml
│ │ ├── 0prometheusagentCustomResourceDefinition.yaml
│ │ ├── 0prometheusruleCustomResourceDefinition.yaml
│ │ ├── 0scrapeconfigCustomResourceDefinition.yaml
│ │ ├── 0servicemonitorCustomResourceDefinition.yaml
│ │ └── 0thanosrulerCustomResourceDefinition.yaml
├── setup
│ ├── GitRepoSync.yaml.templ
│ ├── KubePrometheusStack.yaml
│ ├── KubeSystem.yaml
│ ├── SealedSecretsKustomization.yaml.templ
│ ├── change-branch.sh
│ ├── configure-cluster.sh
│ ├── install-flux.sh
│ ├── requirements.sh
│ └── secrets.env.example
└── storage
│ ├── hdd-class
│ ├── kustomization.yaml
│ └── sc.yaml
│ ├── hdd
│ └── kustomization.yaml
│ ├── pv
│ ├── kustomization.yaml
│ └── pv.yaml
│ └── pvc
│ ├── kustomization.yaml
│ └── pvc.yaml
├── install-k8s
├── 1-fedoraPostInstall.sh
├── 2-configK8SControlPlane.sh
├── 2-configK8SNode.sh
├── 3-installKVM.sh
├── configNode.sh
├── jail.local
└── kubeadm.yaml
├── package.json
└── yarn.lock
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gandazgul/k8s-infrastructure/8707698a1e0b03d95c941d4391eb49c6c9df4761/.github/PULL_REQUEST_TEMPLATE.md
--------------------------------------------------------------------------------
/.github/ct.yaml:
--------------------------------------------------------------------------------
1 | remote: origin
2 | target-branch: master
3 | helm-extra-args: --timeout 600s
4 | chart-dirs:
5 | - charts
6 | excluded-charts:
7 | - chart-template
8 | chart-repos:
9 | - bitnami=https://charts.bitnami.com/bitnami
10 | - k8s-at-home=https://k8s-at-home.com/charts
11 | - jetstack=https://charts.jetstack.io
12 | - gandazgul=https://gandazgul.github.io/k8s-infrastructure/
13 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | # To get started with Dependabot version updates, you'll need to specify which
2 | # package ecosystems to update and where the package manifests are located.
3 | # Please see the documentation for all configuration options:
4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
5 |
6 | version: 2
7 | updates:
8 | - package-ecosystem: "npm" # See documentation for possible values
9 | directory: "/" # Location of package manifests
10 | schedule:
11 | interval: "weekly"
12 |
--------------------------------------------------------------------------------
/.github/kubeval.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #set -euo pipefail  # NOTE(review): strict mode left disabled — presumably because grep exits non-zero when no Chart.yaml changed, which would abort the script; confirm before enabling
3 |
4 | CHART_DIRS="$(git diff --find-renames --name-only "$(git rev-parse --abbrev-ref HEAD)" remotes/origin/master -- charts | grep '[cC]hart.yaml' | sed -e 's#/[Cc]hart.yaml##g')"
5 | KUBEVAL_VERSION="0.15.0"
6 | SCHEMA_LOCATION="https://raw.githubusercontent.com/instrumenta/kubernetes-json-schema/master/"
7 |
8 | # install kubeval: download the pinned release tarball and extract the binary into the current working directory
9 | curl --silent --show-error --fail --location --output /tmp/kubeval.tar.gz https://github.com/instrumenta/kubeval/releases/download/"${KUBEVAL_VERSION}"/kubeval-linux-amd64.tar.gz
10 | tar -xf /tmp/kubeval.tar.gz kubeval
11 |
12 | # validate charts: render each changed chart with `helm template` and check the output against the Kubernetes JSON schema; KUBERNETES_VERSION is supplied by the calling workflow (leading "v" stripped)
13 | for CHART_DIR in ${CHART_DIRS}; do
14 | helm template "${CHART_DIR}" | ./kubeval --strict --ignore-missing-schemas --kubernetes-version "${KUBERNETES_VERSION#v}" --schema-location "${SCHEMA_LOCATION}"
15 | done
16 |
--------------------------------------------------------------------------------
/.github/workflows/helm-lint.yaml:
--------------------------------------------------------------------------------
1 | name: Lint and Test Charts
2 |
3 | on:
4 | pull_request:
5 | workflow_dispatch:
6 |
7 | jobs:
8 | lint-test:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - name: Checkout
12 | uses: actions/checkout@v2
13 | with:
14 | fetch-depth: 0
15 |
16 | - name: Install Helm
17 | uses: azure/setup-helm@v1
18 | with:
19 | version: v3.4.0
20 |
21 | - uses: actions/setup-python@v2
22 | with:
23 | python-version: 3.13.1
24 |
25 | - name: Set up chart-testing
26 | uses: helm/chart-testing-action@v2.0.1
27 |
28 | - name: Run chart-testing (list-changed)
29 | id: list-changed
30 | run: |
31 | returnCode=0
32 | changed=$(ct list-changed --config .github/ct.yaml) || returnCode=$?
33 | if [[ $returnCode = 0 ]]; then
34 | echo "changed=true" >> "$GITHUB_OUTPUT"
35 | fi
36 |
37 | - name: Run chart-testing (lint)
38 | id: lint
39 | run: ct lint --config .github/ct.yaml
40 | if: steps.list-changed.outputs.changed == 'true'
41 |
42 | - name: Create kind cluster
43 | uses: helm/kind-action@v1.1.0
44 | if: steps.list-changed.outputs.changed == 'true'
45 |
46 | - name: Run chart-testing (install)
47 | run: ct install --config .github/ct.yaml
48 | if: steps.list-changed.outputs.changed == 'true'
49 |
--------------------------------------------------------------------------------
/.github/workflows/kubeval.yaml:
--------------------------------------------------------------------------------
1 | name: Verify charts against k8s API Schema
2 |
3 | on:
4 | workflow_run:
5 | workflows: [ "Lint and Test Charts" ]
6 | types:
7 | - completed
8 |
9 | jobs:
10 | kubeval-chart:
11 | if: ${{ github.event.workflow_run.conclusion == 'success' }}
12 | runs-on: ubuntu-latest
13 | # needs:
14 | # - lint-test
15 | strategy:
16 | matrix:
17 | k8s:
18 | - v1.28.8
19 | steps:
20 | - name: Checkout
21 | uses: actions/checkout@v1
22 | - name: Run kubeval
23 | env:
24 | KUBERNETES_VERSION: ${{ matrix.k8s }}
25 | run: .github/kubeval.sh
26 |
--------------------------------------------------------------------------------
/.github/workflows/release.yaml:
--------------------------------------------------------------------------------
1 | name: Release Charts
2 |
3 | on:
4 | workflow_dispatch:
5 | push:
6 | branches:
7 | - master
8 | paths:
9 | - "charts/**"
10 |
11 | jobs:
12 | pre-release:
13 | runs-on: ubuntu-latest
14 | timeout-minutes: 5
15 | steps:
16 | - name: Block concurrent releases
17 | uses: softprops/turnstyle@v1
18 | with:
19 | continue-after-seconds: 180
20 | env:
21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22 |
23 | release:
24 | needs: pre-release
25 | runs-on: ubuntu-latest
26 | steps:
27 | - name: Checkout
28 | uses: actions/checkout@v2
29 | with:
30 | fetch-depth: 0
31 |
32 | - name: Configure Git
33 | run: |
34 | git config user.name "$GITHUB_ACTOR"
35 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
36 |
37 | - name: Install Helm
38 | uses: azure/setup-helm@v1
39 | with:
40 | version: v3.4.0
41 |
42 | - name: Run chart-releaser
43 | uses: helm/chart-releaser-action@v1.2.0
44 | with:
45 | charts_repo_url: https://gandazgul.github.io/k8s-infrastructure/
46 | env:
47 | CR_TOKEN: "${{ secrets.CR_TOKEN }}"
48 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # IntelliJ
2 | .idea/workspace.xml
3 | .idea/aws.xml
4 | .idea/copilot/
5 | .idea/copilot.data.migration*
6 |
7 | # docker
8 | containers/sshd/users.conf
9 | containers/size-checker/ssmtp.conf
10 |
11 | # chart deps
12 | charts/gogs/charts/
13 |
14 | # secrets and private files
15 | secrets.sh
16 | secrets.env
17 |
18 | # node
19 | node_modules/
20 |
21 | # yarn
22 | yarn-error.log
23 |
24 | # Mac
25 | .DS_Store
26 |
--------------------------------------------------------------------------------
/.helmdocsignore:
--------------------------------------------------------------------------------
1 | charts/chart-template/
2 |
--------------------------------------------------------------------------------
/.idea/bashsupport_project.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
--------------------------------------------------------------------------------
/.idea/codeStyles/codeStyleConfig.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
--------------------------------------------------------------------------------
/.idea/copilot.data.migration.agent.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/encodings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/jsLibraryMappings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/jsonSchemas.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/.idea/k8s-infrastructure.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/php.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/.nvmrc:
--------------------------------------------------------------------------------
1 | v22
2 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/norwoodj/helm-docs
3 | rev: v1.2.0
4 | hooks:
5 | - id: helm-docs
6 | args:
7 | # Make the tool search for charts only under the `example-charts` directory
8 | - --chart-search-root=charts
9 |
10 | # A base filename makes it relative to each chart directory found
11 | - --template-files=README.md.gotmpl
12 | - repo: https://github.com/pre-commit/pre-commit-hooks
13 | rev: v3.2.0
14 | hooks:
15 | - id: trailing-whitespace
16 | - id: end-of-file-fixer
17 | - id: check-added-large-files
18 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # MIT License
2 |
3 | Copyright (c) 2019 Carlos Ravelo
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # K8s Infrastructure Config
2 |
3 | This is a collection of scripts to deploy kubernetes v1.32.x on Fedora. Tested on Fedora 39.
4 |
5 | It's also a collection of helm charts that I developed or customized, as well as
6 | [flux v2](https://toolkit.fluxcd.io/) objects to deploy all the supported applications.
7 |
8 | We handled storage with PersistenceVolumes mapped to mount points on the host and pre-existing claims created that pods
9 | can use as volumes. There's a k8s cron job included to make differential backups between the main mount point and the
10 | backup one.
11 |
12 | [Documentation](https://gandazgul.github.io/k8s-infrastructure/)
13 |
14 | ---
15 |
16 | ## My Home Setup
17 |
18 | A small business server running the control plane node and worker. I plan to add at least one other node to learn to manage
19 | a "cluster" and to try to automate node on-boarding. I've tested the manual node on-boarding with VMs, and it works
20 | well. Look at this script: [install-k8s/2-configK8SNode.sh](https://github.com/gandazgul/k8s-infrastructure/blob/main/install-k8s/2-configK8SNode.sh)
21 |
22 | ```bash
23 | helm repo add gandazgul https://gandazgul.github.io/k8s-infrastructure/
24 | ```
25 |
26 | Here is the [index.yaml](https://gandazgul.github.io/k8s-infrastructure/index.yaml)
27 |
28 | ## What is YASR? I see it mentioned everywhere
29 |
30 | YASR is an in-joke, it stands for Yet Another Storage
31 | Repository (https://encyclopedia.thefreedictionary.com/Yet+Another) - SR is the name of storage volumes in Xenserver
32 | which we migrated from. YASR is the volume we use to store all application settings. MAIN and BACKUP hold all the app
33 | data and personal files; MAIN is backed up to BACKUP with rdiff-backup.
34 |
35 | ## License
36 |
37 | Unless specifically noted, all parts of this project are licensed under
38 | the [MIT license](https://github.com/gandazgul/k8s-infrastructure/blob/main/LICENSE.md).
39 |
40 | ## Contributing
41 |
42 | Contributions are more than welcome. Please open a PR with a good title and description of the change you are making.
43 | Links to docs or examples are great.
44 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | title: K8s Infrastructure
2 | description: K8s deployment scripts for Fedora 37 and a collection of helm charts and release configs for several apps and services.
3 | remote_theme: pmarsceill/just-the-docs
4 | # Aux links for the upper right navigation
5 | aux_links:
6 | "K8s Infrastructure on GitHub":
7 | - "https://github.com/gandazgul/k8s-infrastructure"
8 |
--------------------------------------------------------------------------------
/apps/ActualBudget.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: community-charts
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://community-charts.github.io/helm-charts
10 | ---
11 | apiVersion: source.toolkit.fluxcd.io/v1
12 | kind: GitRepository
13 | metadata:
14 | name: community-charts
15 | namespace: kube-system
16 | spec:
17 | interval: 1h0m0s
18 | url: https://github.com/gandazgul/helm-charts.git
19 | ref:
20 | branch: patch-1
21 | ---
22 | apiVersion: helm.toolkit.fluxcd.io/v2
23 | kind: HelmRelease
24 | metadata:
25 | name: actualbudget
26 | namespace: default
27 | spec:
28 | interval: 5m
29 | chart:
30 | spec:
31 | chart: ./charts/actualbudget
32 | version: 1.5.0
33 | sourceRef:
34 | # kind: HelmRepository
35 | kind: GitRepository
36 | name: community-charts
37 | namespace: kube-system
38 | interval: 1h
39 | values:
40 | strategy:
41 | type: Recreate
42 | files:
43 | server: /data/server-files
44 | user: /data/user-files
45 | ingress:
46 | enabled: true
47 | className: nginx
48 | hosts:
49 | - host: budget.${CLUSTER_DOMAIN_NAME}
50 | paths:
51 | - path: /
52 | pathType: ImplementationSpecific
53 | tls:
54 | - hosts:
55 | - budget.${CLUSTER_DOMAIN_NAME}
56 | secretName: internal-ingress-cert
57 | annotations:
58 | forecastle.stakater.com/expose: "true"
59 | forecastle.stakater.com/appName: "ActualBudget"
60 | forecastle.stakater.com/icon: "https://raw.githubusercontent.com/actualbudget/docs/refs/heads/master/static/img/actual.png"
61 | persistence:
62 | enabled: true
63 | existingClaim: yasr-volume
64 | subPath: configs/actualbudget
65 |
--------------------------------------------------------------------------------
/apps/AudioBookShelf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: audiobookshelf
6 | namespace: default
7 | spec:
8 | interval: 1h
9 | path: ./apps/generic/overlays/audiobookshelf/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: audiobookshelf
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/Bitwarden.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: bitwarden
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | chart: vaultwarden
12 | version: 3.1.3
13 | sourceRef:
14 | kind: HelmRepository
15 | name: k8s-at-home
16 | namespace: kube-system
17 | interval: 5m
18 | values:
19 | image:
20 | repository: vaultwarden/server
21 | tag: 1.32.3
22 | env:
23 | TZ: "${CLUSTER_TIME_ZONE}"
24 | DATA_FOLDER: "/config"
25 | WEB_VAULT_ENABLED: true
26 | LOG_FILE: "/config/bitwarden.log"
27 | # makes the logs more readable
28 | ROCKET_CLI_COLORS: false
29 | SIGNUPS_ALLOWED: true
30 | ADMIN_TOKEN: ${BITWARDEN_ADMIN_TOKEN}
31 | INVITATIONS_ALLOWED: true
32 | SHOW_PASSWORD_HINT: false
33 | DOMAIN: https://${BITWARDEN_SUBDOMAIN}.${CLUSTER_DOMAIN_NAME}
34 | SMTP_HOST: smtp.gmail.com
35 | SMTP_PORT: 587
36 | SMTP_FROM: ${EMAIL}
37 | SMTP_SECURITY: starttls
38 | SMTP_USERNAME: ${EMAIL}
39 | SMTP_PASSWORD: ${SMTP_PASSWORD}
40 | SERVER_ADMIN_EMAIL: ${EMAIL}
41 |
42 | # YUBI key secrets
43 | # YUBICO_CLIENT_ID
44 | # YUBICO_SECRET_KEY
45 | # Limits config: https://api.rocket.rs/v0.4/rocket/config/struct.Limits.html
46 | # Used to increase the API upload/post limit, default is 10MB
47 | # ROCKET_LIMITS
48 | # how many threads are spawn to handle requests. Docker image is set to 10
49 | # ROCKET_WORKERS
50 | persistence:
51 | config:
52 | enabled: true
53 | existingClaim: yasr-volume
54 | subPath: configs/bitwarden
55 | ingress:
56 | main:
57 | enabled: true
58 | hosts:
59 | - host: ${BITWARDEN_SUBDOMAIN}.${CLUSTER_DOMAIN_NAME}
60 | paths:
61 | - path: /
62 | tls:
63 | - hosts:
64 | - ${BITWARDEN_SUBDOMAIN}.${CLUSTER_DOMAIN_NAME}
65 | secretName: internal-ingress-cert
66 | annotations:
67 | kubernetes.io/ingress.class: "nginx"
68 | forecastle.stakater.com/appName: "Bitwarden"
69 | forecastle.stakater.com/expose: "true"
70 | forecastle.stakater.com/icon: "https://bitwarden.com/icons/icon-512x512.png"
71 |
--------------------------------------------------------------------------------
/apps/Flaresolverr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1beta2
3 | kind: OCIRepository
4 | metadata:
5 | name: flaresolverr
6 | namespace: kube-system
7 | spec:
8 | interval: 5m0s
9 | url: oci://ghcr.io/m0nsterrr/helm-charts/flaresolverr
10 | ref:
11 | semver: ">=2.0.1"
12 | ---
13 | apiVersion: helm.toolkit.fluxcd.io/v2
14 | kind: HelmRelease
15 | metadata:
16 | name: flaresolverr
17 | namespace: default
18 | spec:
19 | interval: 5m
20 | chartRef:
21 | kind: OCIRepository
22 | name: flaresolverr
23 | namespace: kube-system
24 | values:
25 | extraEnv:
26 | - name: PUID
27 | value: "1000"
28 | - name: PGID
29 | value: "1000"
30 | - name: TZ
31 | value: "${CLUSTER_TIME_ZONE}"
32 | service:
33 | port: 8191
34 | ingress:
35 | enabled: true
36 | ingressClassName: nginx
37 | hosts:
38 | - host: flaresolverr.${CLUSTER_DOMAIN_NAME}
39 | paths:
40 | - path: /
41 | pathType: ImplementationSpecific
42 | tls:
43 | - hosts:
44 | - flaresolverr.${CLUSTER_DOMAIN_NAME}
45 | secretName: internal-ingress-cert
46 | annotations:
47 | forecastle.stakater.com/appName: "Flaresolverr"
48 | forecastle.stakater.com/group: "Media"
49 | forecastle.stakater.com/expose: "true"
50 | forecastle.stakater.com/icon: ""
51 | resources:
52 | limits:
53 | memory: 512Mi
54 | cpu: 500m
55 | requests:
56 | memory: 256Mi
57 | cpu: 250m
58 |
--------------------------------------------------------------------------------
/apps/Forecastle.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: stakater-charts
6 | namespace: kube-system
7 | spec:
8 | interval: 1440m0s
9 | url: https://stakater.github.io/stakater-charts/
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: forecastle
15 | namespace: default
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: forecastle
21 | version: "1.0.159"
22 | sourceRef:
23 | kind: HelmRepository
24 | name: stakater-charts
25 | namespace: kube-system
26 | # All values at https://github.com/stakater/Forecastle/blob/master/deployments/kubernetes/chart/forecastle/values.yaml
27 | values:
28 | forecastle:
29 | image:
30 | repository: stakater/forecastle
31 | tag: v1.0.159
32 | deployment:
33 | securityContext:
34 | runAsUser: 1000
35 | fsGroup: 1000
36 | config:
37 | namespaceSelector:
38 | matchNames:
39 | - default
40 | - kube-system
41 | - monitoring
42 | title: Cluster Homepage
43 | customApps:
44 | - name: Plex
45 | icon: https://www.plex.tv/wp-content/themes/plex/assets/img/plex-logo.svg
46 | url: https://app.plex.tv/desktop/
47 | group: Media
48 | ingress:
49 | enabled: true
50 |     annotations:
51 |       kubernetes.io/ingress.class: "nginx"  # NOTE(review): this annotation has been deprecated since Kubernetes 1.18 — prefer spec.ingressClassName (same pattern recurs in several other app manifests in this repo)
52 | hosts:
53 | - host: home.${CLUSTER_DOMAIN_NAME}
54 | paths:
55 | - path: /
56 | pathType: ImplementationSpecific
57 | tls:
58 | - hosts:
59 | - home.${CLUSTER_DOMAIN_NAME}
60 | secretName: internal-ingress-cert
61 |
--------------------------------------------------------------------------------
/apps/Headlamp.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: GitRepository
4 | metadata:
5 | name: headlamp
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://github.com/kubernetes-sigs/headlamp.git
10 |   ref:
11 |     branch: main  # NOTE(review): tracking a moving branch makes chart deployments non-reproducible — consider pinning a tag or commit instead
12 | ---
13 | apiVersion: helm.toolkit.fluxcd.io/v2
14 | kind: HelmRelease
15 | metadata:
16 | name: headlamp
17 | namespace: default
18 | spec:
19 | interval: 1h
20 | chart:
21 | spec:
22 | chart: ./charts/headlamp
23 | sourceRef:
24 | kind: GitRepository
25 | name: headlamp
26 | namespace: kube-system
27 | values:
28 | config:
29 | oidc:
30 |       clientID: "${HEADLAMP_GOOGLE_CLIENT_ID}"
31 |       clientSecret: "${HEADLAMP_GOOGLE_CLIENT_SECRET}"  # quoted so special characters in the substituted secret cannot break the YAML
32 | issuerURL: https://accounts.google.com
33 | scopes: openid,email,profile
34 | volumes:
35 | - name: config
36 | persistentVolumeClaim:
37 | claimName: yasr-volume
38 | volumeMounts:
39 | - name: config
40 | mountPath: /headlamp/plugins
41 | subPath: configs/headlamp
42 | service:
43 | port: 4466
44 | ingress:
45 | enabled: true
46 | ingressClassName: nginx
47 | annotations:
48 | forecastle.stakater.com/appName: "Headlamp"
49 | forecastle.stakater.com/group: "Management"
50 | forecastle.stakater.com/expose: "true"
51 | forecastle.stakater.com/icon: "https://headlamp.dev/img/logo.svg"
52 | hosts:
53 | - host: dashboard.${CLUSTER_DOMAIN_NAME}
54 | paths:
55 | - path: /
56 | type: ImplementationSpecific
57 | tls:
58 | - secretName: internal-ingress-cert
59 | hosts:
60 | - dashboard.${CLUSTER_DOMAIN_NAME}
61 |
--------------------------------------------------------------------------------
/apps/Jellyfin.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: jellyfin
6 | namespace: default
7 | spec:
8 | interval: 1h
9 | path: ./apps/generic/overlays/jellyfin/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: jellyfin
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/Lidarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: lidarr
6 | namespace: default
7 | spec:
8 | interval: 1h
9 | path: ./apps/generic/overlays/lidarr/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: lidarr
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/MQTTBroker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: mqtt-broker
6 | namespace: default
7 | spec:
8 | chart:
9 | spec:
10 | chart: ./charts/mosca
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | interval: 1h0m0s
16 | values:
17 | # image:
18 | # tag: "v2.8.3"
19 | service:
20 | type: NodePort
21 |
--------------------------------------------------------------------------------
/apps/Mealie.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: mealie
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | path: ./apps/mealie/base/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: mealie
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/MosquitoMQTTBroker.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: mosquitto
6 | namespace: default
7 | spec:
8 | chart:
9 | spec:
10 | chart: mosquitto
11 | version: 4.3.2
12 | sourceRef:
13 | kind: HelmRepository
14 | name: k8s-at-home
15 | namespace: kube-system
16 | interval: 1h0m0s
17 | values:
18 | # TODO: why does it have to be NodePort?
19 | service:
20 | main:
21 | type: NodePort
22 | ports:
23 | mqtt:
24 |           enabled: true
25 |           port: 1883
26 |           nodePort: 1883  # NOTE(review): 1883 is outside the default NodePort range (30000-32767) — this only works if the API server runs with an extended --service-node-port-range
27 | persistence:
28 | configinc:
29 | enabled: true
30 | type: pvc
31 | existingClaim: yasr-volume
32 | subPath: configs/mosquitto
33 |
--------------------------------------------------------------------------------
/apps/Ollama.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: ollama
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: "https://otwld.github.io/ollama-helm"
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: ollama
15 | namespace: default
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: ollama
21 | version: 0.20.0
22 | sourceRef:
23 | kind: HelmRepository
24 | name: ollama
25 | namespace: kube-system
26 | interval: 5m
27 | values:
28 | image:
29 | pullPolicy: Always
30 | ollama:
31 | models:
32 | - llama2
33 | - llama2-uncensored
34 | ingress:
35 | enabled: true
36 | hosts:
37 | - host: ai.${CLUSTER_DOMAIN_NAME}
38 | paths:
39 | - path: /
40 | pathType: Prefix
41 | tls:
42 | - hosts:
43 | - ai.${CLUSTER_DOMAIN_NAME}
44 | secretName: internal-ingress-cert
45 | annotations:
46 | kubernetes.io/ingress.class: "nginx"
47 | forecastle.stakater.com/appName: "ollama"
48 | forecastle.stakater.com/expose: "true"
49 | forecastle.stakater.com/icon: "https://ollama.com/public/ollama.png"
50 | persistentVolume:
51 | enabled: true
52 | existingClaim: yasr-volume
53 | subPath: configs/ollama
54 |
--------------------------------------------------------------------------------
/apps/OpenAIWhisper.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: truecharts
6 | namespace: kube-system
7 | spec:
8 |   interval: 1m  # NOTE(review): polling the chart repository every minute is aggressive — the other HelmRepositories here use 1h; consider aligning
9 | url: "https://charts.truecharts.org"
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: wyoming-whisper
15 | namespace: default
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: wyoming-whisper
21 | version: "5.0.0"
22 | sourceRef:
23 | kind: HelmRepository
24 | name: truecharts
25 | namespace: kube-system
26 | #https://github.com/truecharts/charts/blob/master/charts/stable/wyoming-whisper/values.yaml
27 |   values:
28 |     key1: value1  # FIXME(review): placeholder example values left in — almost certainly ignored by the chart; replace with real wyoming-whisper configuration (see values.yaml link above)
29 |     key2:
30 |       subkey1: value2
31 |       subkey2: value3
32 |
--------------------------------------------------------------------------------
/apps/OpenWebUI.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: open-webui
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | path: ./apps/open-webui/${CLUSTER_NAME}
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: open-webui
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/Overseerr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: overseerr
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | chart: overseerr
12 | version: 5.4.2
13 | sourceRef:
14 | kind: HelmRepository
15 | name: k8s-at-home
16 | namespace: kube-system
17 | interval: 5m
18 | # valuesFrom:
19 | # - kind: Secret
20 | # name: overseerr-values
21 | # optional: false
22 | values:
23 | image:
24 | tag: latest
25 | pullPolicy: Always
26 | env:
27 | TZ: "${CLUSTER_TIME_ZONE}"
28 | persistence:
29 | config:
30 | enabled: true
31 | type: pvc
32 | existingClaim: yasr-volume
33 | subPath: configs/overseerr
34 | ingress:
35 | main:
36 | enabled: true
37 | hosts:
38 | - host: media.${CLUSTER_DOMAIN_NAME}
39 | paths:
40 | - path: /
41 | tls:
42 | - hosts:
43 | - media.${CLUSTER_DOMAIN_NAME}
44 | secretName: internal-ingress-cert
45 | annotations:
46 | kubernetes.io/ingress.class: "nginx"
47 | forecastle.stakater.com/appName: "Overseerr"
48 | forecastle.stakater.com/group: "Media"
49 | forecastle.stakater.com/expose: "true"
50 | forecastle.stakater.com/icon: "https://media.${CLUSTER_DOMAIN_NAME}/apple-touch-icon.png"
51 |
--------------------------------------------------------------------------------
/apps/Photoprism.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: photoprism
6 | namespace: default
7 | spec:
8 | chart:
9 | spec:
10 | chart: photoprism
11 | version: 6.5.0
12 | sourceRef:
13 | kind: HelmRepository
14 | name: k8s-at-home
15 | namespace: kube-system
16 | interval: 1h0m0s
17 | values:
18 | image:
19 | pullPolicy: Always
20 | tag: latest
21 | env:
22 | UID: "${PHOTOS_UID}"
23 | GID: "${PHOTOS_GID}"
24 | TZ: "${CLUSTER_TIME_ZONE}"
25 |       PHOTOPRISM_ADMIN_PASSWORD: "${ADMIN_PASSWORD}"  # quoted so special characters in the substituted password cannot break the YAML
26 | persistence:
27 | config:
28 | enabled: true
29 | mountPath: /photoprism/storage
30 | existingClaim: yasr-volume
31 | subPath: configs/photoprism
32 | originals:
33 | enabled: true
34 | mountPath: /photoprism/originals
35 | existingClaim: main-volume
36 | subPath: ${PHOTOS_PATH}
37 | ingress:
38 | main:
39 | enabled: true
40 | hosts:
41 | - host: photos.${CLUSTER_DOMAIN_NAME}
42 | paths:
43 | - path: /
44 | tls:
45 | - hosts:
46 | - photos.${CLUSTER_DOMAIN_NAME}
47 | secretName: internal-ingress-cert
48 | annotations:
49 | kubernetes.io/ingress.class: "nginx"
50 | forecastle.stakater.com/appName: "Photos"
51 | forecastle.stakater.com/expose: "true"
52 | forecastle.stakater.com/icon: "https://www.photoprism.app/static/icons/logo.svg"
53 |
--------------------------------------------------------------------------------
/apps/Plex.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: plex
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | # renovate: registryUrl=https://k8s-at-home.com/charts/
12 | chart: plex
13 | version: 6.2.1
14 | sourceRef:
15 | kind: HelmRepository
16 | name: k8s-at-home
17 | namespace: kube-system
18 | interval: 5m
19 | valuesFrom:
20 | - kind: Secret
21 | name: plex-values
22 | optional: false
23 | # All values at https://github.com/k8s-at-home/charts/blob/main/charts/plex/values.yaml
24 | values:
25 | image:
26 | repository: plexinc/pms-docker
27 | tag: plexpass
28 | pullPolicy: Always
29 | service:
30 | main:
31 | primary: true
32 | type: NodePort
33 | ports:
34 | http:
35 | nodePort: 32400
36 | port: 32400
37 | env:
38 | TZ: "${CLUSTER_TIME_ZONE}"
39 | ADVERTISE_IP: "http://${DYN_DNS_NAME}:32400/,http://${CONTROL_PLANE_IP}:32400/"
40 | probes:
41 | liveness:
42 | enabled: true
43 | readiness:
44 | enabled: true
45 | startup:
46 | enabled: true
47 | persistence:
48 | config:
49 | enabled: true
50 | type: pvc
51 | existingClaim: yasr-volume
52 | subPath: configs/plex
53 | ## Data is in the individual plex-values for each cluster
54 | # data:
55 | # enabled: true
56 | # type: pvc
57 | # existingClaim: main-volume
58 | # subPath:
59 | # - path: public
60 | # mountPath: /data
61 | transcode:
62 | enabled: true
63 | type: pvc
64 | size: ${PLEX_TRANSCODE_SIZE:=60Gi}
65 | accessMode: ReadWriteOnce
66 | resources:
67 | limits:
68 | memory: 4900Mi
69 | requests:
70 | cpu: 35m
71 | memory: 1500Mi
72 | podSecurityContext:
73 | # Hardware acceleration using an Intel iGPU w/ QuickSync
74 | # These IDs below should be matched to your `video` and `render` group on the host
75 | # To obtain those IDs run the following grep statement on the host:
76 | # $ cat /etc/group | grep "video\|render"
77 | # video:x:44:
78 | # render:x:109:
79 | supplementalGroups:
80 | - 39
81 | - 998
82 |
--------------------------------------------------------------------------------
/apps/Prowlarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: prowlarr
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | # renovate: registryUrl=https://k8s-at-home.com/charts/
12 | chart: prowlarr
13 | version: 4.2.0
14 | sourceRef:
15 | kind: HelmRepository
16 | name: k8s-at-home
17 | namespace: kube-system
18 | interval: 5m
19 | values:
20 | image:
21 | repository: docker.io/linuxserver/prowlarr
22 | tag: develop
23 | pullPolicy: Always
24 | env:
25 | PUID: "1000"
26 | PGID: "1000"
27 | TZ: "${CLUSTER_TIME_ZONE}"
28 | persistence:
29 | config:
30 | enabled: true
31 | type: pvc
32 | existingClaim: yasr-volume
33 | subPath: configs/prowlarr
34 | ingress:
35 | main:
36 | enabled: true
37 | hosts:
38 | - host: seedbox.${CLUSTER_DOMAIN_NAME}
39 | paths:
40 | - path: /
41 | tls:
42 | - hosts:
43 | - seedbox.${CLUSTER_DOMAIN_NAME}
44 | secretName: internal-ingress-cert
45 | annotations:
46 | kubernetes.io/ingress.class: "nginx"
47 | forecastle.stakater.com/appName: "Prowlarr"
48 | forecastle.stakater.com/group: "Media"
49 | forecastle.stakater.com/expose: "true"
50 | forecastle.stakater.com/icon: "https://seedbox.${CLUSTER_DOMAIN_NAME}/Content/Images/logo.png"
51 |
--------------------------------------------------------------------------------
/apps/Qdrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: qdrant
6 | namespace: default
7 | spec:
8 | interval: 1h
9 | path: ./apps/generic/overlays/qdrant/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: qdrant
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/RDiffWeb.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: rdiffweb
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | path: ./apps/generic/overlays/rdiffweb/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: rdiffweb
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/Radarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: radarr
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | # renovate: registryUrl=https://k8s-at-home.com/charts/
12 | chart: radarr
13 | version: 16.0.1
14 | sourceRef:
15 | kind: HelmRepository
16 | name: k8s-at-home
17 | namespace: kube-system
18 | interval: 5m
19 | valuesFrom:
20 | - kind: Secret
21 | name: radarr-values
22 | optional: false
23 | values:
24 | image:
25 | repository: docker.io/linuxserver/radarr
26 | tag: amd64-latest
27 | pullPolicy: Always
28 | env:
29 | PUID: "1000"
30 | PGID: "1000"
31 | TZ: "${CLUSTER_TIME_ZONE}"
32 | persistence:
33 | config:
34 | enabled: true
35 | type: pvc
36 | existingClaim: yasr-volume
37 | subPath:
38 | - path: configs/radarr/
39 | mountPath: /config
40 | - path: configs/transmission/
41 | mountPath: /data
42 | ingress:
43 | main:
44 | enabled: true
45 | hosts:
46 | - host: movies.${CLUSTER_DOMAIN_NAME}
47 | paths:
48 | - path: /
49 | pathType: ImplementationSpecific
50 | tls:
51 | - hosts:
52 | - movies.${CLUSTER_DOMAIN_NAME}
53 | secretName: internal-ingress-cert
54 | annotations:
55 | kubernetes.io/ingress.class: "nginx"
56 | forecastle.stakater.com/appName: "Movies"
57 | forecastle.stakater.com/group: "Media"
58 | forecastle.stakater.com/expose: "true"
59 | forecastle.stakater.com/icon: "https://movies.${CLUSTER_DOMAIN_NAME}/Content/Images/logo.png"
60 |
--------------------------------------------------------------------------------
/apps/Resilio.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: resilio
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | chart: resilio-sync
12 | version: 5.0.1
13 | sourceRef:
14 | kind: HelmRepository
15 | name: k8s-at-home
16 | namespace: kube-system
17 | interval: 5m
18 | valuesFrom:
19 | - kind: Secret
20 | name: resilio-values
21 | optional: false
22 | values:
23 | env:
24 | TZ: "${CLUSTER_TIME_ZONE}"
25 | PUID: "1000"
26 | PGID: "1000"
27 | ingress:
28 | main:
29 | enabled: true
30 | hosts:
31 | - host: resilio.${CLUSTER_DOMAIN_NAME}
32 | paths:
33 | - path: /
34 | tls:
35 | - hosts:
36 | - resilio.${CLUSTER_DOMAIN_NAME}
37 | secretName: internal-ingress-cert
38 | annotations:
39 | kubernetes.io/ingress.class: "nginx"
40 | forecastle.stakater.com/appName: "Resilio"
41 | forecastle.stakater.com/group: "File Sharing"
42 | forecastle.stakater.com/expose: "true"
43 | forecastle.stakater.com/icon: "https://www.resilio.com/images/logo.svg"
44 |
--------------------------------------------------------------------------------
/apps/Sonarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: helm.toolkit.fluxcd.io/v2
3 | kind: HelmRelease
4 | metadata:
5 | name: sonarr
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | chart:
10 | spec:
11 | # renovate: registryUrl=https://k8s-at-home.com/charts/
12 | chart: sonarr
13 | version: 16.0.1
14 | sourceRef:
15 | kind: HelmRepository
16 | name: k8s-at-home
17 | namespace: kube-system
18 | interval: 5m
19 | valuesFrom:
20 | - kind: Secret
21 | name: sonarr-values
22 | optional: false
23 | values:
24 | image:
25 | repository: docker.io/linuxserver/sonarr
26 | tag: amd64-latest
27 | pullPolicy: Always
28 | env:
29 | PUID: "1000"
30 | PGID: "1000"
31 | TZ: "${CLUSTER_TIME_ZONE}"
32 | persistence:
33 | config:
34 | enabled: true
35 | type: pvc
36 | existingClaim: yasr-volume
37 | subPath:
38 | - path: configs/sonarr
39 | mountPath: /config
40 | - path: configs/transmission
41 | mountPath: /data
42 | ingress:
43 | main:
44 | enabled: true
45 | hosts:
46 | - host: tv.${CLUSTER_DOMAIN_NAME}
47 | paths:
48 | - path: /
49 | tls:
50 | - hosts:
51 | - tv.${CLUSTER_DOMAIN_NAME}
52 | secretName: internal-ingress-cert
53 | annotations:
54 | kubernetes.io/ingress.class: "nginx"
55 | forecastle.stakater.com/appName: "TV"
56 | forecastle.stakater.com/group: "Media"
57 | forecastle.stakater.com/expose: "true"
58 | forecastle.stakater.com/icon: "https://tv.${CLUSTER_DOMAIN_NAME}/Content/Images/logo.svg"
59 |
--------------------------------------------------------------------------------
/apps/Tdarr.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: tdarr
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | path: ./apps/tdarr/base/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: tdarr
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
--------------------------------------------------------------------------------
/apps/Transmission.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: transmission
6 | namespace: default
7 | spec:
8 | interval: 5m
9 | path: ./apps/generic/overlays/transmission/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 | healthChecks:
16 | - apiVersion: apps/v1
17 | kind: Deployment
18 | name: transmission
19 | namespace: default
20 | postBuild:
21 | substituteFrom:
22 | - kind: Secret
23 | name: secrets
24 |
--------------------------------------------------------------------------------
/apps/ZWaveJS.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: k8sonlab
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://charts.ar80.eu/
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: zwavejs
15 | namespace: default
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: zwave-js-ui
21 | version: 0.2.31
22 | sourceRef:
23 | kind: HelmRepository
24 | name: k8sonlab
25 | namespace: kube-system
26 | interval: 5m
27 | values:
28 | securityContext:
29 | privileged: true
30 | image:
31 | tag: latest
32 | pullPolicy: Always
33 | env:
34 | TZ: "${CLUSTER_TIME_ZONE}"
35 | ingress:
36 | enabled: true
37 | className: nginx
38 | hosts:
39 | - host: zwave.${CLUSTER_DOMAIN_NAME}
40 | paths:
41 | - path: /
42 | pathType: ImplementationSpecific
43 | tls:
44 | - hosts:
45 | - zwave.${CLUSTER_DOMAIN_NAME}
46 | secretName: internal-ingress-cert
47 | annotations:
48 | forecastle.stakater.com/appName: "ZWaveJS"
49 | forecastle.stakater.com/group: "Management"
50 | forecastle.stakater.com/expose: "true"
51 | forecastle.stakater.com/icon: "https://zwave.${CLUSTER_DOMAIN_NAME}/logo.svg"
52 | persistence:
53 | enabled: true
54 | mountPath: /usr/src/app/store/
55 | existingClaim: yasr-volume
56 | subPath: configs/zwavejs
57 | affinity:
58 | nodeAffinity:
59 | requiredDuringSchedulingIgnoredDuringExecution:
60 | nodeSelectorTerms:
61 | - matchExpressions:
62 | - key: node-role.kubernetes.io/control-plane
63 | operator: Exists
64 |
--------------------------------------------------------------------------------
/apps/Zigbee2mqtt.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: andrenarchy
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://andrenarchy.github.io/helm-charts/
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: zigbee2mqtt
15 | namespace: default
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: zigbee2mqtt
21 | version: 9.25.0
22 | sourceRef:
23 | kind: HelmRepository
24 | name: andrenarchy
25 | namespace: kube-system
26 | interval: 5m
27 | values:
28 | securityContext:
29 | privileged: true
30 | image:
31 | repository: docker.io/koenkk/zigbee2mqtt
32 | tag: latest
33 | # pullPolicy: Always
34 | env:
35 | TZ: "${CLUSTER_TIME_ZONE}"
36 | ingress:
37 | main:
38 | enabled: true
39 | hosts:
40 | - host: zigbee.${CLUSTER_DOMAIN_NAME}
41 | paths:
42 | - path: /
43 | tls:
44 | - hosts:
45 | - zigbee.${CLUSTER_DOMAIN_NAME}
46 | secretName: internal-ingress-cert
47 | annotations:
48 | kubernetes.io/ingress.class: "nginx"
49 | forecastle.stakater.com/expose: "true"
50 | forecastle.stakater.com/icon: ""
51 | persistence:
52 | usb:
53 | enabled: true
54 | type: hostPath
55 | hostPath: /dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_e258ac37e35fec11af99305f25bfaa52-if00-port0
56 | data:
57 | enabled: true
58 | mountPath: /data
59 | existingClaim: yasr-volume
60 | subPath: configs/zigbee2mqtt
61 | affinity:
62 | nodeAffinity:
63 | requiredDuringSchedulingIgnoredDuringExecution:
64 | nodeSelectorTerms:
65 | - matchExpressions:
66 | - key: node-role.kubernetes.io/control-plane
67 | operator: Exists
68 |
--------------------------------------------------------------------------------
/apps/generic/base/deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: deployment
6 | spec:
7 | replicas: 1
8 | revisionHistoryLimit: 2
9 | template:
10 | spec:
11 | containers:
12 | - name: container
13 | image: changeme
14 | imagePullPolicy: IfNotPresent
15 | livenessProbe:
16 | httpGet:
17 | scheme: HTTP
18 | path: /
19 | port: http
20 | initialDelaySeconds: 30
21 | timeoutSeconds: 10
22 | resources:
23 | requests:
24 | cpu: 100m
25 | memory: 250Mi
26 | limits:
27 | cpu: 500m
28 | memory: 1Gi
29 | volumes:
30 | - name: yasr-volume
31 | persistentVolumeClaim:
32 | claimName: yasr-volume
33 | - name: backup-volume
34 | persistentVolumeClaim:
35 | claimName: backup-volume
36 | - name: main-volume
37 | persistentVolumeClaim:
38 | claimName: main-volume
39 |
--------------------------------------------------------------------------------
/apps/generic/base/ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: ingress
6 | spec:
7 | ingressClassName: nginx
8 | rules:
9 | - host: ${INGRESS_HOSTNAME}
10 | http:
11 | paths:
12 | - path: /
13 | pathType: ImplementationSpecific
14 | backend:
15 | service:
16 | name: service
17 | port:
18 | name: http
19 | tls:
20 | - hosts:
21 | - ${INGRESS_HOSTNAME}
22 | secretName: internal-ingress-cert
23 |
--------------------------------------------------------------------------------
/apps/generic/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: default
5 |
6 | resources:
7 | - deployment.yaml
8 | - service.yaml
9 | - ingress.yaml
10 |
--------------------------------------------------------------------------------
/apps/generic/base/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: service
5 | spec:
6 | type: ClusterIP
7 |
--------------------------------------------------------------------------------
/apps/generic/overlays/audiobookshelf/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: default
5 | resources:
6 | - ../../base/
7 |
8 | labels:
9 | - pairs:
10 | app.kubernetes.io/name: audiobookshelf
11 | includeSelectors: true
12 |
13 | patches:
14 | - path: patches/deployment.yaml
15 | target:
16 | group: apps
17 | version: v1
18 | kind: Deployment
19 | name: deployment
20 | namespace: default
21 | options:
22 | allowNameChange: true
23 | - path: patches/ingress.yaml
24 | target:
25 | group: networking.k8s.io
26 | version: v1
27 | kind: Ingress
28 | name: ingress
29 | namespace: default
30 | options:
31 | allowNameChange: true
32 | - path: patches/service.yaml
33 | target:
34 | group: ""
35 | version: v1
36 | kind: Service
37 | name: service
38 | namespace: default
39 | options:
40 | allowNameChange: true
41 | - patch: |-
42 | - op: replace
43 | path: /spec/rules/0/http/paths/0/backend/service/name
44 | value: audiobookshelf
45 | - op: replace
46 | path: /spec/rules/0/host
47 | value: read.${CLUSTER_DOMAIN_NAME}
48 | - op: replace
49 | path: /spec/tls/0/hosts/0
50 | value: read.${CLUSTER_DOMAIN_NAME}
51 | target:
52 | group: networking.k8s.io
53 | version: v1
54 | kind: Ingress
55 | name: audiobookshelf
56 |
--------------------------------------------------------------------------------
/apps/generic/overlays/audiobookshelf/patches/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: audiobookshelf
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: container
10 | image: ghcr.io/advplyr/audiobookshelf:latest
11 | imagePullPolicy: Always
12 |           volumeMounts:
13 |             - name: yasr-volume
14 |               mountPath: /config
15 |               subPath: configs/audiobookshelf/
16 |             - name: yasr-volume
17 |               mountPath: /metadata
18 |               subPath: configs/audiobookshelf/  # NOTE(review): /config and /metadata share the same subPath, so both stores land in one directory — confirm this is intended and not a copy-paste slip
19 | - name: main-volume
20 | mountPath: /data
21 | subPath: public/Books/
22 | livenessProbe:
23 | httpGet:
24 | scheme: HTTP
25 | path: /
26 | port: http
27 | initialDelaySeconds: 30
28 | timeoutSeconds: 10
29 | ports:
30 | - name: http
31 | containerPort: 80
32 | protocol: TCP
33 |
--------------------------------------------------------------------------------
/apps/generic/overlays/audiobookshelf/patches/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: audiobookshelf
5 | annotations:
6 | forecastle.stakater.com/appName: "AudioBookShelf"
7 | forecastle.stakater.com/group: "Media"
8 | forecastle.stakater.com/expose: "true"
9 | forecastle.stakater.com/icon: "https://read.${CLUSTER_DOMAIN_NAME}/audiobookshelf/_nuxt/img/icon.d3d4aef.svg"
10 |
--------------------------------------------------------------------------------
/apps/generic/overlays/audiobookshelf/patches/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: audiobookshelf
5 | spec:
6 | ports:
7 | - name: http
8 | port: 5000
9 | protocol: TCP
10 | targetPort: http
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/jellyfin/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - ../../base/

# Stamp every base resource (and its selectors) with this app's name so the
# Deployment, Service, and Ingress stay linked after renaming.
labels:
  - pairs:
      app.kubernetes.io/name: jellyfin
    includeSelectors: true

patches:
  # Rename the generic base objects to "jellyfin" and overlay app-specific
  # fields; allowNameChange permits the metadata.name change in each patch.
  - path: patches/deployment.yaml
    target:
      group: apps
      version: v1
      kind: Deployment
      name: deployment
      namespace: default
    options:
      allowNameChange: true
  - path: patches/ingress.yaml
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: ingress
      namespace: default
    options:
      allowNameChange: true
  - path: patches/service.yaml
    target:
      group: ""
      version: v1
      kind: Service
      name: service
      namespace: default
    options:
      allowNameChange: true
  # JSON6902 patch: point the renamed Ingress at the renamed Service and set
  # the public hostname (${CLUSTER_DOMAIN_NAME} is substituted at deploy time).
  - patch: |-
      - op: replace
        path: /spec/rules/0/http/paths/0/backend/service/name
        value: jellyfin
      - op: replace
        path: /spec/rules/0/host
        value: jellyfin.${CLUSTER_DOMAIN_NAME}
      - op: replace
        path: /spec/tls/0/hosts/0
        value: jellyfin.${CLUSTER_DOMAIN_NAME}
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: jellyfin
56 |
--------------------------------------------------------------------------------
/apps/generic/overlays/jellyfin/patches/deployment.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: Jellyfin media server settings layered onto the generic
# base Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jellyfin
spec:
  template:
    spec:
      containers:
        - name: container
          # Pinned to the 10.10 minor line; Always-pull picks up patch builds.
          image: ghcr.io/jellyfin/jellyfin:10.10
          imagePullPolicy: Always
          resources:
            requests:
              cpu: 1
              memory: 512Mi
            # Generous limits — presumably sized for transcoding workloads.
            limits:
              cpu: 4
              memory: 8Gi
          volumeMounts:
            - name: yasr-volume
              mountPath: /config
              subPath: configs/jellyfin/
            - name: main-volume
              mountPath: /media
              subPath: public/
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 10
          ports:
            - name: http
              containerPort: 8096
              protocol: TCP
37 |
--------------------------------------------------------------------------------
/apps/generic/overlays/jellyfin/patches/ingress.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: rename the base Ingress and register Jellyfin with the
# Forecastle dashboard.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: jellyfin
  annotations:
    forecastle.stakater.com/appName: "Jellyfin"
    forecastle.stakater.com/group: "Media"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://jellyfin.org/images/logo.svg"
10 |
--------------------------------------------------------------------------------
/apps/generic/overlays/jellyfin/patches/service.yaml:
--------------------------------------------------------------------------------
# Kustomize patch for the generic base Service: rename it to "jellyfin" and
# publish the web UI port.
apiVersion: v1
kind: Service
metadata:
  name: jellyfin
spec:
  ports:
    # Forward service port 8096 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 8096
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/lidarr/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - ../../base/

# Stamp every base resource (and its selectors) with this app's name.
labels:
  - pairs:
      app.kubernetes.io/name: lidarr
    includeSelectors: true

patches:
  # Rename the generic base objects to "lidarr" and overlay app-specific
  # fields; allowNameChange permits the metadata.name change in each patch.
  - path: patches/deployment.yaml
    target:
      group: apps
      version: v1
      kind: Deployment
      name: deployment
      namespace: default
    options:
      allowNameChange: true
  - path: patches/ingress.yaml
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: ingress
      namespace: default
    options:
      allowNameChange: true
  - path: patches/service.yaml
    target:
      group: ""
      version: v1
      kind: Service
      name: service
      namespace: default
    options:
      allowNameChange: true
  # JSON6902 patch: point the renamed Ingress at the renamed Service and serve
  # it as "music.<domain>" (${CLUSTER_DOMAIN_NAME} substituted at deploy time).
  - patch: |-
      - op: replace
        path: /spec/rules/0/http/paths/0/backend/service/name
        value: lidarr
      - op: replace
        path: /spec/rules/0/host
        value: music.${CLUSTER_DOMAIN_NAME}
      - op: replace
        path: /spec/tls/0/hosts/0
        value: music.${CLUSTER_DOMAIN_NAME}
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: lidarr
56 |
--------------------------------------------------------------------------------
/apps/generic/overlays/lidarr/patches/deployment.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: Lidarr music manager settings layered onto the generic
# base Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: lidarr
spec:
  template:
    spec:
      containers:
        - name: container
          image: docker.io/linuxserver/lidarr:latest
          imagePullPolicy: Always
          volumeMounts:
            - name: yasr-volume
              mountPath: /config
              subPath: configs/lidarr/
            # NOTE(review): /data points at transmission's config subPath —
            # presumably so Lidarr sees completed downloads at the same path
            # as the download client; confirm against the transmission setup.
            - name: yasr-volume
              mountPath: /data
              subPath: configs/transmission/
            - name: main-volume
              mountPath: /media
              subPath: public/Music/
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: http
            # Long initial delay: Lidarr can take minutes on first start.
            initialDelaySeconds: 300
            timeoutSeconds: 10
            periodSeconds: 10
            failureThreshold: 3
          env:
            # linuxserver.io images run the app as PUID/PGID.
            - name: PUID
              value: "1000"
            - name: PGID
              value: "1000"
            - name: TZ
              value: "${CLUSTER_TIME_ZONE}"
          ports:
            - name: http
              containerPort: 8686
              protocol: TCP
42 |
--------------------------------------------------------------------------------
/apps/generic/overlays/lidarr/patches/ingress.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: rename the base Ingress and register Lidarr (as "Music")
# with the Forecastle dashboard.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: lidarr
  annotations:
    forecastle.stakater.com/appName: "Music"
    forecastle.stakater.com/group: "Media"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://music.${CLUSTER_DOMAIN_NAME}/Content/Images/logo.svg"
10 |
--------------------------------------------------------------------------------
/apps/generic/overlays/lidarr/patches/service.yaml:
--------------------------------------------------------------------------------
# Kustomize patch for the generic base Service: rename it to "lidarr" and
# publish the web UI port.
apiVersion: v1
kind: Service
metadata:
  name: lidarr
spec:
  ports:
    # Forward service port 8686 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 8686
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/qdrant/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - ../../base/
# Stamp every base resource (and its selectors) with this app's name.
labels:
  - pairs:
      app.kubernetes.io/name: qdrant
    includeSelectors: true
patches:
  # Rename the generic base objects to "qdrant" and overlay app-specific
  # fields; allowNameChange permits the metadata.name change in each patch.
  - path: patches/deployment.yaml
    target:
      group: apps
      version: v1
      kind: Deployment
      name: deployment
      namespace: default
    options:
      allowNameChange: true
  - path: patches/ingress.yaml
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: ingress
      namespace: default
    options:
      allowNameChange: true
  - path: patches/service.yaml
    target:
      group: ""
      version: v1
      kind: Service
      name: service
      namespace: default
    options:
      allowNameChange: true
  # JSON6902 patch: point the renamed Ingress at the renamed Service and set
  # the public hostname (${CLUSTER_DOMAIN_NAME} substituted at deploy time).
  - patch: |-
      - op: replace
        path: /spec/rules/0/http/paths/0/backend/service/name
        value: qdrant
      - op: replace
        path: /spec/rules/0/host
        value: qdrant.${CLUSTER_DOMAIN_NAME}
      - op: replace
        path: /spec/tls/0/hosts/0
        value: qdrant.${CLUSTER_DOMAIN_NAME}
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: qdrant
54 |
--------------------------------------------------------------------------------
/apps/generic/overlays/qdrant/patches/deployment.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: Qdrant vector database layered onto the generic base
# Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: qdrant
spec:
  template:
    spec:
      containers:
        - name: container
          # The official image is qdrant/qdrant; the previous reference
          # (ghcr.io/qdrant/qdrant/qdrant) had a duplicated path segment and
          # does not match any published image path.
          image: docker.io/qdrant/qdrant:v1.14.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            # Qdrant persists collections under /qdrant/storage by default;
            # the previous mount at /qdrant/config/storage left that default
            # path unbacked, so collection data would not survive a restart.
            - name: yasr-volume
              mountPath: /qdrant/storage
              subPath: configs/qdrant/
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: http
            initialDelaySeconds: 30
            timeoutSeconds: 10
          ports:
            # Qdrant REST API (gRPC on 6334 is not exposed here).
            - name: http
              containerPort: 6333
              protocol: TCP
27 |
--------------------------------------------------------------------------------
/apps/generic/overlays/qdrant/patches/ingress.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: rename the base Ingress and register Qdrant with the
# Forecastle dashboard.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: qdrant
  annotations:
    forecastle.stakater.com/appName: "Qdrant"
    forecastle.stakater.com/group: "AI"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://qdrant.tech/images/logo.svg"
10 |
--------------------------------------------------------------------------------
/apps/generic/overlays/qdrant/patches/service.yaml:
--------------------------------------------------------------------------------
# Kustomize patch for the generic base Service: rename it to "qdrant" and
# publish the REST API port.
apiVersion: v1
kind: Service
metadata:
  name: qdrant
spec:
  ports:
    # Forward service port 6333 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 6333
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/rdiffweb/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - ../../base/

# Stamp every base resource (and its selectors) with this app's name.
labels:
  - pairs:
      app.kubernetes.io/name: rdiffweb
    includeSelectors: true

patches:
  # Rename the generic base objects to "rdiffweb" and overlay app-specific
  # fields; allowNameChange permits the metadata.name change in each patch.
  - path: patches/deployment.yaml
    target:
      group: apps
      version: v1
      kind: Deployment
      name: deployment
      namespace: default
    options:
      allowNameChange: true
  - path: patches/ingress.yaml
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: ingress
      namespace: default
    options:
      allowNameChange: true
  - path: patches/service.yaml
    target:
      group: ""
      version: v1
      kind: Service
      name: service
      namespace: default
    options:
      allowNameChange: true
  # JSON6902 patch: point the renamed Ingress at the renamed Service and serve
  # it as "backups.<domain>" (${CLUSTER_DOMAIN_NAME} substituted at deploy time).
  - patch: |-
      - op: replace
        path: /spec/rules/0/http/paths/0/backend/service/name
        value: rdiffweb
      - op: replace
        path: /spec/rules/0/host
        value: backups.${CLUSTER_DOMAIN_NAME}
      - op: replace
        path: /spec/tls/0/hosts/0
        value: backups.${CLUSTER_DOMAIN_NAME}
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: rdiffweb
56 |
--------------------------------------------------------------------------------
/apps/generic/overlays/rdiffweb/patches/deployment.yaml:
--------------------------------------------------------------------------------
---
# Kustomize patch: rdiffweb backup browser layered onto the generic base
# Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rdiffweb
spec:
  template:
    spec:
      containers:
        - name: container
          image: docker.io/ikus060/rdiffweb:latest
          imagePullPolicy: Always
          env:
            - name: RDIFFWEB_ADMIN_USER
              value: gandazgul
            # NOTE(review): ${ADMIN_PASSWORD} is substituted into the pod spec
            # as plain text; a secretKeyRef would keep it out of
            # `kubectl get deploy -o yaml`.
            - name: RDIFFWEB_ADMIN_PASSWORD
              value: ${ADMIN_PASSWORD}
          volumeMounts:
            - name: yasr-volume
              mountPath: /etc/rdiffweb/
              subPath: configs/rdiffweb
            - name: backup-volume
              mountPath: /backups
          # NOTE(review): unlike the sibling overlays this patch defines no
          # livenessProbe — confirm whether that is intentional.
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
28 |
--------------------------------------------------------------------------------
/apps/generic/overlays/rdiffweb/patches/ingress.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: rename the base Ingress and register rdiffweb with the
# Forecastle dashboard (no group annotation, so it lands in the default group).
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: rdiffweb
  annotations:
    forecastle.stakater.com/appName: "Backups (rdiffweb)"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://rdiffweb.org/web/image/website/2/logo/Rdiffweb?unique=1d91d70"
9 |
--------------------------------------------------------------------------------
/apps/generic/overlays/rdiffweb/patches/service.yaml:
--------------------------------------------------------------------------------
# Kustomize patch for the generic base Service: rename it to "rdiffweb" and
# publish the web UI port.
apiVersion: v1
kind: Service
metadata:
  name: rdiffweb
spec:
  ports:
    # Forward service port 8080 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 8080
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/transmission/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default

resources:
  - ../../base/

# Stamp every base resource (and its selectors) with this app's name.
labels:
  - pairs:
      app.kubernetes.io/name: transmission
    includeSelectors: true

patches:
  # Rename the generic base objects to "transmission" and overlay app-specific
  # fields; allowNameChange permits the metadata.name change in each patch.
  - path: patches/deployment.yaml
    target:
      group: apps
      version: v1
      kind: Deployment
      name: deployment
      namespace: default
    options:
      allowNameChange: true
  - path: patches/ingress.yaml
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: ingress
      namespace: default
    options:
      allowNameChange: true
  - path: patches/service.yaml
    target:
      group: ""
      version: v1
      kind: Service
      name: service
      namespace: default
    options:
      allowNameChange: true
  # JSON6902 patch: point the renamed Ingress at the renamed Service and set
  # the public hostname (${CLUSTER_DOMAIN_NAME} substituted at deploy time).
  - patch: |-
      - op: replace
        path: /spec/rules/0/http/paths/0/backend/service/name
        value: transmission
      - op: replace
        path: /spec/rules/0/host
        value: transmission.${CLUSTER_DOMAIN_NAME}
      - op: replace
        path: /spec/tls/0/hosts/0
        value: transmission.${CLUSTER_DOMAIN_NAME}
    target:
      group: networking.k8s.io
      version: v1
      kind: Ingress
      name: transmission
57 |
--------------------------------------------------------------------------------
/apps/generic/overlays/transmission/patches/ingress.yaml:
--------------------------------------------------------------------------------
# Kustomize patch: rename the base Ingress and register Transmission with the
# Forecastle dashboard.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: transmission
  annotations:
    # Redirect requests for "/" to Transmission's web UI path.
    nginx.ingress.kubernetes.io/app-root: /transmission/web/
    forecastle.stakater.com/appName: "Transmission"
    forecastle.stakater.com/group: "Media"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://transmissionbt.com/assets/images/Transmission_icon.png"
11 |
--------------------------------------------------------------------------------
/apps/generic/overlays/transmission/patches/service.yaml:
--------------------------------------------------------------------------------
# Kustomize patch for the generic base Service: rename it to "transmission"
# and publish the web UI / RPC port.
apiVersion: v1
kind: Service
metadata:
  name: transmission
spec:
  ports:
    # Forward service port 9091 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 9091
11 |
--------------------------------------------------------------------------------
/apps/mealie/base/deployment.yaml:
--------------------------------------------------------------------------------
---
# Mealie recipe manager: single-replica Deployment with SMTP mail-out and
# persistent app data on the shared yasr-volume claim.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mealie
  namespace: default
  labels:
    app.kubernetes.io/name: mealie
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: mealie
  template:
    metadata:
      labels:
        app.kubernetes.io/name: mealie
    spec:
      containers:
        - name: mealie
          image: ghcr.io/mealie-recipes/mealie:latest
          imagePullPolicy: Always
          env:
            # App runs as this uid/gid inside the container.
            - name: PUID
              value: "1000"
            - name: PGID
              value: "1000"
            # ${...} placeholders are substituted at deploy time (presumably
            # by Flux variable substitution — confirm).
            - name: TZ
              value: ${CLUSTER_TIME_ZONE}
            # Self-registration disabled; accounts are created by the admin.
            - name: ALLOW_SIGNUP
              value: "false"
            - name: BASE_URL
              value: https://recipes.${CLUSTER_DOMAIN_NAME}
            - name: DEFAULT_EMAIL
              value: ${EMAIL}
            # Outbound mail via Gmail SMTP submission (port 587).
            - name: SMTP_HOST
              value: smtp.gmail.com
            - name: SMTP_PORT
              value: "587"
            - name: SMTP_FROM_EMAIL
              value: mealie@${CLUSTER_DOMAIN_NAME}
            - name: SMTP_USER
              value: ${EMAIL}
            # NOTE(review): the password lands in the pod spec as plain text;
            # a secretKeyRef would keep it out of `kubectl get -o yaml`.
            - name: SMTP_PASSWORD
              value: ${SMTP_PASSWORD}
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 9000
            initialDelaySeconds: 30
            timeoutSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 250Mi
            limits:
              cpu: 500m
              memory: 500Mi
          volumeMounts:
            # Use the node's timezone database.
            - name: tz-config
              mountPath: /etc/localtime
              readOnly: true
            - name: yasr-volume
              mountPath: /app/data
              subPath: configs/mealie
      volumes:
        - name: yasr-volume
          persistentVolumeClaim:
            claimName: yasr-volume
        - name: tz-config
          hostPath:
            path: /etc/localtime
79 |
--------------------------------------------------------------------------------
/apps/mealie/base/ingress.yaml:
--------------------------------------------------------------------------------
# Ingress for Mealie at recipes.<domain>, registered with Forecastle.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mealie
  namespace: default
  labels:
    app.kubernetes.io/name: mealie
  annotations:
    forecastle.stakater.com/appName: "Mealie"
    forecastle.stakater.com/expose: "true"
    # forecastle.stakater.com/group: "Media"
    # NOTE(review): icon URL still references the old hay-kot/mealie repo
    # while the image above comes from mealie-recipes/mealie — confirm the
    # raw URL still resolves.
    forecastle.stakater.com/icon: "https://raw.githubusercontent.com/hay-kot/mealie/mealie-next/frontend/static/icon.png"
spec:
  ingressClassName: nginx
  rules:
    - host: recipes.${CLUSTER_DOMAIN_NAME}
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: mealie
                port:
                  name: http
  tls:
    - hosts:
        - recipes.${CLUSTER_DOMAIN_NAME}
      secretName: internal-ingress-cert
30 |
--------------------------------------------------------------------------------
/apps/mealie/base/kustomization.yaml:
--------------------------------------------------------------------------------
# Assembles the Mealie app from its three plain manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - deployment.yaml
  - service.yaml
  - ingress.yaml
8 |
--------------------------------------------------------------------------------
/apps/mealie/base/service.yaml:
--------------------------------------------------------------------------------
# ClusterIP Service exposing Mealie's web port to the Ingress.
apiVersion: v1
kind: Service
metadata:
  name: mealie
  namespace: default
  labels:
    app.kubernetes.io/name: mealie
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: mealie
  ports:
    # Forward service port 9000 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 9000
17 |
--------------------------------------------------------------------------------
/apps/open-webui/base/kustomization.yaml:
--------------------------------------------------------------------------------
# Open WebUI base: the bundled Ollama backend is currently disabled, so the
# web UI presumably talks to an Ollama instance managed elsewhere — see
# OLLAMA_BASE_URL in webui-deployment.yaml.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  #- ollama-service.yaml
  #- ollama-statefulset.yaml
  - webui-deployment.yaml
  - webui-service.yaml
  - webui-ingress.yaml
10 |
--------------------------------------------------------------------------------
/apps/open-webui/base/ollama-service.yaml:
--------------------------------------------------------------------------------
# Service in front of the Ollama StatefulSet.
apiVersion: v1
kind: Service
metadata:
  # Named "ollama" so it matches both the StatefulSet's serviceName
  # ("ollama") and the DNS name the web UI targets
  # (OLLAMA_BASE_URL=http://ollama.default.svc.cluster.local:11434).
  # The previous name, "ollama-service", satisfied neither referent.
  name: ollama
  namespace: default
spec:
  selector:
    app: ollama
  ports:
    # Ollama HTTP API.
    - protocol: TCP
      port: 11434
      targetPort: 11434
13 |
--------------------------------------------------------------------------------
/apps/open-webui/base/ollama-statefulset.yaml:
--------------------------------------------------------------------------------
# Ollama LLM server: single-replica StatefulSet with a per-replica PVC for
# downloaded models.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: ollama
  namespace: default
spec:
  # Governing service name for stable pod DNS; a Service with this exact name
  # must exist (NOTE(review): verify the deployed Service's metadata.name
  # matches "ollama").
  serviceName: "ollama"
  replicas: 1
  selector:
    matchLabels:
      app: ollama
  template:
    metadata:
      labels:
        app: ollama
    spec:
      containers:
        - name: ollama
          image: ollama/ollama:latest
          ports:
            - containerPort: 11434
          resources:
            requests:
              cpu: "2000m"
              memory: "2Gi"
            limits:
              cpu: "4000m"
              memory: "4Gi"
              # NOTE(review): a limit of "0" allocates no GPUs — set to "1"
              # (with the NVIDIA device plugin installed) to run on a GPU.
              nvidia.com/gpu: "0"
          volumeMounts:
            # Model files and server state live under /root/.ollama.
            - name: ollama-volume
              mountPath: /root/.ollama
          tty: true
  volumeClaimTemplates:
    - metadata:
        name: ollama-volume
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            # Models are large; 30Gi per replica.
            storage: 30Gi
42 |
--------------------------------------------------------------------------------
/apps/open-webui/base/webui-deployment.yaml:
--------------------------------------------------------------------------------
---
# Open WebUI front end for Ollama: single replica with persistent app data on
# the shared yasr-volume claim.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: open-webui
  namespace: default
  labels:
    app.kubernetes.io/name: open-webui
spec:
  replicas: 1
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: open-webui
  template:
    metadata:
      labels:
        app.kubernetes.io/name: open-webui
    spec:
      containers:
        - name: open-webui
          # Tracks the "main" tag; Always-pull picks up new builds on restart.
          image: ghcr.io/open-webui/open-webui:main
          imagePullPolicy: Always
          env:
            # NOTE(review): requires an Ollama endpoint resolvable at this
            # in-cluster DNS name — verify the deployed Ollama Service is
            # actually named "ollama".
            - name: OLLAMA_BASE_URL
              value: "http://ollama.default.svc.cluster.local:11434"
            - name: RAG_EMBEDDING_MODEL_AUTO_UPDATE
              value: "true"
            # Self-registration disabled.
            - name: ENABLE_SIGNUP
              value: "false"
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8080
            initialDelaySeconds: 30
            timeoutSeconds: 10
          resources:
            requests:
              cpu: 100m
              memory: 250Mi
            limits:
              cpu: 1000m
              memory: 2000Mi
          volumeMounts:
            # Use the node's timezone database.
            - name: tz-config
              mountPath: /etc/localtime
              readOnly: true
            - name: yasr-volume
              mountPath: /app/backend/data
              subPath: configs/open-webui/
      volumes:
        - name: yasr-volume
          persistentVolumeClaim:
            claimName: yasr-volume
        # main-volume is not mounted by this base container; per-user overlays
        # mount it at /app/backend/data/docs.
        - name: main-volume
          persistentVolumeClaim:
            claimName: main-volume
        - name: tz-config
          hostPath:
            path: /etc/localtime
66 |
--------------------------------------------------------------------------------
/apps/open-webui/base/webui-ingress.yaml:
--------------------------------------------------------------------------------
# Ingress for Open WebUI at webai.<domain>, registered with Forecastle.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: open-webui
  namespace: default
  labels:
    app.kubernetes.io/name: open-webui
  annotations:
    forecastle.stakater.com/appName: "Open WebUI AI"
    forecastle.stakater.com/expose: "true"
spec:
  ingressClassName: nginx
  rules:
    - host: webai.${CLUSTER_DOMAIN_NAME}
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: open-webui
                port:
                  name: http
  tls:
    - hosts:
        - webai.${CLUSTER_DOMAIN_NAME}
      secretName: internal-ingress-cert
28 |
--------------------------------------------------------------------------------
/apps/open-webui/base/webui-service.yaml:
--------------------------------------------------------------------------------
# ClusterIP Service exposing Open WebUI's web port to the Ingress.
apiVersion: v1
kind: Service
metadata:
  name: open-webui
  namespace: default
  labels:
    app.kubernetes.io/name: open-webui
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: open-webui
  ports:
    # Forward service port 8080 to the container's named "http" port.
    - protocol: TCP
      name: http
      targetPort: http
      port: 8080
17 |
--------------------------------------------------------------------------------
/apps/open-webui/gandazgul/kustomization.yaml:
--------------------------------------------------------------------------------
# Per-user overlay: base Open WebUI plus this user's document mount.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  #- ../base/ollama-service.yaml
  #- ../base/ollama-statefulset.yaml
  - ../base/webui-deployment.yaml
  - ../base/webui-service.yaml
  - ../base/webui-ingress.yaml

# `patchesStrategicMerge` was deprecated in kustomize v5 and removed in later
# releases; `patches` is the supported equivalent for strategic-merge files.
patches:
  - path: webui-deployment-patch.yaml
13 |
--------------------------------------------------------------------------------
/apps/open-webui/gandazgul/webui-deployment-patch.yaml:
--------------------------------------------------------------------------------
---
# Strategic-merge patch: mount this user's documents into Open WebUI's RAG
# docs directory (main-volume is declared in the base deployment).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: open-webui
spec:
  template:
    spec:
      containers:
        - name: open-webui
          volumeMounts:
            - name: main-volume
              mountPath: /app/backend/data/docs
              subPath: "btsync/Carlos' Documents/"
15 |
--------------------------------------------------------------------------------
/apps/open-webui/rafag/kustomization.yaml:
--------------------------------------------------------------------------------
# Per-user overlay: base Open WebUI plus this user's document mount.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  #- ../base/ollama-service.yaml
  #- ../base/ollama-statefulset.yaml
  - ../base/webui-deployment.yaml
  - ../base/webui-service.yaml
  - ../base/webui-ingress.yaml

# `patchesStrategicMerge` was deprecated in kustomize v5 and removed in later
# releases; `patches` is the supported equivalent for strategic-merge files.
patches:
  - path: webui-deployment-patch.yaml
13 |
--------------------------------------------------------------------------------
/apps/open-webui/rafag/webui-deployment-patch.yaml:
--------------------------------------------------------------------------------
---
# Strategic-merge patch: mount this user's documents into Open WebUI's RAG
# docs directory (main-volume is declared in the base deployment).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: open-webui
spec:
  template:
    spec:
      containers:
        - name: open-webui
          volumeMounts:
            - name: main-volume
              mountPath: /app/backend/data/docs
              subPath: "rafag"
15 |
--------------------------------------------------------------------------------
/apps/tdarr/base/ingress.yaml:
--------------------------------------------------------------------------------
---
# Ingress for Tdarr at encoder.<domain>, registered with Forecastle.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: tdarr
  namespace: default
  labels:
    app.kubernetes.io/name: tdarr
  annotations:
    forecastle.stakater.com/appName: "Tdarr"
    forecastle.stakater.com/group: "Media"
    forecastle.stakater.com/expose: "true"
    forecastle.stakater.com/icon: "https://home.tdarr.io/static/media/logo3-min.246d6df44c7f16ddebaf.png"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - encoder.${CLUSTER_DOMAIN_NAME}
      secretName: internal-ingress-cert
  rules:
    - host: encoder.${CLUSTER_DOMAIN_NAME}
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: tdarr
                port:
                  name: http
31 |
--------------------------------------------------------------------------------
/apps/tdarr/base/kustomization.yaml:
--------------------------------------------------------------------------------
---
# Assembles the Tdarr app from its three plain manifests.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
  - deployment.yaml
  - service.yaml
  - ingress.yaml
9 |
--------------------------------------------------------------------------------
/apps/tdarr/base/service.yaml:
--------------------------------------------------------------------------------
---
# ClusterIP Service exposing Tdarr's two ports.
apiVersion: v1
kind: Service
metadata:
  name: tdarr
  labels:
    app.kubernetes.io/name: tdarr
spec:
  type: ClusterIP
  ports:
    # Web UI.
    - name: http
      port: 8265
      protocol: TCP
      targetPort: http
    # Tdarr server port (presumably for worker nodes — confirm).
    - name: server
      port: 8266
      protocol: TCP
      targetPort: server
  selector:
    app.kubernetes.io/name: tdarr
21 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/.helmignore:
--------------------------------------------------------------------------------
# Files excluded from the packaged chart by `helm package`.
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
22 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/Chart.yaml:
--------------------------------------------------------------------------------
# NOTE(review): apiVersion v1 is the Helm 2-era chart format. Helm 3 still
# reads it, but new charts conventionally declare apiVersion v2 — consider
# migrating (and bumping the chart version) in a follow-up.
apiVersion: v1
description: A chart to install a storage provisioner for single node installs.
name: hostpath-provisioner
version: 0.2.2
maintainers:
  - name: gandazgul
    email: ravelo.carlos@gmail.com
8 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/README.md:
--------------------------------------------------------------------------------
1 | # hostpath-provisioner
2 |
3 | 
4 |
A chart to install a storage provisioner for single-node installs.
6 |
7 | ## Maintainers
8 |
9 | | Name | Email | Url |
10 | | ---- | ------ | --- |
11 | | gandazgul | | |
12 |
13 | ## Values
14 |
15 | | Key | Type | Default | Description |
16 | |-----|------|---------|-------------|
17 | | affinity | object | `{}` | |
18 | | filesystemPath | string | `"/var/kubernetes"` | |
19 | | image.pullPolicy | string | `"Always"` | |
20 | | image.repository | string | `"quay.io/kubevirt/hostpath-provisioner"` | |
21 | | image.tag | string | `"latest"` | |
22 | | nodeSelector | object | `{}` | |
23 | | pvReclaimPolicy | string | `"Retain"` | |
24 | | replicaCount | int | `1` | |
25 | | resources | object | `{}` | |
26 | | tolerations | list | `[]` | |
27 |
28 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/templates/DaemonSet.yaml:
--------------------------------------------------------------------------------
---
# One provisioner pod per node; creates hostPath-backed PVs under
# .Values.filesystemPath on the node it runs on.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hostpath-provisioner
  labels:
    k8s-app: hostpath-provisioner
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: hostpath-provisioner
  template:
    metadata:
      labels:
        k8s-app: hostpath-provisioner
    spec:
      serviceAccountName: hostpath-provisioner
      containers:
        - name: hostpath-provisioner
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            # change to true, to have the name of the pvc be part of the directory
            - name: USE_NAMING_PREFIX
              value: "true"
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: PV_DIR
              value: {{ .Values.filesystemPath | quote }}
          # Render the optional resources block documented in values.yaml
          # (previously declared there but never used by this template).
          {{- with .Values.resources }}
          resources:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          volumeMounts:
            - name: pv-volume # root dir where your bind mounts will be on the node
              mountPath: {{ .Values.filesystemPath }}
      # Optional scheduling controls from values.yaml.
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - name: pv-volume
          hostPath:
            path: {{ .Values.filesystemPath }}
42 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/templates/NOTES.txt:
--------------------------------------------------------------------------------
The HostPath Provisioner should now be running.
2 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/templates/StorageClass.yaml:
--------------------------------------------------------------------------------
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: hostpath
  annotations:
    # Make this the cluster-wide default class for PVCs that specify none.
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubevirt.io/hostpath-provisioner
# Retain/Delete behavior is configurable via values.yaml (pvReclaimPolicy).
reclaimPolicy: {{ .Values.pvReclaimPolicy }}
# Delay binding until a pod is scheduled, so the hostPath PV is created on
# the node that will actually run the pod.
volumeBindingMode: WaitForFirstConsumer
11 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "chartInfo.name" -}}
{{- .Chart.Name | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Fully qualified app name derived from the release name.
Truncated at 63 chars to fit the DNS label limit on Kubernetes name fields.
*/}}
{{- define "chartInfo.fullname" -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
"+" (semver build metadata) is not valid in label values, so it is
replaced with "_".
*/}}
{{- define "chartInfo.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
19 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/templates/rbac.yaml:
--------------------------------------------------------------------------------
---
# ServiceAccount used by the provisioner DaemonSet (see DaemonSet.yaml).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: hostpath-provisioner
  namespace: kube-system
---
# Cluster-wide permissions needed by an external provisioner:
# read nodes, manage PVs, watch PVCs and StorageClasses, emit events.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: hostpath-provisioner
rules:
  # Read node metadata (the provisioner records which node a PV lives on).
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get"]
  # Full lifecycle of the PersistentVolumes it provisions.
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  # Watch claims and update them once bound.
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  # Discover which StorageClasses point at this provisioner.
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  # Report provisioning progress/failures as events.
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
---
# Bind the role to the ServiceAccount above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: hostpath-provisioner
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: hostpath-provisioner
subjects:
  - kind: ServiceAccount
    name: hostpath-provisioner
    namespace: kube-system
41 |
--------------------------------------------------------------------------------
/charts/hostpath-provisioner/values.yaml:
--------------------------------------------------------------------------------
# Default values

# NOTE(review): replicaCount is not referenced by any template in this chart —
# the workload is a DaemonSet (one pod per node); consider removing this key.
replicaCount: 1

image:
  repository: quay.io/kubevirt/hostpath-provisioner
  # NOTE(review): "latest" + pullPolicy Always makes upgrades non-reproducible;
  # consider pinning a version tag.
  tag: latest
  pullPolicy: Always

# Host directory on each node under which PVs are created (also the PV_DIR
# env var and the hostPath mount in DaemonSet.yaml).
filesystemPath: /var/kubernetes
# Reclaim policy applied to the "hostpath" StorageClass.
pvReclaimPolicy: Retain

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
30 |
--------------------------------------------------------------------------------
/charts/mosca/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/mosca/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: mosca
3 | description: A Helm chart for the Mosca.io MQTT Broker
4 | type: application
5 | version: 0.2.5
6 | appVersion: "2.8.3"
7 | maintainers:
8 | - name: gandazgul
9 | email: ravelo.carlos@gmail.com
10 |
--------------------------------------------------------------------------------
/charts/mosca/README.md:
--------------------------------------------------------------------------------
1 | # mosca
2 |
3 |   
4 |
5 | A Helm chart for the Mosca.io MQTT Broker
6 |
7 | ## Maintainers
8 |
9 | | Name | Email | Url |
10 | | ---- | ------ | --- |
11 | | gandazgul | <ravelo.carlos@gmail.com> | |
12 |
13 | ## Values
14 |
15 | | Key | Type | Default | Description |
16 | |-----|------|---------|-------------|
17 | | affinity | object | `{}` | |
18 | | fullnameOverride | string | `""` | |
19 | | image.pullPolicy | string | `"Always"` | |
20 | | image.repository | string | `"matteocollina/mosca"` | |
21 | | image.tag | string | `"latest"` | |
22 | | imagePullSecrets | list | `[]` | |
23 | | ingress.annotations | object | `{}` | |
24 | | ingress.enabled | bool | `false` | |
25 | | ingress.hosts[0].host | string | `"chart-example.local"` | |
26 | | ingress.hosts[0].paths | list | `[]` | |
27 | | ingress.tls | list | `[]` | |
28 | | nameOverride | string | `""` | |
29 | | nodeSelector | object | `{}` | |
30 | | podAnnotations | object | `{}` | |
31 | | podSecurityContext | object | `{}` | |
32 | | replicaCount | int | `1` | |
33 | | resources | object | `{}` | |
34 | | securityContext | object | `{}` | |
35 | | service.port | int | `1883` | |
36 | | service.type | string | `"ClusterIP"` | |
37 | | tolerations | list | `[]` | |
38 | | volumeMounts | list | `[]` | |
39 | | volumes | list | `[]` | |
40 |
41 |
--------------------------------------------------------------------------------
/charts/mosca/templates/NOTES.txt:
--------------------------------------------------------------------------------
{{- /*
Post-install notes. Fix: ingress.hosts[].paths entries are objects with a
.path key (see templates/ingress.yaml and values.yaml), so the URL must use
{{ .path }} — the previous {{ . }} printed the whole map.
*/ -}}
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "helpers.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "helpers.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "helpers.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "helpers.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
23 |
--------------------------------------------------------------------------------
/charts/mosca/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
nameOverride (values.yaml) takes precedence over the chart name.
*/}}
{{- define "helpers.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
fullnameOverride (values.yaml) wins over both.
*/}}
{{- define "helpers.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
"+" (semver build metadata) is not a valid label character, hence the "_".
*/}}
{{- define "helpers.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end }}

{{/*
Common labels (chart, version, managed-by plus the selector labels below).
*/}}
{{- define "helpers.labels" -}}
helm.sh/chart: {{ include "helpers.chart" . }}
{{ include "helpers.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels — must stay stable across upgrades, since Deployment/Service
selectors are immutable once created.
*/}}
{{- define "helpers.selectorLabels" -}}
app.kubernetes.io/name: {{ include "helpers.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
53 |
--------------------------------------------------------------------------------
/charts/mosca/templates/ingress.yaml:
--------------------------------------------------------------------------------
{{- /*
Ingress for the mosca service. Rendered only when ingress.enabled is true.
All rules route to the chart's Service on .Values.service.port; hosts and
optional TLS blocks come straight from values.yaml. pathType is fixed to
Prefix. NOTE(review): assumes a networking.k8s.io/v1-capable cluster
(Kubernetes >= 1.19) — confirm against the supported cluster versions.
*/ -}}
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "helpers.fullname" . -}}
{{- $serviceName := include "helpers.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels: {{- include "helpers.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations: {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: Prefix
            backend:
              service:
                name: {{ $serviceName }}
                port:
                  number: {{ $svcPort }}
          {{- end }}
    {{- end }}
{{- end }}
40 |
--------------------------------------------------------------------------------
/charts/mosca/templates/service.yaml:
--------------------------------------------------------------------------------
# Service exposing the MQTT broker.
# Fix: the previous template set nodePort to .Values.service.port (1883),
# which is outside the default NodePort range (30000-32767), so a NodePort
# service failed API validation. nodePort is now an optional explicit value
# (.Values.service.nodePort); when unset, Kubernetes allocates one.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "helpers.fullname" . }}
  labels: {{- include "helpers.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      {{- if and (contains "NodePort" .Values.service.type) .Values.service.nodePort }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
      # NOTE(review): targetPort "http" requires the pod's container port to be
      # named "http" in the deployment template — confirm (not visible here).
      targetPort: http
      protocol: TCP
  selector: {{- include "helpers.selectorLabels" . | nindent 4 }}
17 |
--------------------------------------------------------------------------------
/charts/mosca/values.yaml:
--------------------------------------------------------------------------------
# Default values

# Number of broker pod replicas.
replicaCount: 1

image:
  repository: matteocollina/mosca
  # Overrides the image tag whose default is the chart appVersion.
  # NOTE(review): "latest" + pullPolicy Always is not reproducible; consider
  # pinning a version tag.
  tag: latest
  pullPolicy: Always

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  # MQTT port. NOTE(review): if you switch type to NodePort, check
  # templates/service.yaml for how the nodePort is derived — node ports must
  # fall in the cluster's NodePort range (default 30000-32767).
  port: 1883

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  # Each entry: host plus a list of path objects ({path: /...}) — see
  # templates/ingress.yaml.
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

volumes: []
volumeMounts: []

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
65 |
--------------------------------------------------------------------------------
/clusters/gandazgul/ClusterKustomization.yaml:
--------------------------------------------------------------------------------
---
# Flux Kustomization for the "gandazgul" cluster: reconciles everything under
# ./clusters/gandazgul/ from the k8s-infrastructure GitRepository.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: gandazgul
  namespace: kube-system
spec:
  # Wait for the sealed-secrets controller, core kube-system resources, and
  # the persistent volumes before applying this cluster's apps.
  dependsOn:
    - name: sealed-secret
    - name: kube-system
    - name: main-volume
    - name: backup-volume
    - name: yasr-volume
  interval: 10m0s
  path: ./clusters/gandazgul/
  prune: true
  sourceRef:
    kind: GitRepository
    name: k8s-infrastructure
    namespace: kube-system
  postBuild:
    # constants can be specified like this:
    substitute:
      PHOTOS_PATH: gandazgul/Pictures
      PHOTOS_UID: "1000"
      PHOTOS_GID: "1000"
      BITWARDEN_SUBDOMAIN: "pass"
      HASS_SUBDOMAIN: "ha"
      CLUSTER_NAME: "gandazgul"
    substituteFrom:
      # substitutions can also come from a config map or secret
      # - kind: ConfigMap
      #   name: cluster-vars
      - kind: Secret
        name: secrets

# use ${var:=default} or ${var} in the files to place variables
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - ./secrets/
6 | - ./RsyncCronJobs.yaml
7 | - ../../../apps/ActualBudget.yaml
8 | - ../../../apps/Bitwarden.yaml
9 | - ../../../apps/Forecastle.yaml
10 | - ../../../apps/Headlamp.yaml
11 | - ../../../apps/HomeAssistant.yaml
12 | - ../../../apps/MosquitoMQTTBroker.yaml
13 | - ../../../apps/Paperless.yaml
14 | - ../../../apps/Resilio.yaml
15 | - ../../../apps/SFTPGo.yaml
16 | - ../../../apps/Sharry.yaml
17 | - ../../../apps/ZWaveJS.yaml
18 | - ../../../apps/Mealie.yaml
19 | - ../../../apps/AudioBookShelf.yaml
20 | - ../../../apps/Transmission.yaml
21 | - ../../../apps/Lidarr.yaml
22 | - ../../../apps/Overseerr.yaml
23 | - ../../../apps/Prowlarr.yaml
24 | - ../../../apps/Radarr.yaml
25 | - ../../../apps/Sonarr.yaml
26 | - ../../../apps/Tdarr.yaml
27 | - ../../../apps/Plex.yaml
28 | - ../../../apps/RDiffWeb.yaml
29 | - ../../../apps/Harbor.yaml
30 | - ../../../apps/Jellyfin.yaml
31 | - ../../../apps/Qdrant.yaml
32 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - paperless-values.yaml
6 | - plex-values.yaml
7 | - radarr-values.yaml
8 | - resilio-values.yaml
9 | - sonarr-values.yaml
10 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/paperless-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgbWFpbjoKICAgIGVuYWJsZWQ6IHRydWUKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBtb3VudFBhdGg6IC91c3Ivc3JjL3BhcGVybGVzcy9jb25zdW1lCiAgICBzdWJQYXRoOiAiYnRzeW5jL0NhcmxvcycgRG9jdW1lbnRzL1BhcGVybGVzc0NvbnN1bWVyIgo=
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: paperless-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/plex-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgZGF0YToKICAgIGVuYWJsZWQ6IHRydWUKICAgIHR5cGU6IHB2YwogICAgZXhpc3RpbmdDbGFpbTogbWFpbi12b2x1bWUKICAgIHN1YlBhdGg6CiAgICAtIHBhdGg6IHB1YmxpYwogICAgICBtb3VudFBhdGg6IC9kYXRhCiAgICAtIHBhdGg6IGdhbmRhemd1bC9QaWN0dXJlcwogICAgICBtb3VudFBhdGg6IC9waWN0dXJlcwogICAgLSBwYXRoOiByZW5lcG9yL1BJQ1RVUkVTCiAgICAgIG1vdW50UGF0aDogL3JlbmVfcGljdHVyZXMK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: plex-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/radarr-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgbW92aWVzOgogICAgZW5hYmxlZDogdHJ1ZQogICAgdHlwZTogcHZjCiAgICBleGlzdGluZ0NsYWltOiBtYWluLXZvbHVtZQogICAgc3ViUGF0aDogcHVibGljL01vdmllcwo=
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: radarr-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgY29uZmlnOgogICAgZW5hYmxlZDogdHJ1ZQogICAgZXhpc3RpbmdDbGFpbTogeWFzci12b2x1bWUKICAgIG1vdW50UGF0aDogL2NvbmZpZwogICAgc3ViUGF0aDogY29uZmlncy9yZXNpbGlvCiAgc3luYzoKICAgIGVuYWJsZWQ6IHRydWUKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBtb3VudFBhdGg6IC9zeW5jCiAgICBzdWJQYXRoOiBidHN5bmMK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: resilio-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/secrets/sonarr-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgdHY6CiAgICBlbmFibGVkOiB0cnVlCiAgICB0eXBlOiBwdmMKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBzdWJQYXRoOiBwdWJsaWMvVFYK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: sonarr-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/values/paperless-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | main:
3 | enabled: true
4 | existingClaim: main-volume
5 | mountPath: /usr/src/paperless/consume
6 | subPath: "btsync/Carlos' Documents/PaperlessConsumer"
7 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/values/plex-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | data:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath:
7 | - path: public
8 | mountPath: /data
9 | - path: gandazgul/Pictures
10 | mountPath: /pictures
11 | - path: renepor/PICTURES
12 | mountPath: /rene_pictures
13 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/values/radarr-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | movies:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath: public/Movies
7 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/values/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | config:
3 | enabled: true
4 | existingClaim: yasr-volume
5 | mountPath: /config
6 | subPath: configs/resilio
7 | sync:
8 | enabled: true
9 | existingClaim: main-volume
10 | mountPath: /sync
11 | subPath: btsync
12 |
--------------------------------------------------------------------------------
/clusters/gandazgul/apps/values/sonarr-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | tv:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath: public/TV
7 |
--------------------------------------------------------------------------------
/clusters/rafag/ClusterKustomization.yaml:
--------------------------------------------------------------------------------
---
# Flux Kustomization for the "rafag" cluster: reconciles everything under
# ./clusters/rafag/ from the k8s-infrastructure GitRepository.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: rafag
  namespace: kube-system
spec:
  # Wait for the sealed-secrets controller, core kube-system resources, and
  # the persistent volumes before applying this cluster's apps.
  dependsOn:
    - name: sealed-secret
    - name: kube-system
    - name: main-volume
    - name: backup-volume
    - name: yasr-volume
  interval: 10m0s
  path: ./clusters/rafag/
  prune: true
  sourceRef:
    kind: GitRepository
    name: k8s-infrastructure
    namespace: kube-system
  postBuild:
    # constants can be specified like this:
    substitute:
      PHOTOS_PATH: rafag/photos
      PHOTOS_UID: "1000"
      PHOTOS_GID: "1000"
      BITWARDEN_SUBDOMAIN: "p"
      HASS_SUBDOMAIN: "hass"
      CLUSTER_NAME: "rafag"
    substituteFrom:
      # substitutions can also come from a config map or secret
      # - kind: ConfigMap
      #   name: cluster-vars
      - kind: Secret
        name: secrets

# use ${var:=default} or ${var} in the files to place variables
--------------------------------------------------------------------------------
/clusters/rafag/apps/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - ./secrets/
6 | - ../../../apps/Bitwarden.yaml
7 | - ../../../apps/Forecastle.yaml
8 | - ../../../apps/HomeAssistant.yaml
9 | - ../../../apps/MosquitoMQTTBroker.yaml
10 | - ../../../apps/Paperless.yaml
11 | - ../../../apps/Photoprism.yaml
12 | - ../../../apps/Resilio.yaml
13 | - ../../../apps/SFTPGo.yaml
14 | - ../../../apps/Sharry.yaml
15 | # - ../../../apps/ZWaveJS.yaml
16 | - ../../../apps/Mealie.yaml
17 | - ../../../apps/Headlamp.yaml
18 | - ../../../apps/Transmission.yaml
19 | - ../../../apps/Lidarr.yaml
20 | #- ../../../apps/Overseerr.yaml
21 | - ../../../apps/Prowlarr.yaml
22 | - ../../../apps/Radarr.yaml
23 | - ../../../apps/Sonarr.yaml
24 | - ../../../apps/Tdarr.yaml
25 | - ../../../apps/Plex.yaml
26 | # - ../../../apps/Ollama.yaml
27 | # - ../../../apps/OpenWebUI.yaml
28 | - ../../../apps/Zigbee2mqtt.yaml
29 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - paperless-values.yaml
6 | - plex-values.yaml
7 | - radarr-values.yaml
8 | - resilio-values.yaml
9 | - sonarr-values.yaml
10 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/paperless-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgbWFpbjoKICAgIGVuYWJsZWQ6IHRydWUKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBtb3VudFBhdGg6IC91c3Ivc3JjL3BhcGVybGVzcy9jb25zdW1lCiAgICBzdWJQYXRoOiAicmFmYWcvUGFwZXJsZXNzQ29uc3VtZXIiCg==
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: paperless-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/plex-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgZGF0YToKICAgIGVuYWJsZWQ6IHRydWUKICAgIHR5cGU6IHB2YwogICAgZXhpc3RpbmdDbGFpbTogbWFpbi12b2x1bWUKICAgIHN1YlBhdGg6CiAgICAtIHBhdGg6IHB1YmxpYwogICAgICBtb3VudFBhdGg6IC9kYXRhCiAgICAtIHBhdGg6IHJhZmFnL3Bob3Rvcy9rYWxpZHJvaWQvQ2FtZXJhCiAgICAgIG1vdW50UGF0aDogL3BpY3R1cmVzCg==
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: plex-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/radarr-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgbW92aWVzOgogICAgZW5hYmxlZDogdHJ1ZQogICAgdHlwZTogcHZjCiAgICBleGlzdGluZ0NsYWltOiBtYWluLXZvbHVtZQogICAgc3ViUGF0aDogcHVibGljL2ZpbG1zCg==
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: radarr-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgY29uZmlnOgogICAgZW5hYmxlZDogdHJ1ZQogICAgZXhpc3RpbmdDbGFpbTogeWFzci12b2x1bWUKICAgIG1vdW50UGF0aDogL2NvbmZpZwogICAgc3ViUGF0aDogY29uZmlncy9yZXNpbGlvCiAgc3luYzoKICAgIGVuYWJsZWQ6IHRydWUKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBtb3VudFBhdGg6IC9zeW5jCiAgICBzdWJQYXRoOiByYWZhZwo=
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: resilio-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/secrets/sonarr-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgdHY6CiAgICBlbmFibGVkOiB0cnVlCiAgICB0eXBlOiBwdmMKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBzdWJQYXRoOiBwdWJsaWMvc2hvd3MK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: sonarr-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/values/paperless-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | main:
3 | enabled: true
4 | existingClaim: main-volume
5 | mountPath: /usr/src/paperless/consume
6 | subPath: "rafag/PaperlessConsumer"
7 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/values/plex-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | data:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath:
7 | - path: public
8 | mountPath: /data
9 | - path: rafag/photos/kalidroid/Camera
10 | mountPath: /pictures
11 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/values/radarr-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | movies:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath: public/films
7 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/values/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | config:
3 | enabled: true
4 | existingClaim: yasr-volume
5 | mountPath: /config
6 | subPath: configs/resilio
7 | sync:
8 | enabled: true
9 | existingClaim: main-volume
10 | mountPath: /sync
11 | subPath: rafag
12 |
--------------------------------------------------------------------------------
/clusters/rafag/apps/values/sonarr-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | tv:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath: public/shows
7 |
--------------------------------------------------------------------------------
/clusters/renepor/ClusterKustomization.yaml:
--------------------------------------------------------------------------------
---
# Flux Kustomization for the "renepor" cluster: reconciles everything under
# ./clusters/renepor/ from the k8s-infrastructure GitRepository.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: renepor
  namespace: kube-system
spec:
  # Wait for the sealed-secrets controller, core kube-system resources, and
  # the persistent volumes before applying this cluster's apps.
  dependsOn:
    - name: sealed-secret
    - name: kube-system
    - name: main-volume
    - name: backup-volume
    - name: yasr-volume
  interval: 10m0s
  path: ./clusters/renepor/
  prune: true
  sourceRef:
    kind: GitRepository
    name: k8s-infrastructure
    namespace: kube-system
  postBuild:
    # constants can be specified like this:
    # NOTE(review): unlike the other clusters, no CLUSTER_NAME or
    # HASS_SUBDOMAIN is substituted here — confirm no manifest applied to this
    # cluster references them, or Flux will fail the substitution.
    substitute:
      PHOTOS_PATH: renepor/PICTURES
      PHOTOS_UID: "1003"
      PHOTOS_GID: "1004"
      PLEX_TRANSCODE_SIZE: 2Gi
      # The time zone to apply to all containers
      CLUSTER_TIME_ZONE: America/New_York
      BITWARDEN_SUBDOMAIN: "pw"
    substituteFrom:
      # substitutions can also come from a config map or secret
      # - kind: ConfigMap
      #   name: cluster-vars
      - kind: Secret
        name: secrets

# use ${var:=default} or ${var} in the files to place variables
--------------------------------------------------------------------------------
/clusters/renepor/apps/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - ./secrets/
6 | - ../../../apps/Bitwarden.yaml
7 | - ../../../apps/Forecastle.yaml
8 | - ../../../apps/Headlamp.yaml
9 | - ../../../apps/Resilio.yaml
10 | - ../../../apps/SFTPGo.yaml
11 | - ../../../apps/Plex.yaml
12 | - ../../../apps/Photoprism.yaml
13 |
--------------------------------------------------------------------------------
/clusters/renepor/apps/secrets/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | - plex-values.yaml
6 | - resilio-values.yaml
7 |
--------------------------------------------------------------------------------
/clusters/renepor/apps/secrets/plex-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgZGF0YToKICAgIGVuYWJsZWQ6IHRydWUKICAgIHR5cGU6IHB2YwogICAgZXhpc3RpbmdDbGFpbTogbWFpbi12b2x1bWUKICAgIHN1YlBhdGg6CiAgICAtIHBhdGg6IHB1YmxpYwogICAgICBtb3VudFBhdGg6IC9kYXRhCiAgICAtIHBhdGg6IHJlbmVwb3IvUElDVFVSRVMKICAgICAgbW91bnRQYXRoOiAvcGljdHVyZXMK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: plex-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/renepor/apps/secrets/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | values.yaml: cGVyc2lzdGVuY2U6CiAgY29uZmlnOgogICAgZW5hYmxlZDogdHJ1ZQogICAgZXhpc3RpbmdDbGFpbTogeWFzci12b2x1bWUKICAgIG1vdW50UGF0aDogL2NvbmZpZwogICAgc3ViUGF0aDogY29uZmlncy9yZXNpbGlvCiAgc3luYzoKICAgIGVuYWJsZWQ6IHRydWUKICAgIGV4aXN0aW5nQ2xhaW06IG1haW4tdm9sdW1lCiAgICBtb3VudFBhdGg6IC9zeW5jCiAgICBzdWJQYXRoOiBidHN5bmMK
4 | kind: Secret
5 | metadata:
6 | creationTimestamp: null
7 | name: resilio-values
8 | namespace: default
9 |
--------------------------------------------------------------------------------
/clusters/renepor/apps/values/plex-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | data:
3 | enabled: true
4 | type: pvc
5 | existingClaim: main-volume
6 | subPath:
7 | - path: public
8 | mountPath: /data
9 | - path: renepor/PICTURES
10 | mountPath: /pictures
11 |
--------------------------------------------------------------------------------
/clusters/renepor/apps/values/resilio-values.yaml:
--------------------------------------------------------------------------------
1 | persistence:
2 | config:
3 | enabled: true
4 | existingClaim: yasr-volume
5 | mountPath: /config
6 | subPath: configs/resilio
7 | sync:
8 | enabled: true
9 | existingClaim: main-volume
10 | mountPath: /sync
11 | subPath: btsync
12 |
--------------------------------------------------------------------------------
/containers/container-run.js:
--------------------------------------------------------------------------------
// Runs a previously-built container image with podman, streaming stdio to the
// current terminal. Usage: node container-run.js --image=<name> [args...]
const { spawn, execSync } = require('child_process');
const argv = require('minimist')(process.argv.slice(2));

console.log(argv);

const imageName = argv.image;
if (!imageName) {
    console.error('CONTAINERS:RUN', 'Please specify an image name with --image=');
    process.exit(1);
}

const stdio = [process.stdin, process.stdout, process.stderr];
const ioOptions = { detached: true, shell: true, stdio };
// Images are published under the current user's docker.io namespace.
const username = execSync('whoami').toString('ascii').trim();
const imageNameLatest = `docker.io/${username}/${imageName}:latest`;

process.on('exit', (code) => {
    if (code !== 0) {
        console.error('CONTAINERS:RUN', `Exiting with exit code: ${code}`);
    }

    console.info('CONTAINERS:RUN', 'Shutting down');
    try {
        // Only synchronous work is allowed inside an 'exit' handler.
        execSync(`podman stop ${imageName}`, { stdio });
        console.info('CONTAINERS:RUN', 'Shut down complete');
    }
    catch (stopError) {
        // The container may have already exited (it runs with --rm);
        // don't let the cleanup failure mask the real exit code.
        console.error('CONTAINERS:RUN', `Failed to stop container: ${stopError.message}`);
    }
});

process.on('SIGINT', () => {
    process.exit(0);
});

const runCommand = `podman run --rm --name=${imageName} ${imageNameLatest} ${argv._.join(' ')}`;

console.info('CONTAINERS:RUN', `Running: ${runCommand}`);
const child = spawn(runCommand, ioOptions);

// spawn() is asynchronous: launch failures surface on the 'error' event, not
// as a thrown exception, so a try/catch around the call can never fire (and
// error.status only exists on execSync errors).
child.on('error', (error) => {
    console.error('CONTAINERS:RUN', 'A fatal error has occurred.');
    console.error(error.message);
    process.exit(1);
});

// Propagate the container's exit status to the caller.
child.on('close', (code) => {
    process.exit(code === null ? 1 : code);
});
47 |
--------------------------------------------------------------------------------
/containers/ddns-cloudflare/Containerfile:
--------------------------------------------------------------------------------
FROM docker.io/library/node:22

WORKDIR /app
# Copy only the manifests first so the dependency layer stays cached until
# package.json or yarn.lock changes.
COPY package.json yarn.lock ./
# --frozen-lockfile fails the build if yarn.lock is out of date with
# package.json, keeping image builds reproducible.
RUN yarn install --production --frozen-lockfile

# The package entry point is app.js; install it as /app/index.js, which is
# what the ENTRYPOINT below runs.
COPY app.js index.js

ENTRYPOINT ["node", "/app/index.js"]
11 |
--------------------------------------------------------------------------------
/containers/ddns-cloudflare/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ddns-cloudflare",
3 | "version": "1.0.0",
4 | "description": "An updater for DNS entries, one per cluster, to the cluster's external IP address",
5 | "main": "app.js",
6 | "repository": "https://github.com/gandazgul/k8s-infrastructure.git",
7 | "author": "Carlos Ravelo",
8 | "license": "MIT",
9 | "private": true,
10 | "type": "module",
11 | "dependencies": {
12 | "cloudflare": "^4.2.0",
13 | "dotenv": "^16.4.7",
14 | "find-up": "^7.0.0"
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/containers/rdiff-backup/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine:3.21.3

VOLUME /media/main
VOLUME /media/backup

# Docs: https://www.nongnu.org/rdiff-backup/docs.html
# `apk add --no-cache` fetches a fresh package index itself; a separate
# `apk update` would only leave a stale index cache baked into the layer.
RUN apk add --no-cache rdiff-backup && rm -rf /tmp/* /var/tmp/*

# -v5: verbosity level 5 so backup runs are logged in detail.
ENTRYPOINT ["rdiff-backup", "-v5"]
10 |
--------------------------------------------------------------------------------
/containers/transmission-pia-port-forward/Dockerfile:
--------------------------------------------------------------------------------
# NOTE(review): alpine:3.11 is past end-of-life and no longer receives
# security updates — consider upgrading to a supported release.
FROM alpine:3.11

RUN apk add --no-cache bash curl nano

# Install cron entries that keep the PIA-forwarded Transmission port open by
# periodically running port-forwarding.sh (copied below); each run's output is
# timestamped line-by-line and sent to the container log.
RUN \
    echo "**** Adding cronjob to keep the transmission port open *****" \
    # at reboot wait 60s for VPN to initialize then set the port
    && echo '@reboot sleep 60s && /usr/bin/port-forwarding.sh | while IFS= read -r line; do echo "$(date) $line"; done >> /dev/stdout 2>&1 #PIA Port Forward' >> /etc/crontabs/root \
    # refresh port every 2 hours
    && echo '0 */2 * * * /usr/bin/port-forwarding.sh | while IFS= read -r line; do echo "$(date) $line"; done >> /dev/stdout 2>&1 #PIA Port Forward' >> /etc/crontabs/root

COPY ./port-forwarding.sh /usr/bin/port-forwarding.sh
RUN chmod +x /usr/bin/port-forwarding.sh

# start crond in the foreground to run the transmission port cronjob
CMD ["crond", "-f", "-L", "/dev/stdout"]
18 |
--------------------------------------------------------------------------------
/containers/transmission-pia-port-forward/port-forwarding.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Set path for root Cron Job
4 | # PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin
5 |
6 | #
7 | # Enable port forwarding when using Private Internet Access
8 | #
9 | # Usage:
10 | # ./port_forwarding.sh
11 |
# Write all arguments to stderr, then terminate the script with status 1.
error() {
    printf '%s\n' "$*" >&2
    exit 1
}
16 |
# Report an error (e.g. an unrecognized option) on stderr, then print the
# usage synopsis and exit 1 via usage_and_exit.
error_and_usage() {
    echo "$@" 1>&2
    usage_and_exit 1
}
21 |
# Print a one-line usage synopsis built from the script's directory and the
# global PROGRAM name.
usage() {
    # Quote "$0" so script paths containing spaces don't word-split.
    echo "Usage: $(dirname "$0")/$PROGRAM"
}
25 |
# Print the usage synopsis and exit with the status given as $1.
usage_and_exit() {
    usage
    exit $1
}
30 |
# Print the program name and version (reads globals PROGRAM and VERSION).
version() {
    echo "$PROGRAM version $VERSION"
}
34 |
# Refresh the MAM seedbox session IP, then push the PIA-forwarded port (read
# from /data/forwarded_port) into Transmission via its RPC API.
# Uses env vars TRANSMISSIONS_USER / TRANSMISSIONS_PASS for RPC auth.
port_forward_assignment() {
    echo 'Setting the IP for MAM...'
    curl -c /data/mam.cookies -b /data/mam.cookies https://t.myanonamouse.net/json/dynamicSeedbox.php

    TRANSMISSION_HOST=localhost

    echo 'Loading port forward assignment information...'
    # Fail loudly instead of sending invalid JSON when the port file is
    # missing or empty.
    PORT=$(cat /data/forwarded_port) || error "Could not read /data/forwarded_port"
    test -n "$PORT" || error "/data/forwarded_port is empty"

    # change transmission port on the fly
    echo "Changing transmission's port to ${PORT}..."

    # Transmission rejects the first RPC call and hands back a session id
    # header, which must accompany the real request.
    SESSIONID=$(curl "${TRANSMISSION_HOST}:9091/transmission/rpc" --silent | grep -oE "X-Transmission-Session-Id: ([^<]+)" | awk -F:\ '{print $2}')
    echo "SessionID: ${SESSIONID}"

    DATA='{"method": "session-set", "arguments": { "peer-port" :'$PORT' } }'

    curl -u "$TRANSMISSIONS_USER:$TRANSMISSIONS_PASS" "http://${TRANSMISSION_HOST}:9091/transmission/rpc" -d "$DATA" -H "X-Transmission-Session-Id: $SESSIONID"
}
54 |
# $(...) instead of legacy backticks; quote "$0" against paths with spaces.
PROGRAM=$(basename "$0")
VERSION=2.1

# Parse command-line flags; any positional argument is an error.
while test $# -gt 0
do
    case $1 in
    --usage | --help | -h )
        usage_and_exit 0
        ;;
    --version | -v )
        version
        exit 0
        ;;
    *)
        error_and_usage "Unrecognized option: $1"
        ;;
    esac
    shift
done

port_forward_assignment

exit 0
78 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/Certificate.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: Certificate
4 | metadata:
5 | name: internal-ingress-cert
6 | namespace: kube-system
7 | spec:
8 | dnsNames:
9 | - "*.${CLUSTER_DOMAIN_NAME}"
10 | - ${CLUSTER_DOMAIN_NAME}
11 | issuerRef:
12 | group: cert-manager.io
13 | kind: ClusterIssuer
14 | name: letsencrypt-prod-cloudflare
15 | secretName: internal-ingress-cert
16 | usages:
17 | - digital signature
18 | - key encipherment
19 | secretTemplate:
20 | annotations:
21 | reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true"
22 | reflector.v1.k8s.emberstack.com/reflection-allowed: "true"
23 | reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces: "default,monitoring"
24 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/controller/CertManager.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: jetstack
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://charts.jetstack.io
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: cert-manager
15 | namespace: kube-system
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: cert-manager
21 | version: 1.17.1
22 | sourceRef:
23 | kind: HelmRepository
24 | name: jetstack
25 | namespace: kube-system
26 | interval: 1h
27 | values:
28 | installCRDs: true
29 | extraArgs:
30 | - --max-concurrent-challenges=2
31 | - --dns01-recursive-nameservers-only
32 | - --dns01-recursive-nameservers=1.1.1.1:53,8.8.8.8:53
33 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/controller/Reflector.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: emberstack
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://emberstack.github.io/helm-charts
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: reflector
15 | namespace: kube-system
16 | spec:
17 | interval: 5m
18 | chart:
19 | spec:
20 | chart: reflector
21 | version: 7.1.288
22 | sourceRef:
23 | kind: HelmRepository
24 | name: emberstack
25 | namespace: kube-system
26 | interval: 1h
27 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/issuers/LetsEncryptProdCloudflareIssuer.yaml:
--------------------------------------------------------------------------------
1 | # while querying the Cloudflare API for POST \"/zones/1d3db8f84a9c4282a53b065902f5f8f2/dns_records\"
2 | # Error: 1038: You cannot use this API for domains with a .cf, .ga, .gq, .ml, or .tk TLD (top-level domain). To configure the DNS settings for this domain, use the Cloudflare Dashboard.
3 | ---
4 | apiVersion: cert-manager.io/v1
5 | kind: ClusterIssuer
6 | metadata:
7 | name: letsencrypt-prod-cloudflare
8 | spec:
9 | acme:
10 | # The ACME server URL
11 | server: https://acme-v02.api.letsencrypt.org/directory
12 | # Email address used for ACME registration
13 | email: ${EMAIL}
14 | # Name of a secret used to store the ACME account private key
15 | privateKeySecretRef:
16 | name: letsencrypt-prod
    # Enable the DNS-01 challenge provider
18 | solvers:
19 | # An empty 'selector' means that this solver matches all domains
20 | - selector: { }
21 | dns01:
22 | cloudflare:
23 | apiTokenSecretRef:
24 | name: cloudflare-api-token-secret
25 | key: api-token
26 | ---
27 | apiVersion: v1
28 | kind: Secret
29 | metadata:
30 | name: cloudflare-api-token-secret
31 | namespace: kube-system
32 | stringData:
33 | api-token: ${CLOUDFLARE_API_TOKEN}
34 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/issuers/LetsEncryptProdDNSIssuer.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: ClusterIssuer
4 | metadata:
5 | name: letsencrypt-prod-dns01
6 | spec:
7 | acme:
8 | # The ACME server URL
9 | server: https://acme-v02.api.letsencrypt.org/directory
10 | # Email address used for ACME registration
11 | email: ${EMAIL}
12 | # Name of a secret used to store the ACME account private key
13 | privateKeySecretRef:
14 | name: letsencrypt-prod
    # Enable the DNS-01 challenge provider
16 | solvers:
17 | # An empty 'selector' means that this solver matches all domains
18 | - selector: { }
19 | dns01:
20 | acmeDNS:
21 | accountSecretRef:
22 | name: auth-acme-dns-io-credentials
23 | key: acmedns.json
24 | host: https://auth.acme-dns.io
25 | ---
26 | apiVersion: v1
27 | kind: Secret
28 | metadata:
29 | name: auth-acme-dns-io-credentials
30 | namespace: kube-system
31 | stringData:
32 | acmedns.json: |
33 | {
34 | "${CLUSTER_DOMAIN_NAME}": {
35 | "username": "${ACME_DNS_USERNAME}",
36 | "password": "${ACME_DNS_PASSWORD}",
37 | "fulldomain": "${ACME_DNS_SUBDOMAIN}.auth.acme-dns.io",
38 | "subdomain": "${ACME_DNS_SUBDOMAIN}",
39 | "allowfrom": []
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/issuers/LetsEncryptStgCloudflareIssuer.yaml:
--------------------------------------------------------------------------------
1 | # while querying the Cloudflare API for POST \"/zones/1d3db8f84a9c4282a53b065902f5f8f2/dns_records\"
2 | # Error: 1038: You cannot use this API for domains with a .cf, .ga, .gq, .ml, or .tk TLD (top-level domain). To configure the DNS settings for this domain, use the Cloudflare Dashboard.
3 | ---
4 | apiVersion: cert-manager.io/v1
5 | kind: ClusterIssuer
6 | metadata:
7 | name: letsencrypt-stg-cloudflare
8 | spec:
9 | acme:
10 | # The ACME server URL
11 | server: https://acme-staging-v02.api.letsencrypt.org/directory
12 | # Email address used for ACME registration
13 | email: ${EMAIL}
14 | # Name of a secret used to store the ACME account private key
15 | privateKeySecretRef:
16 | name: letsencrypt-staging
    # Enable the DNS-01 challenge provider
18 | solvers:
19 | # An empty 'selector' means that this solver matches all domains
20 | - selector: { }
21 | dns01:
22 | cloudflare:
23 | apiTokenSecretRef:
24 | name: cloudflare-api-token-secret
25 | key: api-token
26 |
--------------------------------------------------------------------------------
/infrastructure/cert-manager/issuers/LetsEncryptStgDNSIssuer.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: cert-manager.io/v1
3 | kind: ClusterIssuer
4 | metadata:
5 | name: letsencrypt-stg-dns01
6 | spec:
7 | acme:
8 | # The ACME server URL
9 | server: https://acme-staging-v02.api.letsencrypt.org/directory
10 | # Email address used for ACME registration
11 | email: ${EMAIL}
12 | # Name of a secret used to store the ACME account private key
13 | privateKeySecretRef:
14 | name: letsencrypt-staging
    # Enable the DNS-01 challenge provider
16 | solvers:
17 | # An empty 'selector' means that this solver matches all domains
18 | - selector: { }
19 | dns01:
20 | acmeDNS:
21 | accountSecretRef:
22 | name: auth-acme-dns-io-credentials
23 | key: acmedns.json
24 | host: https://auth.acme-dns.io
25 |
--------------------------------------------------------------------------------
/infrastructure/cronjob/cronjob.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: batch/v1
3 | kind: CronJob
4 | metadata:
5 | name: ${CRONJOB_NAME}
6 | spec:
7 | schedule: "${SCHEDULE}"
8 | concurrencyPolicy: Forbid # Default value, can be customized
9 | failedJobsHistoryLimit: 1 # Default value, can be customized
10 | successfulJobsHistoryLimit: 3 # Default value, can be customized
11 | jobTemplate:
12 | spec:
13 | template:
14 | spec:
15 | volumes:
16 | - name: tz-config
17 | hostPath:
18 | path: /etc/localtime
19 | containers:
20 | - name: cronjob
21 | image: ${IMAGE}
22 | imagePullPolicy: IfNotPresent
23 | volumeMounts:
24 | - name: tz-config
25 | mountPath: /etc/localtime
26 | readOnly: true
27 | # env can be added via kustomize patches
28 | # initContainers can be added via kustomize patches if needed
29 | restartPolicy: OnFailure
30 |
--------------------------------------------------------------------------------
/infrastructure/cronjob/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: ${NAMESPACE}
4 | resources:
5 | - cronjob.yaml
6 |
7 | labels:
8 | - pairs:
9 | app.kubernetes.io/name: ${CRONJOB_NAME}
10 | includeSelectors: true
11 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/CertManagerKustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: cert-manager
6 | namespace: kube-system
7 | spec:
8 | dependsOn:
9 | - name: sealed-secret
10 | interval: 5m
11 | path: ./infrastructure/cert-manager/controller/
12 | prune: true
13 | sourceRef:
14 | kind: GitRepository
15 | name: k8s-infrastructure
16 | namespace: kube-system
17 | healthChecks:
18 | - apiVersion: apps/v1
19 | kind: Deployment
20 | name: cert-manager
21 | namespace: kube-system
22 | postBuild:
23 | substituteFrom:
24 | - kind: Secret
25 | name: secrets
26 | # The controller has to be installed first as it installs the CRDs as well, then the issuers and certificates can be pushed
27 | ---
28 | apiVersion: kustomize.toolkit.fluxcd.io/v1
29 | kind: Kustomization
30 | metadata:
31 | name: cert-manager-certificates
32 | namespace: kube-system
33 | spec:
34 | dependsOn:
35 | - name: sealed-secret
36 | - name: cert-manager
37 | interval: 5m
38 | path: ./infrastructure/cert-manager/
39 | prune: true
40 | sourceRef:
41 | kind: GitRepository
42 | name: k8s-infrastructure
43 | namespace: kube-system
44 | healthChecks:
45 | - apiVersion: apps/v1
46 | kind: Deployment
47 | name: cert-manager
48 | namespace: kube-system
49 | postBuild:
50 | substituteFrom:
51 | - kind: Secret
52 | name: secrets
53 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/CloudflareDDNS.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: cloudflare-ddns
6 | namespace: default
7 | spec:
8 | interval: 1h0m0s
9 | path: ./infrastructure/cronjob/
10 | sourceRef:
11 | kind: GitRepository
12 | name: k8s-infrastructure
13 | namespace: kube-system
14 | prune: true
15 | targetNamespace: default
16 | postBuild:
17 | substitute:
18 | CRONJOB_NAME: cloudflare-ddns
19 | # Every hour
20 | SCHEDULE: "0 */1 * * *"
21 | IMAGE: docker.io/gandazgul/ddns-cloudflare:vb299a70
22 | NAMESPACE: default
23 | patches:
24 | - patch: |
25 | - op: add
26 | path: /spec/jobTemplate/spec/template/spec/containers/0/env
27 | value:
28 | - name: CLUSTER_NAME
29 | value: ${CLUSTER_NAME}
30 | - name: CLOUDFLARE_API_TOKEN
31 | value: ${CLOUDFLARE_API_TOKEN}
32 | - name: CLOUDFLARE_API_EMAIL
33 | value: ${CLOUDFLARE_API_EMAIL}
34 | - name: CLUSTER_DOMAIN_NAME
35 | value: ${CLUSTER_DOMAIN_NAME}
36 | - name: CLOUDFLARE_ZONE_ID
37 | value: ${CLOUDFLARE_ZONE_ID}
38 | target:
39 | kind: CronJob
40 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/HostpathProvisioner.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: helm.toolkit.fluxcd.io/v2
2 | kind: HelmRelease
3 | metadata:
4 | name: hostpath-provisioner
5 | namespace: kube-system
6 | spec:
7 | chart:
8 | spec:
9 | chart: hostpath-provisioner
10 | version: '0.2.1'
11 | sourceRef:
12 | kind: HelmRepository
13 | name: gandazgul
14 | namespace: kube-system
15 | interval: 1m
16 | interval: 1h0m0s
17 | values:
18 | replicaCount: 1
19 | image:
20 | repository: quay.io/kubevirt/hostpath-provisioner
      tag: latest
      # NOTE(review): `latest` combined with IfNotPresent means a node keeps
      # whatever image it already pulled — pin a version tag to get updates.
      pullPolicy: IfNotPresent
23 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/IngressNginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: helm.toolkit.fluxcd.io/v2
2 | kind: HelmRelease
3 | metadata:
4 | name: ingress-nginx
5 | namespace: kube-system
6 | spec:
7 | chart:
8 | spec:
9 | chart: ingress-nginx
10 | version: 4.12.0
11 | sourceRef:
12 | kind: HelmRepository
13 | name: ingress-nginx
14 | namespace: kube-system
15 | dependsOn:
16 | - name: sealed-secrets-controller
17 | interval: 1h0m0s
18 | values:
19 | controller:
20 | config:
21 | custom-http-errors: 404,401,403,500,503
22 | client-body-buffer-size: "32M"
23 | proxy-body-size: "1G"
24 | proxy-buffering: "off"
25 | proxy-read-timeout: "600"
26 | proxy-send-timeout: "600"
27 | stats:
28 | enabled: true
29 | metrics:
30 | enabled: true
31 | service:
32 | annotations:
33 | prometheus.io/scrape: "true"
34 | prometheus.io/port: "10254"
35 | service:
36 | type: NodePort
37 | externalIPs:
38 | - ${CONTROL_PLANE_IP}
39 | extraArgs:
40 | default-ssl-certificate: "kube-system/internal-ingress-cert"
41 | defaultBackend:
42 | enabled: true
43 | image:
44 | repository: billimek/custom-error-pages
45 | tag: 0.4.4
46 | resources:
47 | requests:
48 | memory: 250Mi
49 | cpu: 25m
50 | limits:
51 | memory: 350Mi
52 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/SealedSecretsController.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: sealed-secrets
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://bitnami-labs.github.io/sealed-secrets
10 | ---
11 | apiVersion: helm.toolkit.fluxcd.io/v2
12 | kind: HelmRelease
13 | metadata:
14 | name: sealed-secrets-controller
15 | namespace: kube-system
16 | spec:
17 | chart:
18 | spec:
19 | chart: sealed-secrets
20 | version: 2.17.1
21 | sourceRef:
22 | kind: HelmRepository
23 | name: sealed-secrets
24 | namespace: kube-system
25 | interval: 1h0m0s
26 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/GeekCookbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: geek-cookbook
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://geek-cookbook.github.io/charts/
10 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmGandazgul.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: gandazgul
5 | namespace: kube-system
6 | spec:
7 | interval: 10m0s
8 | url: https://gandazgul.github.io/k8s-infrastructure/
9 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmK8sAtHome.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: k8s-at-home
5 | namespace: kube-system
6 | spec:
7 | interval: 1h0m0s
8 | url: https://k8s-at-home.com/charts/
9 | # url: git+https://github.com/gandazgul/charts-1@charts?ref=patch-1
10 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmKubernetes.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: kubernetes-dashboard
5 | namespace: kube-system
6 | spec:
7 | interval: 1h0m0s
8 | url: https://kubernetes.github.io/dashboard/
9 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmNginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: ingress-nginx
5 | namespace: kube-system
6 | spec:
7 | interval: 1h0m0s
8 | url: https://kubernetes.github.io/ingress-nginx
9 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmSagikazarmark.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: skm
5 | namespace: kube-system
6 | spec:
7 | interval: 10m0s
8 | url: https://charts.sagikazarmark.dev
9 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/HelmStable.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: source.toolkit.fluxcd.io/v1
2 | kind: HelmRepository
3 | metadata:
4 | name: stable
5 | namespace: kube-system
6 | spec:
7 | interval: 1h0m0s
8 | url: https://charts.helm.sh/stable
9 |
--------------------------------------------------------------------------------
/infrastructure/kube-system/repos/K8sHomeLab.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: HelmRepository
4 | metadata:
5 | name: k8s-home-lab
6 | namespace: kube-system
7 | spec:
8 | interval: 1h0m0s
9 | url: https://k8s-home-lab.github.io/helm-charts/
10 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-alertmanager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Alertmanager
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: main
10 | namespace: monitoring
11 | spec:
12 | image: quay.io/prometheus/alertmanager:v0.28.1
13 | nodeSelector:
14 | kubernetes.io/os: linux
15 | podMetadata:
16 | labels:
17 | app.kubernetes.io/component: alert-router
18 | app.kubernetes.io/instance: main
19 | app.kubernetes.io/name: alertmanager
20 | app.kubernetes.io/part-of: kube-prometheus
21 | replicas: 3
22 | resources:
23 | limits:
24 | cpu: 100m
25 | memory: 100Mi
26 | requests:
27 | cpu: 4m
28 | memory: 100Mi
29 | secrets: []
30 | securityContext:
31 | fsGroup: 2000
32 | runAsNonRoot: true
33 | runAsUser: 1000
34 | serviceAccountName: alertmanager-main
35 | version: 0.28.1
36 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: alertmanager-main
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 9093
21 | protocol: TCP
22 | - port: 8080
23 | protocol: TCP
24 | - from:
25 | - podSelector:
26 | matchLabels:
27 | app.kubernetes.io/name: alertmanager
28 | ports:
29 | - port: 9094
30 | protocol: TCP
31 | - port: 9094
32 | protocol: UDP
33 | podSelector:
34 | matchLabels:
35 | app.kubernetes.io/component: alert-router
36 | app.kubernetes.io/instance: main
37 | app.kubernetes.io/name: alertmanager
38 | app.kubernetes.io/part-of: kube-prometheus
39 | policyTypes:
40 | - Egress
41 | - Ingress
42 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: alertmanager-main
10 | namespace: monitoring
11 | spec:
12 | maxUnavailable: 1
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/component: alert-router
16 | app.kubernetes.io/instance: main
17 | app.kubernetes.io/name: alertmanager
18 | app.kubernetes.io/part-of: kube-prometheus
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: alertmanager-main
10 | namespace: monitoring
11 | stringData:
12 | alertmanager.yaml: |-
13 | "global":
14 | "resolve_timeout": "5m"
15 | "inhibit_rules":
16 | - "equal":
17 | - "namespace"
18 | - "alertname"
19 | "source_matchers":
20 | - "severity = critical"
21 | "target_matchers":
22 | - "severity =~ warning|info"
23 | - "equal":
24 | - "namespace"
25 | - "alertname"
26 | "source_matchers":
27 | - "severity = warning"
28 | "target_matchers":
29 | - "severity = info"
30 | - "equal":
31 | - "namespace"
32 | "source_matchers":
33 | - "alertname = InfoInhibitor"
34 | "target_matchers":
35 | - "severity = info"
36 | "receivers":
37 | - "name": "Default"
38 | - "name": "Watchdog"
39 | - "name": "Critical"
40 | - "name": "null"
41 | "route":
42 | "group_by":
43 | - "namespace"
44 | "group_interval": "5m"
45 | "group_wait": "30s"
46 | "receiver": "Default"
47 | "repeat_interval": "12h"
48 | "routes":
49 | - "matchers":
50 | - "alertname = Watchdog"
51 | "receiver": "Watchdog"
52 | - "matchers":
53 | - "alertname = InfoInhibitor"
54 | "receiver": "null"
55 | - "matchers":
56 | - "severity = critical"
57 | "receiver": "Critical"
58 | type: Opaque
59 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: alertmanager-main
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: web
14 | port: 9093
15 | targetPort: web
16 | - name: reloader-web
17 | port: 8080
18 | targetPort: reloader-web
19 | selector:
20 | app.kubernetes.io/component: alert-router
21 | app.kubernetes.io/instance: main
22 | app.kubernetes.io/name: alertmanager
23 | app.kubernetes.io/part-of: kube-prometheus
24 | sessionAffinity: ClientIP
25 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: alert-router
7 | app.kubernetes.io/instance: main
8 | app.kubernetes.io/name: alertmanager
9 | app.kubernetes.io/part-of: kube-prometheus
10 | name: alertmanager-main
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/alertmanager-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: alert-router
6 | app.kubernetes.io/instance: main
7 | app.kubernetes.io/name: alertmanager
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: alertmanager-main
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - interval: 30s
14 | port: web
15 | - interval: 30s
16 | port: reloader-web
17 | selector:
18 | matchLabels:
19 | app.kubernetes.io/component: alert-router
20 | app.kubernetes.io/instance: main
21 | app.kubernetes.io/name: alertmanager
22 | app.kubernetes.io/part-of: kube-prometheus
23 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/alertmanager/ingress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: alertmanager
5 | namespace: monitoring
6 | annotations:
7 | forecastle.stakater.com/appName: "Alert Manager"
8 | forecastle.stakater.com/group: "Management"
9 | forecastle.stakater.com/expose: "true"
10 | forecastle.stakater.com/icon: "https://raw.githubusercontent.com/stakater/ForecastleIcons/master/alert-manager.png"
11 | spec:
12 | ingressClassName: nginx
13 | rules:
14 | - host: alerts.${CLUSTER_DOMAIN_NAME}
15 | http:
16 | paths:
17 | - path: /
18 | pathType: Prefix
19 | backend:
20 | service:
21 | name: alertmanager-main
22 | port:
23 | name: web
24 | tls:
25 | - hosts:
26 | - alerts.${CLUSTER_DOMAIN_NAME}
27 | secretName: internal-ingress-cert
28 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/grafana/grafana-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: grafana-config
9 | namespace: monitoring
10 | stringData:
11 | grafana.ini: |
12 | [date_formats]
13 | default_timezone = UTC
14 | type: Opaque
15 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/grafana/grafana-ingress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: grafana-ingress
5 | namespace: monitoring
6 | annotations:
7 | forecastle.stakater.com/appName: "Grafana"
8 | forecastle.stakater.com/group: "Management"
9 | forecastle.stakater.com/expose: "true"
10 |     forecastle.stakater.com/icon: "https://grafana.${CLUSTER_DOMAIN_NAME}/public/img/grafana_icon.svg"
11 | spec:
12 | ingressClassName: nginx
13 | rules:
14 | - host: grafana.${CLUSTER_DOMAIN_NAME}
15 | http:
16 | paths:
17 | - path: /
18 | pathType: Prefix
19 | backend:
20 | service:
21 | name: grafana
22 | port:
23 | name: http
24 | tls:
25 | - hosts:
26 | - grafana.${CLUSTER_DOMAIN_NAME}
27 | secretName: grafana-tls
28 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/grafana/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: grafana
9 | namespace: monitoring
10 | spec:
11 | ports:
12 | - name: http
13 | port: 3000
14 | targetPort: http
15 | selector:
16 | app.kubernetes.io/component: grafana
17 | app.kubernetes.io/name: grafana
18 | app.kubernetes.io/part-of: kube-prometheus
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/grafana/grafana-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: grafana
7 | app.kubernetes.io/name: grafana
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: grafana
10 | namespace: monitoring
11 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/grafana/grafana-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.6.0
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - interval: 15s
14 | port: http
15 | selector:
16 | matchLabels:
17 | app.kubernetes.io/name: grafana
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: blackbox-exporter
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.26.0
9 | name: blackbox-exporter
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: blackbox-exporter
14 | subjects:
15 | - kind: ServiceAccount
16 | name: blackbox-exporter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-configuration.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | config.yml: |-
4 | "modules":
5 | "http_2xx":
6 | "http":
7 | "preferred_ip_protocol": "ip4"
8 | "prober": "http"
9 | "http_post_2xx":
10 | "http":
11 | "method": "POST"
12 | "preferred_ip_protocol": "ip4"
13 | "prober": "http"
14 | "irc_banner":
15 | "prober": "tcp"
16 | "tcp":
17 | "preferred_ip_protocol": "ip4"
18 | "query_response":
19 | - "send": "NICK prober"
20 | - "send": "USER prober prober prober :prober"
21 | - "expect": "PING :([^ ]+)"
22 | "send": "PONG ${1}"
23 | - "expect": "^:[^ ]+ 001"
24 | "pop3s_banner":
25 | "prober": "tcp"
26 | "tcp":
27 | "preferred_ip_protocol": "ip4"
28 | "query_response":
29 | - "expect": "^+OK"
30 | "tls": true
31 | "tls_config":
32 | "insecure_skip_verify": false
33 | "ssh_banner":
34 | "prober": "tcp"
35 | "tcp":
36 | "preferred_ip_protocol": "ip4"
37 | "query_response":
38 | - "expect": "^SSH-2.0-"
39 | "tcp_connect":
40 | "prober": "tcp"
41 | "tcp":
42 | "preferred_ip_protocol": "ip4"
43 | kind: ConfigMap
44 | metadata:
45 | labels:
46 | app.kubernetes.io/component: exporter
47 | app.kubernetes.io/name: blackbox-exporter
48 | app.kubernetes.io/part-of: kube-prometheus
49 | app.kubernetes.io/version: 0.26.0
50 | name: blackbox-exporter-configuration
51 | namespace: monitoring
52 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.26.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 9115
21 | protocol: TCP
22 | - port: 19115
23 | protocol: TCP
24 | podSelector:
25 | matchLabels:
26 | app.kubernetes.io/component: exporter
27 | app.kubernetes.io/name: blackbox-exporter
28 | app.kubernetes.io/part-of: kube-prometheus
29 | policyTypes:
30 | - Egress
31 | - Ingress
32 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.26.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: https
14 | port: 9115
15 | targetPort: https
16 | - name: probe
17 | port: 19115
18 | targetPort: http
19 | selector:
20 | app.kubernetes.io/component: exporter
21 | app.kubernetes.io/name: blackbox-exporter
22 | app.kubernetes.io/part-of: kube-prometheus
23 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: blackbox-exporter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.26.0
10 | name: blackbox-exporter
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/blackboxExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.26.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | interval: 30s
15 | path: /metrics
16 | port: https
17 | scheme: https
18 | tlsConfig:
19 | insecureSkipVerify: true
20 | selector:
21 | matchLabels:
22 | app.kubernetes.io/component: exporter
23 | app.kubernetes.io/name: blackbox-exporter
24 | app.kubernetes.io/part-of: kube-prometheus
25 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/grafana-dashboardDatasources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.6.0
9 | name: grafana-datasources
10 | namespace: monitoring
11 | stringData:
12 | datasources.yaml: |-
13 | {
14 | "apiVersion": 1,
15 | "datasources": [
16 | {
17 | "access": "proxy",
18 | "editable": false,
19 | "name": "prometheus",
20 | "orgId": 1,
21 | "type": "prometheus",
22 | "url": "http://prometheus-k8s.monitoring.svc:9090",
23 | "version": 1
24 | }
25 | ]
26 | }
27 | type: Opaque
28 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/grafana-dashboardSources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | dashboards.yaml: |-
4 | {
5 | "apiVersion": 1,
6 | "providers": [
7 | {
8 | "folder": "Default",
9 | "folderUid": "",
10 | "name": "0",
11 | "options": {
12 | "path": "/grafana-dashboard-definitions/0"
13 | },
14 | "orgId": 1,
15 | "type": "file"
16 | }
17 | ]
18 | }
19 | kind: ConfigMap
20 | metadata:
21 | labels:
22 | app.kubernetes.io/component: grafana
23 | app.kubernetes.io/name: grafana
24 | app.kubernetes.io/part-of: kube-prometheus
25 | app.kubernetes.io/version: 11.6.0
26 | name: grafana-dashboards
27 | namespace: monitoring
28 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/grafana-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.6.0
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 3000
21 | protocol: TCP
22 | podSelector:
23 | matchLabels:
24 | app.kubernetes.io/component: grafana
25 | app.kubernetes.io/name: grafana
26 | app.kubernetes.io/part-of: kube-prometheus
27 | policyTypes:
28 | - Egress
29 | - Ingress
30 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/grafana-prometheusRule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PrometheusRule
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.6.0
9 | prometheus: k8s
10 | role: alert-rules
11 | name: grafana-rules
12 | namespace: monitoring
13 | spec:
14 | groups:
15 | - name: GrafanaAlerts
16 | rules:
17 | - alert: GrafanaRequestsFailing
18 | annotations:
19 | message: '{{ $labels.namespace }}/{{ $labels.job }}/{{ $labels.handler }} is experiencing {{ $value | humanize }}% errors'
20 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/grafana/grafanarequestsfailing
21 | expr: |
22 | 100 * namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query", status_code=~"5.."}
23 | / ignoring (status_code)
24 | sum without (status_code) (namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m{handler!~"/api/datasources/proxy/:id.*|/api/ds/query|/api/tsdb/query"})
25 | > 50
26 | for: 5m
27 | labels:
28 | severity: warning
29 | - name: grafana_rules
30 | rules:
31 | - expr: |
32 | sum by (namespace, job, handler, status_code) (rate(grafana_http_request_duration_seconds_count[5m]))
33 | record: namespace_job_handler_statuscode:grafana_http_request_duration_seconds_count:rate5m
34 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubeStateMetrics-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.15.0
9 | name: kube-state-metrics
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: kube-state-metrics
14 | subjects:
15 | - kind: ServiceAccount
16 | name: kube-state-metrics
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubeStateMetrics-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.15.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 8443
21 | protocol: TCP
22 | - port: 9443
23 | protocol: TCP
24 | podSelector:
25 | matchLabels:
26 | app.kubernetes.io/component: exporter
27 | app.kubernetes.io/name: kube-state-metrics
28 | app.kubernetes.io/part-of: kube-prometheus
29 | policyTypes:
30 | - Egress
31 | - Ingress
32 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubeStateMetrics-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.15.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: https-main
15 | port: 8443
16 | targetPort: https-main
17 | - name: https-self
18 | port: 9443
19 | targetPort: https-self
20 | selector:
21 | app.kubernetes.io/component: exporter
22 | app.kubernetes.io/name: kube-state-metrics
23 | app.kubernetes.io/part-of: kube-prometheus
24 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubeStateMetrics-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: kube-state-metrics
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.15.0
10 | name: kube-state-metrics
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubeStateMetrics-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.15.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | honorLabels: true
15 | interval: 30s
16 | metricRelabelings:
17 | - action: drop
18 | regex: kube_(endpoint_(address_not_ready|address_available|ports))
19 | sourceLabels:
20 | - __name__
21 | port: https-main
22 | relabelings:
23 | - action: labeldrop
24 | regex: (pod|service|endpoint|namespace)
25 | scheme: https
26 | scrapeTimeout: 30s
27 | tlsConfig:
28 | insecureSkipVerify: true
29 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
30 | interval: 30s
31 | port: https-self
32 | scheme: https
33 | tlsConfig:
34 | insecureSkipVerify: true
35 | jobLabel: app.kubernetes.io/name
36 | selector:
37 | matchLabels:
38 | app.kubernetes.io/component: exporter
39 | app.kubernetes.io/name: kube-state-metrics
40 | app.kubernetes.io/part-of: kube-prometheus
41 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorCoreDNS.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: coredns
6 | app.kubernetes.io/part-of: kube-prometheus
7 | name: coredns
8 | namespace: monitoring
9 | spec:
10 | endpoints:
11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 | interval: 15s
13 | metricRelabelings:
14 | - action: drop
15 | regex: coredns_cache_misses_total
16 | sourceLabels:
17 | - __name__
18 | port: metrics
19 | jobLabel: app.kubernetes.io/name
20 | namespaceSelector:
21 | matchNames:
22 | - kube-system
23 | selector:
24 | matchLabels:
25 | k8s-app: kube-dns
26 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-scheduler
6 | app.kubernetes.io/part-of: kube-prometheus
7 | name: kube-scheduler
8 | namespace: monitoring
9 | spec:
10 | endpoints:
11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 | interval: 30s
13 | port: https-metrics
14 | scheme: https
15 | tlsConfig:
16 | insecureSkipVerify: true
17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
18 | interval: 5s
19 | metricRelabelings:
20 | - action: drop
21 | regex: process_start_time_seconds
22 | sourceLabels:
23 | - __name__
24 | path: /metrics/slis
25 | port: https-metrics
26 | scheme: https
27 | tlsConfig:
28 | insecureSkipVerify: true
29 | jobLabel: app.kubernetes.io/name
30 | namespaceSelector:
31 | matchNames:
32 | - kube-system
33 | selector:
34 | matchLabels:
35 | app.kubernetes.io/name: kube-scheduler
36 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: node-exporter
9 | rules:
10 | - apiGroups:
11 | - authentication.k8s.io
12 | resources:
13 | - tokenreviews
14 | verbs:
15 | - create
16 | - apiGroups:
17 | - authorization.k8s.io
18 | resources:
19 | - subjectaccessreviews
20 | verbs:
21 | - create
22 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: node-exporter
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: node-exporter
13 | subjects:
14 | - kind: ServiceAccount
15 | name: node-exporter
16 | namespace: monitoring
17 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: node-exporter
9 | namespace: monitoring
10 | spec:
11 | egress:
12 | - {}
13 | ingress:
14 | - from:
15 | - podSelector:
16 | matchLabels:
17 | app.kubernetes.io/name: prometheus
18 | ports:
19 | - port: 9100
20 | protocol: TCP
21 | podSelector:
22 | matchLabels:
23 | app.kubernetes.io/component: exporter
24 | app.kubernetes.io/name: node-exporter
25 | app.kubernetes.io/part-of: kube-prometheus
26 | policyTypes:
27 | - Egress
28 | - Ingress
29 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: node-exporter
9 | namespace: monitoring
10 | spec:
11 | clusterIP: None
12 | ports:
13 | - name: https
14 | port: 9100
15 | targetPort: https
16 | selector:
17 | app.kubernetes.io/component: exporter
18 | app.kubernetes.io/name: node-exporter
19 | app.kubernetes.io/part-of: kube-prometheus
20 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: node-exporter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: node-exporter
10 | namespace: monitoring
11 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/nodeExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: node-exporter
9 | namespace: monitoring
10 | spec:
11 | endpoints:
12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
13 | interval: 15s
14 | port: https
15 | relabelings:
16 | - action: replace
17 | regex: (.*)
18 | replacement: $1
19 | sourceLabels:
20 | - __meta_kubernetes_pod_node_name
21 | targetLabel: instance
22 | scheme: https
23 | tlsConfig:
24 | insecureSkipVerify: true
25 | jobLabel: app.kubernetes.io/name
26 | selector:
27 | matchLabels:
28 | app.kubernetes.io/component: exporter
29 | app.kubernetes.io/name: node-exporter
30 | app.kubernetes.io/part-of: kube-prometheus
31 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-apiService.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiregistration.k8s.io/v1
2 | kind: APIService
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: v1beta1.metrics.k8s.io
10 | spec:
11 | group: metrics.k8s.io
12 | groupPriorityMinimum: 100
13 | insecureSkipTLSVerify: true
14 | service:
15 | name: prometheus-adapter
16 | namespace: monitoring
17 | version: v1beta1
18 | versionPriority: 100
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - nodes
15 | - namespaces
16 | - pods
17 | - services
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
10 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
11 | rbac.authorization.k8s.io/aggregate-to-view: "true"
12 | name: system:aggregated-metrics-reader
13 | rules:
14 | - apiGroups:
15 | - metrics.k8s.io
16 | resources:
17 | - pods
18 | - nodes
19 | verbs:
20 | - get
21 | - list
22 | - watch
23 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: prometheus-adapter
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-adapter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBindingDelegator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: resource-metrics:system:auth-delegator
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: system:auth-delegator
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-adapter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-clusterRoleServerResources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | name: resource-metrics-server-resources
9 | rules:
10 | - apiGroups:
11 | - metrics.k8s.io
12 | resources:
13 | - '*'
14 | verbs:
15 | - '*'
16 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - {}
16 | podSelector:
17 | matchLabels:
18 | app.kubernetes.io/component: metrics-adapter
19 | app.kubernetes.io/name: prometheus-adapter
20 | app.kubernetes.io/part-of: kube-prometheus
21 | policyTypes:
22 | - Egress
23 | - Ingress
24 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | minAvailable: 1
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/component: metrics-adapter
16 | app.kubernetes.io/name: prometheus-adapter
17 | app.kubernetes.io/part-of: kube-prometheus
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-roleBindingAuthReader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: resource-metrics-auth-reader
10 | namespace: kube-system
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: Role
14 | name: extension-apiserver-authentication-reader
15 | subjects:
16 | - kind: ServiceAccount
17 | name: prometheus-adapter
18 | namespace: monitoring
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: https
14 | port: 443
15 | targetPort: 6443
16 | selector:
17 | app.kubernetes.io/component: metrics-adapter
18 | app.kubernetes.io/name: prometheus-adapter
19 | app.kubernetes.io/part-of: kube-prometheus
20 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: metrics-adapter
7 | app.kubernetes.io/name: prometheus-adapter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.12.0
10 | name: prometheus-adapter
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusAdapter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | interval: 30s
15 | metricRelabelings:
16 | - action: drop
17 | regex: (apiserver_client_certificate_.*|apiserver_envelope_.*|apiserver_flowcontrol_.*|apiserver_storage_.*|apiserver_webhooks_.*|workqueue_.*)
18 | sourceLabels:
19 | - __name__
20 | port: https
21 | scheme: https
22 | tlsConfig:
23 | insecureSkipVerify: true
24 | selector:
25 | matchLabels:
26 | app.kubernetes.io/component: metrics-adapter
27 | app.kubernetes.io/name: prometheus-adapter
28 | app.kubernetes.io/part-of: kube-prometheus
29 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusOperator-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.81.0
9 | name: prometheus-operator
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: prometheus-operator
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-operator
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusOperator-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.81.0
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 8443
21 | protocol: TCP
22 | podSelector:
23 | matchLabels:
24 | app.kubernetes.io/component: controller
25 | app.kubernetes.io/name: prometheus-operator
26 | app.kubernetes.io/part-of: kube-prometheus
27 | policyTypes:
28 | - Egress
29 | - Ingress
30 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusOperator-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.81.0
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: https
15 | port: 8443
16 | targetPort: https
17 | selector:
18 | app.kubernetes.io/component: controller
19 | app.kubernetes.io/name: prometheus-operator
20 | app.kubernetes.io/part-of: kube-prometheus
21 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusOperator-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: controller
7 | app.kubernetes.io/name: prometheus-operator
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.81.0
10 | name: prometheus-operator
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/kube-prometheus/prometheusOperator-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.81.0
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | honorLabels: true
15 | port: https
16 | scheme: https
17 | tlsConfig:
18 | insecureSkipVerify: true
19 | selector:
20 | matchLabels:
21 | app.kubernetes.io/component: controller
22 | app.kubernetes.io/name: prometheus-operator
23 | app.kubernetes.io/part-of: kube-prometheus
24 | app.kubernetes.io/version: 0.81.0
25 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/ingress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: prometheus
5 | namespace: monitoring
6 | annotations:
7 | forecastle.stakater.com/appName: "Prometheus"
8 | forecastle.stakater.com/group: "Management"
9 | forecastle.stakater.com/expose: "true"
10 | forecastle.stakater.com/icon: "https://raw.githubusercontent.com/stakater/ForecastleIcons/master/prometheus.png"
11 | spec:
12 | ingressClassName: nginx
13 | rules:
14 | - host: prometheus.${CLUSTER_DOMAIN_NAME}
15 | http:
16 | paths:
17 | - path: /
18 | pathType: Prefix
19 | backend:
20 | service:
21 | name: prometheus-k8s
22 | port:
23 | name: web
24 | tls:
25 | - hosts:
26 | - prometheus.${CLUSTER_DOMAIN_NAME}
27 | secretName: internal-ingress-cert
28 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - nodes/metrics
15 | verbs:
16 | - get
17 | - nonResourceURLs:
18 | - /metrics
19 | - /metrics/slis
20 | verbs:
21 | - get
22 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: prometheus-k8s
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-k8s
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 9090
21 | protocol: TCP
22 | - port: 8080
23 | protocol: TCP
24 | - from:
25 | - podSelector:
26 | matchLabels:
27 | app.kubernetes.io/name: prometheus-adapter
28 | ports:
29 | - port: 9090
30 | protocol: TCP
31 | - from:
32 | - podSelector:
33 | matchLabels:
34 | app.kubernetes.io/name: grafana
35 | ports:
36 | - port: 9090
37 | protocol: TCP
38 | podSelector:
39 | matchLabels:
40 | app.kubernetes.io/component: prometheus
41 | app.kubernetes.io/instance: k8s
42 | app.kubernetes.io/name: prometheus
43 | app.kubernetes.io/part-of: kube-prometheus
44 | policyTypes:
45 | - Egress
46 | - Ingress
47 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | namespace: monitoring
11 | spec:
12 | minAvailable: 1
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/component: prometheus
16 | app.kubernetes.io/instance: k8s
17 | app.kubernetes.io/name: prometheus
18 | app.kubernetes.io/part-of: kube-prometheus
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: k8s
10 | namespace: monitoring
11 | spec:
12 | alerting:
13 | alertmanagers:
14 | - apiVersion: v2
15 | name: alertmanager-main
16 | namespace: monitoring
17 | port: web
18 | enableFeatures: []
19 | externalLabels: {}
20 | image: quay.io/prometheus/prometheus:v3.2.1
21 | nodeSelector:
22 | kubernetes.io/os: linux
23 | podMetadata:
24 | labels:
25 | app.kubernetes.io/component: prometheus
26 | app.kubernetes.io/instance: k8s
27 | app.kubernetes.io/name: prometheus
28 | app.kubernetes.io/part-of: kube-prometheus
29 |   podMonitorNamespaceSelector: {}
30 | podMonitorSelector: {}
31 | probeNamespaceSelector: {}
32 | probeSelector: {}
33 | replicas: 2
34 | resources:
35 | requests:
36 | memory: 400Mi
37 | ruleNamespaceSelector: {}
38 | ruleSelector: {}
39 | scrapeConfigNamespaceSelector: {}
40 | scrapeConfigSelector: {}
41 | securityContext:
42 | fsGroup: 2000
43 | runAsNonRoot: true
44 | runAsUser: 1000
45 | serviceAccountName: prometheus-k8s
46 | serviceMonitorNamespaceSelector: {}
47 | serviceMonitorSelector: {}
48 | version: 3.2.1
49 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-roleBindingConfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s-config
10 | namespace: monitoring
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: Role
14 | name: prometheus-k8s-config
15 | subjects:
16 | - kind: ServiceAccount
17 | name: prometheus-k8s
18 | namespace: monitoring
19 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-roleBindingSpecificNamespaces.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | items:
3 | - apiVersion: rbac.authorization.k8s.io/v1
4 | kind: RoleBinding
5 | metadata:
6 | labels:
7 | app.kubernetes.io/component: prometheus
8 | app.kubernetes.io/instance: k8s
9 | app.kubernetes.io/name: prometheus
10 | app.kubernetes.io/part-of: kube-prometheus
11 | name: prometheus-k8s
12 | namespace: default
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: Role
16 | name: prometheus-k8s
17 | subjects:
18 | - kind: ServiceAccount
19 | name: prometheus-k8s
20 | namespace: monitoring
21 | - apiVersion: rbac.authorization.k8s.io/v1
22 | kind: RoleBinding
23 | metadata:
24 | labels:
25 | app.kubernetes.io/component: prometheus
26 | app.kubernetes.io/instance: k8s
27 | app.kubernetes.io/name: prometheus
28 | app.kubernetes.io/part-of: kube-prometheus
29 | name: prometheus-k8s
30 | namespace: kube-system
31 | roleRef:
32 | apiGroup: rbac.authorization.k8s.io
33 | kind: Role
34 | name: prometheus-k8s
35 | subjects:
36 | - kind: ServiceAccount
37 | name: prometheus-k8s
38 | namespace: monitoring
39 | - apiVersion: rbac.authorization.k8s.io/v1
40 | kind: RoleBinding
41 | metadata:
42 | labels:
43 | app.kubernetes.io/component: prometheus
44 | app.kubernetes.io/instance: k8s
45 | app.kubernetes.io/name: prometheus
46 | app.kubernetes.io/part-of: kube-prometheus
47 | name: prometheus-k8s
48 | namespace: monitoring
49 | roleRef:
50 | apiGroup: rbac.authorization.k8s.io
51 | kind: Role
52 | name: prometheus-k8s
53 | subjects:
54 | - kind: ServiceAccount
55 | name: prometheus-k8s
56 | namespace: monitoring
57 | kind: RoleBindingList
58 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-roleConfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s-config
10 | namespace: monitoring
11 | rules:
12 | - apiGroups:
13 | - ""
14 | resources:
15 | - configmaps
16 | verbs:
17 | - get
18 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: web
14 | port: 9090
15 | targetPort: web
16 | - name: reloader-web
17 | port: 8080
18 | targetPort: reloader-web
19 | selector:
20 | app.kubernetes.io/component: prometheus
21 | app.kubernetes.io/instance: k8s
22 | app.kubernetes.io/name: prometheus
23 | app.kubernetes.io/part-of: kube-prometheus
24 | sessionAffinity: ClientIP
25 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: true
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: prometheus
7 | app.kubernetes.io/instance: k8s
8 | app.kubernetes.io/name: prometheus
9 | app.kubernetes.io/part-of: kube-prometheus
10 | name: prometheus-k8s
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/infrastructure/monitoring/prometheus/prometheus-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | name: prometheus-k8s
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - interval: 30s
14 | port: web
15 | - interval: 30s
16 | port: reloader-web
17 | selector:
18 | matchLabels:
19 | app.kubernetes.io/component: prometheus
20 | app.kubernetes.io/instance: k8s
21 | app.kubernetes.io/name: prometheus
22 | app.kubernetes.io/part-of: kube-prometheus
23 |
--------------------------------------------------------------------------------
/infrastructure/setup/GitRepoSync.yaml.templ:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: source.toolkit.fluxcd.io/v1
3 | kind: GitRepository
4 | metadata:
5 | name: k8s-infrastructure
6 | namespace: kube-system
7 | spec:
8 | interval: 1m0s
9 | ref:
10 | branch: main
11 | url: https://github.com/gandazgul/k8s-infrastructure.git
12 | ignore: |
13 | clusters/**/apps/values/
14 |
--------------------------------------------------------------------------------
/infrastructure/setup/KubeSystem.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.toolkit.fluxcd.io/v1
2 | kind: Kustomization
3 | metadata:
4 | name: kube-system
5 | namespace: kube-system
6 | spec:
7 | dependsOn:
8 | - name: sealed-secret
9 | interval: 10m0s
10 | path: ./infrastructure/kube-system/
11 | prune: true
12 | sourceRef:
13 | kind: GitRepository
14 | name: k8s-infrastructure
15 | namespace: kube-system
16 | postBuild:
17 | substituteFrom:
18 | - kind: Secret
19 | name: secrets
20 | optional: true
21 |
--------------------------------------------------------------------------------
/infrastructure/setup/SealedSecretsKustomization.yaml.templ:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.toolkit.fluxcd.io/v1
3 | kind: Kustomization
4 | metadata:
5 | name: sealed-secret
6 | namespace: kube-system
7 | spec:
8 | interval: 10m0s
9 | path: ./clusters/{{CLUSTER_NAME}}/sealed-secret/
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: k8s-infrastructure
14 | namespace: kube-system
15 |
--------------------------------------------------------------------------------
/infrastructure/setup/change-branch.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Point the Flux GitRepository at the currently checked-out git branch,
# pushing the branch to origin first if it does not exist there yet,
# then trigger a reconcile so the change takes effect immediately.

# The directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source "$SCRIPT_DIR/requirements.sh"

# Get the current branch name
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)

# Check if the current branch exists on the remote
if ! git ls-remote --exit-code origin "$CURRENT_BRANCH" &> /dev/null; then
    message "Branch $CURRENT_BRANCH does not exist on the remote. Pushing..."
    git push --set-upstream origin "$CURRENT_BRANCH"
fi

message "Changing branch to ${CURRENT_BRANCH}"
# Only rewrite the "branch: main" ref line — a bare s/main/.../ would also
# clobber any other occurrence of "main" in the template. Use '|' as the sed
# delimiter so branch names containing '/' (e.g. feature/foo) don't break it.
sed "s|branch: main|branch: ${CURRENT_BRANCH}|" "$SCRIPT_DIR"/GitRepoSync.yaml.templ | kubectl apply -f -

message "Reconciling..."
flux reconcile kustomization --with-source kube-system -n kube-system && flux reconcile kustomization "${CLUSTER_NAME}" -n kube-system
21 |
--------------------------------------------------------------------------------
/infrastructure/setup/configure-cluster.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Seal the cluster's secrets.env into a SealedSecret, generate per-app secrets
# from the values/ directory, and apply the cluster's Flux Kustomizations.
# Requires: kubectl, kubeseal, jq, flux (checked by requirements.sh).

# The directory where this script is located
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source "$SCRIPT_DIR/requirements.sh"

# Seal main secrets file with SealedSecrets
message "Generating $CLUSTER_NAME secret..."
rm -rf "$REPO_ROOT/clusters/$CLUSTER_NAME/sealed-secret/SealedSecret.yaml"
kubectl create secret generic secrets --dry-run=client --namespace=kube-system --from-env-file="$REPO_ROOT/clusters/$CLUSTER_NAME/secrets.env" -o json |
    jq '.metadata.annotations |= { "reflector.v1.k8s.emberstack.com/reflection-auto-enabled": "true", "reflector.v1.k8s.emberstack.com/reflection-allowed": "true", "reflector.v1.k8s.emberstack.com/reflection-allowed-namespaces": "default,monitoring" }' |
    kubeseal -o yaml > "$REPO_ROOT/clusters/$CLUSTER_NAME/sealed-secret/SealedSecret.yaml"

# apply the sealed secret
kubectl apply -f "$REPO_ROOT/clusters/$CLUSTER_NAME/sealed-secret/SealedSecret.yaml"

# Create a Kustomization for the cluster's Secrets so that apps can depend on it
# (direct pipe; the previous `echo "$(sed ...)"` wrapper added nothing)
sed "s/{{CLUSTER_NAME}}/$CLUSTER_NAME/g" "$SCRIPT_DIR"/SealedSecretsKustomization.yaml.templ | kubectl apply -f -

# Create value/yaml secrets
message "Generating $CLUSTER_NAME app secrets from values..."
rm -rf "$REPO_ROOT/clusters/$CLUSTER_NAME/apps/secrets/"
mkdir "$REPO_ROOT/clusters/$CLUSTER_NAME/apps/secrets/"
# Write the kustomization header. NOTE: this was `cat <>`, which opens the
# target read-write instead of reading the here-doc, so the header was never
# written and the body lines were executed as shell commands. The secrets dir
# was just re-created, so a plain truncating redirect is correct here.
cat <<EOT > "$REPO_ROOT/clusters/$CLUSTER_NAME/apps/secrets/kustomization.yaml"
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
EOT

for f in "$REPO_ROOT"/clusters/"$CLUSTER_NAME"/apps/values/*.yaml; do
    # If the glob matched nothing, $f is the literal pattern — skip it
    [ -e "$f" ] || continue
    echo "Generating secrets from values file: $f..."
    basename=$(basename "$f" .yaml)
    kubectl -n default create secret generic "${basename}" --dry-run=client --from-file=values.yaml="${f}" -o yaml >"$REPO_ROOT/clusters/$CLUSTER_NAME/apps/secrets/${basename}.yaml"
    echo "- ${basename}.yaml" >> "$REPO_ROOT/clusters/$CLUSTER_NAME/apps/secrets/kustomization.yaml"
done

message "Installing $CLUSTER_NAME configs..."
kubectl apply -f "$REPO_ROOT/clusters/$CLUSTER_NAME/ClusterKustomization.yaml"

message "Done configuring $CLUSTER_NAME's cluster"
42 |
--------------------------------------------------------------------------------
/infrastructure/setup/requirements.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Shared preamble sourced by the setup scripts: verifies required binaries,
# resolves the cluster name, loads the cluster's secrets.env with all
# variables exported, and switches kubectl to the right context.

set -e

# Abort if the given binary is not available on PATH.
need() {
    if ! command -v "$1" &>/dev/null; then
        echo "Binary '$1' is missing but required"
        exit 1
    fi
}

# Print a banner-framed status message.
message() {
    echo -e "\n######################################################################"
    echo "# $1"
    echo "######################################################################"
}

# Block until the user presses a key.
pause() {
    read -r -s -n 1 -p "Check these values. If anything looks wrong stop now and check the secrets.env file. Press any key to continue . . ."
    echo ""
}

# make sure all required binaries are installed
for required_bin in kubectl kubectl-ctx flux git kubeseal jq; do
    need "$required_bin"
done

# check if have the cluster name, otherwise set it to the current user
if [ -z ${1+x} ]; then
    echo -e "Cluster Name was not specified. Assuming \e[1;32m$(whoami)\e[0m as the cluster name."
    CLUSTER_NAME=$(whoami)
else
    CLUSTER_NAME=$1
fi

# The root of the git repo
REPO_ROOT=$(git rev-parse --show-toplevel)
SECRET_FILE="$REPO_ROOT/clusters/$CLUSTER_NAME/secrets.env"

if ! [ -f "${SECRET_FILE}" ]; then
    echo "The secrets.env file for $CLUSTER_NAME does not exist. Please create it."
    exit 1
fi

# Export every variable assigned while sourcing the env file...
set -o allexport

# shellcheck source=$REPO_ROOT/clusters/$CLUSTER_NAME/secrets.env
source "${SECRET_FILE}"

# ...then stop auto-exporting
set +o allexport

message "Make sure we are using the right context"
kubectl-ctx "${KUBECTX_NAME}"
--------------------------------------------------------------------------------
/infrastructure/storage/hdd-class/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: ${NAMESPACE}
5 | resources:
6 | - ../pv/
7 | - ../pvc/
8 | - sc.yaml
9 |
10 | commonLabels:
11 | app.kubernetes.io/name: ${VOLUME_NAME}
--------------------------------------------------------------------------------
/infrastructure/storage/hdd-class/sc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 | name: hdd
6 | provisioner: kubernetes.io/no-provisioner
7 | reclaimPolicy: Retain
8 |
--------------------------------------------------------------------------------
/infrastructure/storage/hdd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: ${NAMESPACE}
5 | resources:
6 | - ../pv/
7 | - ../pvc/
8 |
9 | commonLabels:
10 | app.kubernetes.io/name: ${VOLUME_NAME}
11 |
--------------------------------------------------------------------------------
/infrastructure/storage/pv/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: ${NAMESPACE}
5 | resources:
6 | - pv.yaml
7 |
8 | commonLabels:
9 | app.kubernetes.io/name: ${VOLUME_NAME}
10 |
--------------------------------------------------------------------------------
/infrastructure/storage/pv/pv.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: ${VOLUME_NAME}
5 | spec:
6 | storageClassName: ${VOLUME_STORAGE_CLASS}
7 | accessModes:
8 | - ReadWriteOnce
9 | capacity:
10 | storage: ${VOLUME_CAPACITY}
11 | volumeMode: ${VOLUME_MODE}
12 | local:
13 | path: ${VOLUME_PATH}
14 | nodeAffinity:
15 | required:
16 | nodeSelectorTerms:
17 | - matchExpressions:
18 | - key: node-role.kubernetes.io/control-plane
19 | operator: Exists
20 | persistentVolumeReclaimPolicy: Retain
21 |
--------------------------------------------------------------------------------
/infrastructure/storage/pvc/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | namespace: ${NAMESPACE}
5 | resources:
6 | - pvc.yaml
7 |
8 | commonLabels:
9 | app.kubernetes.io/name: ${VOLUME_NAME}
10 |
--------------------------------------------------------------------------------
/infrastructure/storage/pvc/pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: ${VOLUME_NAME}
5 | spec:
6 | storageClassName: ${VOLUME_STORAGE_CLASS}
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: ${VOLUME_CAPACITY}
12 | selector:
13 | matchLabels:
14 | app.kubernetes.io/name: ${VOLUME_NAME}
15 |
--------------------------------------------------------------------------------
/install-k8s/2-configK8SControlPlane.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Bootstrap the Kubernetes control plane on this node: run kubeadm init
# (once), install the kubectl admin config, deploy flannel, then offer a
# reboot to finish the installation.

./configNode.sh || exit 1

printf "\nInstalling kubernetes ===================================================================================\n"
# Check if kubeadm already ran, do kubeadm reset to re-run
if [[ ! -f "/etc/kubernetes/kubelet.conf" ]]; then
    sudo systemctl enable kubelet.service
    sudo hostnamectl set-hostname --static k8s-control-plane
    sudo kubeadm config images pull
    sudo kubeadm init --config=./kubeadm.yaml || exit 1
else
    printf "\nNOTE: Looks like kubeadm init already ran. If you want to run it again, run kubeadm reset ===============\n"
fi;

printf "\nCopy kubectl config ======================================================================================\n"
# Quote expansions so paths with spaces (or an odd $HOME) don't word-split
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"

printf "\nInstalling flannel =======================================================================================\n"
kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

printf "\n\n=========================================================================================================\n"
printf "Kubernetes is now installed. Please check the status of flannel and kubelet to make sure the network is ready before we proceed to the next step."
printf "\nVerify that is running:\n"
kubectl get nodes
printf "\n"
kubectl get ds --watch --all-namespaces

echo -n "I have to restart in order to finish installing K8s. After reboot, run step 3. Reboot? (y/n)? "
# -r: don't let a backslash in the answer be treated as an escape
read -r answer
if [ "$answer" != "${answer#[Yy]}" ] ;then
    sudo reboot
fi;
36 |
--------------------------------------------------------------------------------
/install-k8s/2-configK8SNode.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Joins this machine to an existing Kubernetes cluster as a worker node.
3 | # Usage: KUBE_JOIN_COMMAND='kubeadm join ...' ./2-configK8SNode.sh
4 | 
5 | ./configNode.sh || exit 1
6 | 
7 | # -z is the idiomatic empty-string test (the original used the double negative `! -n`)
8 | if [[ -z "${KUBE_JOIN_COMMAND}" ]]; then
9 | printf "Run 'kubeadm token create --print-join-command' in the control plane to get a join command. Then re-run this script with KUBE_JOIN_COMMAND=command ./2-configK8SNode.sh"
10 | exit 1
11 | fi;
12 | 
13 | printf "\nJoin Kubernetes Cluster ==================================================================================\n"
14 | # The lock file guards against running `kubeadm join` twice on the same node
15 | if [ ! -f "kubeadminit.lock" ]; then
16 | sudo systemctl enable kubelet.service
17 | # TODO: fetch the join command from the control plane automatically
18 | eval "${KUBE_JOIN_COMMAND}"
19 | touch kubeadminit.lock
20 | else
21 | printf "\nNOTE: Looks like kubeadm join already ran. If you want to run it again, delete kubeadminit.lock =========\n"
22 | fi;
23 | 
24 | printf "\n\n=========================================================================================================\n"
25 | printf "Node is now joined. Verify that it is running:\n"
26 | # NOTE(review): kubectl needs a kubeconfig, which worker nodes normally don't have — confirm these commands work here
27 | kubectl get nodes
28 | kubectl get ds --namespace=kube-system
29 | printf "\n\n"
30 | 
25 |
--------------------------------------------------------------------------------
/install-k8s/3-installKVM.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Installs KVM virtualization (libvirt) and the Cockpit web UI module for managing VMs.
3 | printf "\nInstall Virtualization ====================================================================================\n"
4 | sudo dnf install -y @virtualization # the dnf 'virtualization' package group (qemu-kvm, libvirt, etc. on Fedora/RHEL)
5 | sudo systemctl enable libvirtd.service # start the libvirt daemon on boot...
6 | sudo systemctl start libvirtd.service # ...and right now
7 | 
8 | printf "\nInstall Cockpit support for KVM ===========================================================================\n"
9 | sudo dnf install -y cockpit-machines
10 | sudo systemctl restart cockpit.service # restart so Cockpit picks up the new machines module
11 | 
12 | # TODO: Install the windows machine
13 | 
--------------------------------------------------------------------------------
/install-k8s/jail.local:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | # Ban offending hosts for one hour (3600 s); applies to every jail unless overridden:
3 | bantime = 3600
4 | ignoreip = 127.0.0.1/8 10.1.1.0/24 192.168.1.0/24
5 | 
6 | # Ban via iptables instead of firewalld (overrides /etc/fail2ban/jail.d/00-firewalld.conf):
7 | banaction = iptables-multiport
8 | 
9 | [sshd]
10 | enabled = true
11 | mode = aggressive
12 | 
12 |
--------------------------------------------------------------------------------
/install-k8s/kubeadm.yaml:
--------------------------------------------------------------------------------
1 | # https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/apis/kubeadm/v1beta4/types.go
2 | # generated by kubeadm
3 | # NOTE: kubeadm v1beta4 changed extraArgs from a map (v1beta3) to a list of
4 | # name/value pairs; the old map form is rejected by a v1beta4 kubeadm.
5 | 
6 | apiVersion: kubeadm.k8s.io/v1beta4
7 | kind: InitConfiguration
8 | nodeRegistration:
9 |   criSocket: unix:///var/run/crio/crio.sock
10 |   name: k8s-control-plane
11 |   # Remove all taints from the control plane to allow it to schedule pods since we are running a single machine/home lab
12 |   taints: []
13 | ---
14 | apiVersion: kubeadm.k8s.io/v1beta4
15 | kind: ClusterConfiguration
16 | controllerManager:
17 |   extraArgs:
18 |     # Expose metrics beyond localhost so they can be scraped
19 |     - name: bind-address
20 |       value: "0.0.0.0"
21 | scheduler:
22 |   extraArgs:
23 |     - name: bind-address
24 |       value: "0.0.0.0"
25 | apiServer:
26 |   extraArgs:
27 |     # This is important to allow any non-root ports to be used for NodePort
28 |     - name: service-node-port-range
29 |       value: "1025-32767"
30 |     - name: authorization-mode
31 |       value: "Node,RBAC"
32 |     # OIDC configuration
33 |     - name: oidc-issuer-url
34 |       value: "https://accounts.google.com"
35 |     - name: oidc-client-id
36 |       value: "399292359639-no3v1g6obqns0r1anqsb6hn520r6cu79.apps.googleusercontent.com"
37 |     - name: oidc-username-claim
38 |       value: "sub"
39 |     - name: oidc-username-prefix
40 |       value: "oidc:"
41 | # This is super important, without this setting networking won't work
42 | networking:
43 |   dnsDomain: cluster.local
44 |   # Must match flannel's default pod network (10.244.0.0/16)
45 |   podSubnet: 10.244.0.0/16
46 |   serviceSubnet: 10.96.0.0/12
47 | ---
48 | apiVersion: kubeproxy.config.k8s.io/v1alpha1
49 | kind: KubeProxyConfiguration
50 | bindAddress: 0.0.0.0
51 | # This is super important, without this setting networking won't work
52 | # (must agree with networking.podSubnet above)
53 | clusterCIDR: 10.244.0.0/16
54 | ---
55 | apiVersion: kubelet.config.k8s.io/v1beta1
56 | kind: KubeletConfiguration
57 | # Match the CRI-O / systemd cgroup driver so kubelet and the runtime agree
58 | cgroupDriver: systemd
59 | 
45 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "k8s-infrastructure",
3 | "version": "1.0.0",
4 | "main": "index.js",
5 | "repository": "git@github.com:gandazgul/my-infrastructure.git",
6 | "author": "Carlos Ravelo",
7 | "license": "MIT",
8 | "private": true,
9 | "scripts": {
10 | "docker:build": "node containers/docker-build.js",
11 | "docker:run": "node containers/docker-run.js",
12 | "docs": "helm-docs"
13 | },
14 | "dependencies": {
15 | "minimist": "^1.2.8",
16 | "pre-commit": "^1.2.2"
17 | },
18 | "pre-commit": [
19 | "docs"
20 | ],
21 | "resolutions": {
22 | "cross-spawn": "6.0.6"
23 | }
24 | }
25 |
--------------------------------------------------------------------------------