├── .envrc ├── .github ├── codeql │ └── codeql-config.yml └── workflows │ ├── codeql.yaml │ ├── permissions.yaml │ └── test.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .secrets.baseline ├── README.md ├── k8s ├── argo │ ├── .gitignore │ ├── README.md │ ├── appset.yaml │ ├── argocd.yaml │ ├── configmap.yaml │ ├── kustomization.yaml │ ├── namespace.yaml │ ├── sa-wf.yaml │ ├── tailscale-wf.yaml │ ├── tailscale.yaml │ └── vault.yaml ├── debug.yaml ├── prod │ ├── actions-runner │ │ ├── actions.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── vault.yaml │ ├── coredns │ │ ├── .gitignore │ │ └── kustomization.yaml │ ├── datadog │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── vault.yaml │ ├── descheduler │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── esphome │ │ ├── .gitignore │ │ ├── devices │ │ │ ├── ble_proxy.yaml │ │ │ ├── front-porch-switch.yaml │ │ │ ├── garage.yaml │ │ │ ├── liam-room-starlights.yaml │ │ │ ├── liamplug.yaml │ │ │ ├── pergola.yaml │ │ │ ├── plaato-airlock.yaml │ │ │ ├── plaato-keg.yaml │ │ │ ├── plug1.yaml │ │ │ ├── plug2.yaml │ │ │ ├── plug3.yaml │ │ │ └── tubeszb-upstairs.yaml │ │ ├── esphome.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── hass │ │ ├── files │ │ │ ├── automations.yaml │ │ │ ├── configuration.yaml │ │ │ ├── customize.yaml │ │ │ ├── groups.yaml │ │ │ ├── puck_js_bthome.js │ │ │ ├── remove_backup.sh │ │ │ ├── scenes.yaml │ │ │ └── scripts.yaml │ │ ├── hass.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── http-to-mqtt │ │ ├── .gitignore │ │ ├── http-to-mqtt.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── kube-state-metrics │ │ ├── .gitignore │ │ └── kustomization.yaml │ ├── local-path-storage │ │ ├── chart.yaml │ │ └── kustomization.yaml │ ├── longhorn │ │ ├── 
.gitignore │ │ ├── backuptargets.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── recurringjobs.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── mailgun-to-paperless-ngx │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ ├── mailgun-to-paperless-ngx.yaml │ │ ├── namespace.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── mosquitto │ │ ├── kustomization.yaml │ │ ├── mosquitto.yaml │ │ └── namespace.yaml │ ├── node-feature-discovery │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── obsidian │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── obsidian.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── paperless-ngx │ │ ├── README.md │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── paperless-ngx.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ ├── redis.yaml │ │ ├── tailscale.yaml │ │ └── vault.yaml │ ├── reloader │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── ser2net │ │ ├── common.yaml │ │ ├── kustomization.yaml │ │ ├── ser2net-zigbee.yaml │ │ └── ser2net-zwave.yaml │ ├── tailscale │ │ ├── .gitignore │ │ ├── cr.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── vault.yaml │ ├── vault-secrets-operator │ │ ├── .gitignore │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── sa.yaml │ │ └── setup.sh │ ├── workflows │ │ ├── default_sa.yaml │ │ ├── etcdsnapshot.yaml │ │ ├── get-token.sh │ │ ├── kustomization.yaml │ │ ├── psql-hass.yaml │ │ ├── psql-paperless.yaml │ │ ├── talos.yaml │ │ ├── vault.yaml │ │ ├── zigbee2mqtt.yaml │ │ └── zwavejsui.yaml │ ├── zigbee2mqtt-upstairs │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ ├── tailscale.yaml │ │ └── zigbee2mqtt.yaml │ ├── zigbee2mqtt │ │ ├── configmap.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── pv.yaml │ │ ├── pvc.yaml │ │ ├── tailscale.yaml │ │ ├── vault.yaml │ │ └── zigbee2mqtt.yaml │ └── zwave-js-ui │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── pv.yaml │ 
│ ├── pvc.yaml │ │ ├── tailscale.yaml │ │ └── zwave.yaml └── stage │ ├── bluey │ ├── README.md │ ├── dv_ubuntu.yaml │ ├── kustomization.yaml │ ├── ubuntu.yaml │ └── usbredir.yaml │ ├── goldilocks │ ├── kustomization.yaml │ ├── namespace.yaml │ └── tailscale.yaml │ ├── kubevirt │ ├── cdi-cr.yaml │ ├── cdi-operator.yaml │ ├── kubevirt-cr.yaml │ ├── kubevirt-operator.yaml │ └── kustomization.yaml │ └── vpa │ ├── kustomization.yaml │ └── namespace.yaml ├── misc ├── netboot.xyz-rpi4-sdcard.img ├── netboot.xyz.img └── vault-backup.sh ├── nomad ├── datadog.hcl ├── fluentbit.hcl ├── influxdb.hcl ├── jackett.hcl ├── minio.hcl ├── mosquitto.hcl ├── mysql.hcl ├── netbootxyz.hcl ├── nextcloud.hcl ├── obsidian.hcl ├── postgresql.hcl ├── radarr.hcl ├── sonarr.hcl ├── synology-ups-datadog.hcl ├── traefik.hcl ├── utorrent.hcl └── vault.hcl.age ├── renovate.json ├── talos ├── kubeconfig.age └── talosconfig.age └── terraform ├── cloudflare ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── backend.tf ├── dns.tf ├── iot.tf └── locals.tf ├── fastly ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── backend.tf ├── hotchicken_rocks.tf ├── locals.tf └── poopgeni_us.tf ├── tailscale ├── .terraform.lock.hcl ├── acl.tf ├── locals.tf └── sources.tf ├── talos ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── controlplane.tf ├── locals.tf ├── main.tf ├── sources.tf ├── templates │ ├── controlplane.yaml.tmpl │ ├── extensionserviceconfig.yaml.tmpl │ ├── install-disk-and-hostname.yaml.tmpl │ ├── longhorn.yaml.tmpl │ └── worker-with-extra-disk.yaml.tmpl └── worker.tf └── unifi ├── .terraform-version ├── .terraform.lock.hcl ├── README.md ├── backend.tf ├── device_garage_ap.tf ├── device_living_room_u6_iw.tf ├── device_office_ap.tf ├── device_office_usw_lite.tf ├── device_udm_pro.tf ├── device_usw.tf ├── locals.tf ├── port_forward.tf ├── radius.tf ├── settings.tf ├── site.tf ├── usg.tf ├── vlan_cluster.tf ├── vlan_defaults.tf ├── vlan_iot.tf ├── vlan_nas.tf 
├── vlan_printer.tf ├── vlan_wifi.tf ├── vlan_your_mom.tf └── wlan.tf /.envrc: -------------------------------------------------------------------------------- 1 | export KUBECONFIG=$(pwd)/talos/kubeconfig 2 | export TALOSCONFIG=$(pwd)/talos/talosconfig 3 | export TERM=xterm-256color # https://github.com/siderolabs/talos/issues/8762 4 | export VAULT_ADDR=http://192.168.3.2:8200/ 5 | export NOMAD_ADDR=http://192.168.3.2:4646/ 6 | -------------------------------------------------------------------------------- /.github/codeql/codeql-config.yml: -------------------------------------------------------------------------------- 1 | query-filters: 2 | - include: 3 | id: actions/unpinned-tag 4 | -------------------------------------------------------------------------------- /.github/workflows/codeql.yaml: -------------------------------------------------------------------------------- 1 | name: CodeQL Security Analysis 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | 7 | jobs: 8 | analyze: 9 | name: Analyze GitHub Actions YAML 10 | runs-on: ubuntu-latest 11 | permissions: 12 | security-events: write 13 | actions: read 14 | contents: read 15 | 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 19 | 20 | - name: Initialize CodeQL 21 | uses: github/codeql-action/init@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 22 | with: 23 | languages: "actions" 24 | queries: security-extended 25 | config-file: .github/codeql/codeql-config.yml 26 | 27 | - name: Perform CodeQL Analysis 28 | uses: github/codeql-action/analyze@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19 29 | with: 30 | category: "/language:actions" 31 | -------------------------------------------------------------------------------- /.github/workflows/permissions.yaml: -------------------------------------------------------------------------------- 1 | name: Check User Permission 2 | 3 | on: 4 | pull_request: 5 | 6 | jobs: 7 | check: 8 | 
runs-on: ubuntu-latest 9 | steps: 10 | - id: auth_check 11 | uses: morfien101/actions-authorized-user@4a3cfbf0bcb3cafe4a71710a278920c5d94bb38b # v3 12 | with: 13 | username: ${{ github.actor }} 14 | team: "admin" 15 | org: "OctoKode" 16 | whitelist: "myoung34,renovate[bot]" 17 | github_token: ${{ secrets.GITHUB_TOKEN }} 18 | - name: can continue 19 | shell: bash 20 | run: | 21 | if [ ${{ steps.auth_check.outputs.authorized }} != "true" ]; then 22 | echo "::error title=User Unauthorized::User ${{ github.actor }} is not authorized to run this workflow!" 23 | exit 1 24 | fi 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yaml: -------------------------------------------------------------------------------- 1 | name: Run tests 2 | 3 | on: 4 | push: 5 | 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 11 | - uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 12 | - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | talos/*.yaml 2 | talos/talosconfig 3 | talos/kubeconfig 4 | nomad/vault.hcl 5 | **/*.tfstate 6 | **/*.backup 7 | .terraform 8 | *.sw[op] 9 | 10 | # Because we want no oopsies 11 | k8s/prod/vault-secrets-operator/ca.crt 12 | k8s/prod/vault-secrets-operator/sa.token 13 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/pre-commit/pre-commit-hooks 3 | rev: v4.4.0 4 | hooks: 5 | - id: check-yaml 6 | args: [--allow-multiple-documents] 7 | exclude: 
'k8s/prod/tailscale/charts/.*|k8s/prod/esphome/devices/.*|k8s/prod/hass/files/configuration.yaml' 8 | - id: end-of-file-fixer 9 | - id: trailing-whitespace 10 | - id: check-case-conflict 11 | - id: check-merge-conflict 12 | - id: detect-private-key 13 | - repo: https://github.com/Yelp/detect-secrets 14 | rev: v1.5.0 15 | hooks: 16 | - id: detect-secrets 17 | args: ['--baseline', '.secrets.baseline'] 18 | -------------------------------------------------------------------------------- /k8s/argo/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/argo/README.md: -------------------------------------------------------------------------------- 1 | To get the argo workflow token: 2 | 3 | ``` 4 | kubectl -n argocd exec $(kubectl get pod -n argocd -l 'app.kubernetes.io/name=argo-workflows-server' -o jsonpath='{.items[0].metadata.name}') -- argo auth token 5 | ``` 6 | -------------------------------------------------------------------------------- /k8s/argo/appset.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: ApplicationSet 4 | metadata: 5 | name: apps 6 | namespace: argocd 7 | spec: 8 | generators: 9 | - git: 10 | repoURL: 'https://github.com/myoung34/homelab' 11 | revision: HEAD 12 | directories: 13 | - path: "k8s/prod/*" 14 | template: 15 | metadata: 16 | name: "{{ path.basename }}" 17 | spec: 18 | project: default 19 | source: 20 | repoURL: https://github.com/myoung34/homelab.git 21 | targetRevision: HEAD 22 | path: "k8s/prod/{{ path.basename }}" 23 | destination: 24 | server: https://kubernetes.default.svc 25 | syncPolicy: 26 | syncOptions: 27 | - PrunePropagationPolicy=foreground 28 | - PruneLast=true 29 | - ServerSideApply=true 30 | - FailOnSharedResource=true 31 | - RespectIgnoreDifferences=true 32 | - 
SkipDryRunOnMissingResource=true 33 | automated: 34 | prune: true 35 | selfHeal: true 36 | revisionHistoryLimit: 5 37 | -------------------------------------------------------------------------------- /k8s/argo/argocd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: argocd 6 | spec: 7 | destination: 8 | namespace: argocd 9 | server: 'https://kubernetes.default.svc' 10 | source: 11 | path: k8s/argo 12 | repoURL: 'https://github.com/myoung34/homelab' 13 | targetRevision: HEAD 14 | project: default 15 | syncPolicy: 16 | syncOptions: 17 | - PrunePropagationPolicy=foreground 18 | - PruneLast=true 19 | - ServerSideApply=true 20 | - FailOnSharedResource=true 21 | - RespectIgnoreDifferences=true 22 | - SkipDryRunOnMissingResource=true 23 | automated: 24 | prune: true 25 | selfHeal: true 26 | -------------------------------------------------------------------------------- /k8s/argo/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-cm 5 | namespace: argocd 6 | labels: 7 | app.kubernetes.io/name: argocd-cm 8 | app.kubernetes.io/part-of: argocd 9 | data: 10 | kustomize.buildOptions: --enable-helm 11 | url: https://argocd.king-gila.ts.net 12 | admin.enabled: "true" 13 | repositories: | 14 | - url: https://github.com/myoung34/homelab 15 | passwordSecret: 16 | name: argocd-secret 17 | key: github_password 18 | usernameSecret: 19 | name: argocd-secret 20 | key: github_username 21 | resource.customizations: | 22 | PersistentVolume: 23 | ignoreDifferences: | 24 | jsonPointers: 25 | - /spec/claimRef/resourceVersion 26 | - /spec/claimRef/uid 27 | - /status/lastPhaseTransitionTime 28 | StatefulSet: 29 | ignoreDifferences: | 30 | jsonPointers: 31 | - /spec/volumeClaimTemplates/0/apiVersion 32 | - /spec/volumeClaimTemplates/0/kind 33 | v1/Secret: 
34 | ignoreDifferences: | 35 | jsonPointers: 36 | - /metadata/labels 37 | volumes.longhorn.io/Volume: 38 | ignoreDifferences: | 39 | jsonPointers: 40 | - /spec/size 41 | - /spec/disableFrontend 42 | apiextensions.k8s.io/CustomResourceDefinition: 43 | ignoreDifferences: | 44 | jsonPointers: 45 | - /status 46 | - /spec/conversion/webhook/clientConfig/service 47 | - /spec/conversion/webhook/clientConfig/caBundle 48 | - /spec/validation/openAPIV3Schema/properties/spec/properties/solver/properties/dns01/properties/webhook/properties/config/x-kubernetes-preserve-unknown-fields 49 | - /spec/validation/openAPIV3Schema/properties/spec/properties/acme/properties/solvers/items/properties/dns01/properties/webhook/properties/config/x-kubernetes-preserve-unknown-fields 50 | - /spec/preserveUnknownFields 51 | admissionregistration.k8s.io/ValidatingWebhookConfiguration: 52 | ignoreDifferences: | 53 | jsonPointers: 54 | - /webhooks/0/clientConfig/caBundle 55 | - /webhooks/0/failurePolicy 56 | - /webhooks/1/clientConfig/caBundle 57 | - /webhooks/1/failurePolicy 58 | admissionregistration.k8s.io/MutatingWebhookConfiguration: 59 | ignoreDifferences: | 60 | jsonPointers: 61 | - /webhooks/0/clientConfig/caBundle 62 | argoproj.io/CronWorkflow: 63 | ignoreDifferences: | 64 | jsonPointers: 65 | - /spec/schedule 66 | -------------------------------------------------------------------------------- /k8s/argo/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: argocd 4 | metadata: 5 | name: argocd 6 | 7 | resources: 8 | - namespace.yaml 9 | - vault.yaml 10 | - argocd.yaml 11 | - appset.yaml 12 | - sa-wf.yaml 13 | - tailscale.yaml 14 | - tailscale-wf.yaml 15 | patches: 16 | - path: configmap.yaml 17 | 18 | helmCharts: 19 | - name: argo-cd 20 | releaseName: argocd 21 | namespace: argocd 22 | version: 8.0.16 23 | includeCRDs: true 24 | repo: 
https://argoproj.github.io/argo-helm 25 | valuesInline: 26 | configs: 27 | params: 28 | controller.sync.timeout.seconds: "600" 29 | secret: 30 | createSecret: false 31 | global: 32 | deploymentAnnotations: 33 | configmap.reloader.stakater.com/reload: "argocd-cm,argocd-rbac-cm" 34 | dex: 35 | enabled: false 36 | repoServer: 37 | metrics: 38 | enabled: true 39 | resources: 40 | requests: 41 | cpu: 100m 42 | memory: 512M 43 | applicationSet: 44 | webhook: 45 | ingress: 46 | enabled: false 47 | resources: 48 | requests: 49 | cpu: 23m 50 | memory: 105M 51 | metrics: 52 | enabled: true 53 | notifications: 54 | metrics: 55 | enabled: true 56 | resources: 57 | requests: 58 | cpu: 15m 59 | memory: 105M 60 | controller: 61 | metrics: 62 | enabled: true 63 | resources: 64 | requests: 65 | cpu: 296m 66 | memory: 512M 67 | server: 68 | metrics: 69 | enabled: true 70 | extraArgs: 71 | - --insecure 72 | resources: 73 | requests: 74 | cpu: 63m 75 | memory: 110M 76 | redis: 77 | resources: 78 | requests: 79 | cpu: 15m 80 | memory: 105M 81 | - name: argo-workflows 82 | releaseName: argocd-workflows 83 | namespace: argocd 84 | version: 0.45.18 85 | includeCRDs: true 86 | repo: https://argoproj.github.io/argo-helm 87 | valuesInline: 88 | controller: 89 | metrics: 90 | enabled: true 91 | logging: 92 | level: debug 93 | workflowNamespaces: 94 | - argocd 95 | server: 96 | metrics: 97 | enabled: true 98 | logging: 99 | level: debug 100 | extraArgs: 101 | - --insecure-skip-verify=true 102 | workflow: 103 | logging: 104 | level: debug 105 | serviceAccount: 106 | create: true 107 | labels: {} 108 | name: "admin-user" 109 | -------------------------------------------------------------------------------- /k8s/argo/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: argocd 6 | labels: 7 | name: argocd 8 | app.kubernetes.io/instance: argocd 9 | goldilocks.fairwinds.com/enabled: 
"true" 10 | -------------------------------------------------------------------------------- /k8s/argo/sa-wf.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: admin-user 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: admin-user 10 | subjects: 11 | - kind: ServiceAccount 12 | name: admin-user 13 | namespace: argocd 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRole 17 | metadata: 18 | name: admin-user 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - pods 24 | verbs: 25 | - '*' 26 | - apiGroups: 27 | - argoproj.io 28 | resources: 29 | - workflows 30 | - workflowtemplates 31 | - clusterworkflowtemplates 32 | - cronworkflows 33 | - eventsources 34 | - sensors 35 | - workfloweventbindings 36 | verbs: 37 | - create 38 | - get 39 | - list 40 | - watch 41 | - update 42 | - patch 43 | - delete 44 | --- 45 | apiVersion: v1 46 | kind: Secret 47 | metadata: 48 | name: admin-user 49 | annotations: 50 | kubernetes.io/service-account.name: admin-user 51 | type: kubernetes.io/service-account-token 52 | -------------------------------------------------------------------------------- /k8s/argo/tailscale-wf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: argocd-workflows-argo-workflows-server 5 | spec: 6 | defaultBackend: 7 | service: 8 | name: argocd-workflows-argo-workflows-server 9 | port: 10 | number: 2746 11 | ingressClassName: tailscale 12 | tls: 13 | - hosts: 14 | - argowf 15 | -------------------------------------------------------------------------------- /k8s/argo/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: 
argocd-server 5 | spec: 6 | defaultBackend: 7 | service: 8 | name: argocd-server 9 | port: 10 | number: 80 11 | ingressClassName: tailscale 12 | tls: 13 | - hosts: 14 | - argocd 15 | -------------------------------------------------------------------------------- /k8s/argo/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: argocd 6 | spec: 7 | vaultAuthRef: argocd 8 | mount: secret/ 9 | type: kv-v2 10 | path: argocd 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: argocd-secret 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: argocd 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: argocd 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/debug.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: foo 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | capacity: 10 | storage: 128Mi 11 | hostPath: 12 | path: /var/mnt/storage 13 | type: DirectoryOrCreate 14 | nodeAffinity: 15 | required: 16 | nodeSelectorTerms: 17 | - matchExpressions: 18 | - key: kubernetes.io/hostname 19 | operator: In 20 | values: 21 | - cluster12 22 | persistentVolumeReclaimPolicy: Retain 23 | storageClassName: local-path 24 | volumeMode: Filesystem 25 | --- 26 | apiVersion: apps/v1 27 | kind: Deployment 28 | metadata: 29 | name: slim 30 | spec: 31 | selector: 32 | matchLabels: 33 | app: slim 34 | template: 35 | metadata: 36 | labels: 37 | app: slim 38 | spec: 39 | containers: 40 | - name: app 41 | image: alpine:latest 42 | command: 43 | - "sleep" 44 | - "600" 45 | volumeMounts: 46 | - mountPath: /opt/storage 47 | name: store 48 | volumes: 49 | - name: store 50 | 
persistentVolumeClaim: 51 | claimName: foo 52 | --- 53 | apiVersion: v1 54 | kind: PersistentVolumeClaim 55 | metadata: 56 | name: foo 57 | spec: 58 | accessModes: 59 | - ReadWriteOnce 60 | resources: 61 | requests: 62 | storage: 128Mi 63 | storageClassName: local-path 64 | volumeMode: Filesystem 65 | volumeName: foo 66 | -------------------------------------------------------------------------------- /k8s/prod/actions-runner/actions.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: actions-runner 6 | namespace: actions-runner 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: actions-runner 12 | namespace: runners 13 | spec: 14 | replicas: 0 15 | selector: 16 | matchLabels: 17 | app: actions-runner 18 | template: 19 | metadata: 20 | labels: 21 | app: actions-runner 22 | spec: 23 | containers: 24 | - name: runner 25 | image: myoung34/github-runner:latest 26 | env: 27 | - name: RUNNER_SCOPE 28 | value: "org" 29 | - name: ORG_NAME 30 | value: "OctoKode" 31 | - name: ACCESS_TOKEN 32 | valueFrom: 33 | secretKeyRef: 34 | name: actions-runner 35 | key: ACCESS_TOKEN 36 | - name: RUNNER_NAME 37 | valueFrom: 38 | fieldRef: 39 | fieldPath: metadata.name 40 | envFrom: 41 | - secretRef: 42 | name: actions-runner 43 | securityContext: 44 | privileged: true 45 | nodeSelector: 46 | kubernetes.io/arch: arm64 47 | -------------------------------------------------------------------------------- /k8s/prod/actions-runner/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: actions-runner 4 | 5 | resources: 6 | - namespace.yaml 7 | - actions.yaml 8 | - vault.yaml 9 | -------------------------------------------------------------------------------- /k8s/prod/actions-runner/namespace.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: actions-runner 6 | labels: 7 | name: actions-runner 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/actions-runner/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: actions-runner 6 | spec: 7 | vaultAuthRef: actions-runner 8 | mount: secret/ 9 | type: kv-v2 10 | path: actions-runner 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: actions-runner 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: actions-runner 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: actions-runner 25 | serviceAccount: actions-runner 26 | -------------------------------------------------------------------------------- /k8s/prod/coredns/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/coredns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kube-system 4 | 5 | patches: 6 | - target: 7 | kind: Service 8 | name: kube-dns 9 | patch: |- 10 | - op: remove 11 | path: "/spec/selector/app.kubernetes.io~1instance" 12 | - op: remove 13 | path: "/spec/selector/app.kubernetes.io~1name" 14 | 15 | helmCharts: 16 | - name: coredns 17 | releaseName: coredns 18 | version: 1.42.2 19 | includeCRDs: true 20 | repo: https://coredns.github.io/helm 21 | valuesInline: 22 | replicaCount: 2 23 | k8sAppLabelOverride: "kube-dns" 24 | 
fullnameOverride: coredns 25 | service: 26 | name: "kube-dns" 27 | clusterIP: "10.96.0.10" 28 | servers: 29 | - zones: 30 | - zone: . 31 | port: 53 32 | plugins: 33 | - name: errors 34 | # Serves a /health endpoint on :8080, required for livenessProbe 35 | - name: health 36 | configBlock: |- 37 | lameduck 5s 38 | # Serves a /ready endpoint on :8181, required for readinessProbe 39 | - name: ready 40 | # Required to query kubernetes API for data 41 | - name: kubernetes 42 | parameters: cluster.local in-addr.arpa ip6.arpa 43 | configBlock: |- 44 | pods insecure 45 | fallthrough in-addr.arpa ip6.arpa 46 | ttl 30 47 | 48 | # Serves a /metrics endpoint on :9153, required for serviceMonitor 49 | - name: prometheus 50 | parameters: 0.0.0.0:9153 51 | - name: forward 52 | parameters: . /etc/resolv.conf 53 | - name: cache 54 | parameters: 30 55 | - name: loop 56 | - name: reload 57 | - name: loadbalance 58 | zoneFiles: 59 | - filename: NodeHosts 60 | domain: NodeHosts 61 | contents: | 62 | 192.168.1.19 cluster11 63 | 192.168.1.21 cluster12 64 | 192.168.1.22 cluster13 65 | 192.168.1.23 cluster14 66 | 192.168.1.24 cluster21 67 | 192.168.1.25 cluster22 68 | 192.168.1.26 cluster23 69 | 192.168.1.27 cluster24 70 | 192.168.3.2 bignasty 71 | -------------------------------------------------------------------------------- /k8s/prod/datadog/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/datadog/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: datadog 4 | 5 | resources: 6 | - namespace.yaml 7 | - vault.yaml 8 | 9 | helmCharts: 10 | - name: datadog 11 | releaseName: datadog 12 | namespace: datadog 13 | version: 3.118.0 14 | includeCRDs: true 15 | repo: https://helm.datadoghq.com 16 | valuesInline: 17 
| providers: 18 | talos: 19 | enabled: true 20 | datadog: 21 | confd: 22 | argocd.yaml: |- 23 | instances: 24 | - 25 | app_controller_endpoint: http://argocd-application-controller-metrics.argocd.svc.cluster.local:8082/metrics 26 | api_server_endpoint: http://argocd-server-metrics.argocd.svc.cluster.local:8083/metrics 27 | repo_server_endpoint: http://argocd-repo-server-metrics.argocd.svc.cluster.local:8084/metrics 28 | apiKeyExistingSecret: datadog 29 | logs: 30 | enabled: true 31 | containerCollectAll: true 32 | networkMonitoring: 33 | enabled: true 34 | serviceMonitoring: 35 | enabled: true 36 | systemProbe: 37 | mountPackageManagementDirs: 38 | - name: "public-key-dir" 39 | hostPath: /etc/pki 40 | mountPath: /host/etc/pki 41 | envFrom: 42 | - secretRef: 43 | name: datadog 44 | agents: 45 | tolerations: 46 | - key: "node-role.kubernetes.io/control-plane" 47 | effect: "NoSchedule" 48 | -------------------------------------------------------------------------------- /k8s/prod/datadog/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: datadog 6 | labels: 7 | name: datadog 8 | goldilocks.fairwinds.com/enabled: "true" 9 | pod-security.kubernetes.io/enforce: privileged 10 | -------------------------------------------------------------------------------- /k8s/prod/datadog/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: datadog 6 | spec: 7 | vaultAuthRef: datadog 8 | mount: secret 9 | type: kv-v2 10 | path: datadog 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: datadog 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: datadog 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: datadog 25 | 
serviceAccount: datadog 26 | -------------------------------------------------------------------------------- /k8s/prod/descheduler/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/descheduler/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: descheduler 4 | 5 | resources: 6 | - namespace.yaml 7 | 8 | helmCharts: 9 | - name: descheduler 10 | includeCRDs: true 11 | namespace: descheduler 12 | releaseName: descheduler 13 | version: 0.33.0 14 | repo: https://kubernetes-sigs.github.io/descheduler/ 15 | valuesInline: 16 | schedule: "0 4 */3 * *" # every 3 days at 4am 17 | deschedulerPolicy: 18 | profiles: 19 | - name: default 20 | pluginConfig: 21 | - name: DefaultEvictor 22 | args: 23 | ignorePvcPods: true 24 | evictLocalStoragePods: true 25 | - name: RemoveDuplicates 26 | - name: PodLifeTime 27 | args: 28 | maxPodLifeTimeSeconds: 86400 29 | namespaces: 30 | include: 31 | - "actions-runner" 32 | plugins: 33 | balance: 34 | enabled: 35 | - RemoveDuplicates 36 | deschedule: 37 | enabled: 38 | - PodLifeTime 39 | -------------------------------------------------------------------------------- /k8s/prod/descheduler/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: descheduler 6 | labels: 7 | name: descheduler 8 | -------------------------------------------------------------------------------- /k8s/prod/esphome/.gitignore: -------------------------------------------------------------------------------- 1 | myauthsecret.yaml 2 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/front-porch-switch.yaml: 
-------------------------------------------------------------------------------- 1 | esphome: 2 | name: front-porch-switch 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | logger: 13 | level: VERBOSE 14 | baud_rate: 0 15 | 16 | api: 17 | encryption: 18 | key: !secret front_porch_switch_key 19 | 20 | ota: 21 | - platform: esphome 22 | password: !secret ota_password 23 | 24 | sensor: 25 | - platform: wifi_signal 26 | name: front_porch_switch_signal 27 | update_interval: 15s 28 | 29 | switch: 30 | - platform: gpio 31 | id: "relay" 32 | name: "front_porch_relay" 33 | pin: 12 34 | on_turn_on: 35 | then: 36 | - switch.turn_on: relay 37 | - output.turn_on: status_led 38 | on_turn_off: 39 | then: 40 | - switch.turn_off: relay 41 | - output.turn_off: status_led 42 | 43 | output: 44 | - platform: esp8266_pwm 45 | id: status_led 46 | pin: 47 | number: GPIO4 48 | inverted: True 49 | 50 | binary_sensor: 51 | - platform: gpio 52 | name: "front_porch_switch" 53 | pin: 54 | number: 13 55 | inverted: True 56 | on_press: 57 | - switch.toggle: relay 58 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/garage.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: garage-switch 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | 15 | # Enable Home Assistant API 16 | api: 17 | encryption: 18 | key: !secret garage_switch_key 19 | 20 | ota: 21 | - platform: esphome 22 | password: !secret ota_password 23 | 24 | switch: 25 | - platform: gpio 26 | name: "garage" 27 | pin: 5 28 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/liam-room-starlights.yaml: 
-------------------------------------------------------------------------------- 1 | esphome: 2 | name: liam-room-starlights 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | 15 | # Enable Home Assistant API 16 | api: 17 | encryption: 18 | key: !secret liam_room_starlights_key 19 | 20 | ota: 21 | - platform: esphome 22 | password: !secret ota_password 23 | 24 | light: 25 | - platform: rgbww 26 | name: "Liam Starlight" 27 | red: output_component1 28 | green: output_component2 29 | blue: output_component3 30 | cold_white: output_component4 31 | cold_white_color_temperature: 6536 K 32 | warm_white: output_component5 33 | warm_white_color_temperature: 2000 K 34 | # restore_mode: ALWAYS_ON 35 | 36 | output: 37 | - platform: esp8266_pwm 38 | id: output_component1 39 | max_power: 1% 40 | pin: 5 41 | 42 | - platform: esp8266_pwm 43 | id: output_component2 44 | max_power: 1% 45 | pin: 12 46 | 47 | - platform: esp8266_pwm 48 | id: output_component3 49 | max_power: 1% 50 | pin: 13 51 | 52 | - platform: esp8266_pwm 53 | id: output_component4 54 | max_power: 1% 55 | pin: 15 56 | 57 | - platform: esp8266_pwm 58 | id: output_component5 59 | max_power: 1% 60 | pin: 16 61 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/liamplug.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: liamplug 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | baud_rate: 0 15 | 16 | uart: 17 | rx_pin: RX 18 | baud_rate: 4800 19 | parity: EVEN 20 | 21 | api: 22 | encryption: 23 | key: !secret liamplug_key 24 | 25 | ota: 26 | - platform: esphome 27 | password: !secret ota_password 28 | 29 | 30 | binary_sensor: 
31 | - platform: gpio 32 | pin: 33 | number: GPIO0 34 | mode: INPUT_PULLUP 35 | inverted: True 36 | name: "liam_plug_button" 37 | on_press: 38 | - switch.toggle: fakebutton 39 | - platform: template 40 | name: "liam_plug_running" 41 | filters: 42 | - delayed_off: 15s 43 | lambda: |- 44 | if (isnan(id(power).state)) { 45 | return {}; 46 | } else if (id(power).state > 4) { 47 | // Running 48 | return true; 49 | } else { 50 | // Not running 51 | return false; 52 | } 53 | 54 | switch: 55 | - platform: template 56 | name: "liam_plug_pow_relay" 57 | optimistic: true 58 | id: fakebutton 59 | turn_on_action: 60 | - switch.turn_on: relay 61 | - light.turn_on: led 62 | turn_off_action: 63 | - switch.turn_off: relay 64 | - light.turn_off: led 65 | - platform: gpio 66 | id: relay 67 | pin: GPIO12 68 | restore_mode: ALWAYS_ON 69 | 70 | output: 71 | - platform: esp8266_pwm 72 | id: pow_blue_led 73 | pin: 74 | number: GPIO13 75 | inverted: True 76 | 77 | light: 78 | - platform: monochromatic 79 | name: "liam_plug_blue_led" 80 | output: pow_blue_led 81 | id: led 82 | restore_mode: ALWAYS_ON 83 | 84 | sensor: 85 | - platform: wifi_signal 86 | name: "liam_plug_wifi_signal" 87 | update_interval: 60s 88 | - platform: cse7766 89 | current: 90 | name: "liam_plug_current" 91 | voltage: 92 | name: "liam_plug_voltage" 93 | power: 94 | name: "liam_plug_pow_power" 95 | id: power 96 | on_value_range: 97 | - above: 4.0 98 | then: 99 | - light.turn_on: led 100 | - below: 4.0 101 | then: 102 | - light.turn_off: led 103 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/pergola.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: pergola-lights 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | 15 | # Enable Home Assistant API 16 | api: 17 | 
encryption: 18 | key: !secret pergola_lights_key 19 | 20 | ota: 21 | - platform: esphome 22 | password: !secret ota_password 23 | 24 | 25 | light: 26 | - platform: rgbww 27 | name: "pergola lights" 28 | red: output_component1 29 | green: output_component2 30 | blue: output_component3 31 | cold_white: output_component4 32 | cold_white_color_temperature: 6536 K 33 | warm_white: output_component5 34 | warm_white_color_temperature: 2000 K 35 | 36 | output: 37 | - platform: esp8266_pwm 38 | id: output_component1 39 | pin: 5 40 | 41 | - platform: esp8266_pwm 42 | id: output_component2 43 | pin: 12 44 | 45 | - platform: esp8266_pwm 46 | id: output_component3 47 | pin: 13 48 | 49 | - platform: esp8266_pwm 50 | id: output_component4 51 | pin: 15 52 | 53 | - platform: esp8266_pwm 54 | id: output_component5 55 | pin: 16 56 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/plaato-airlock.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: plaato-airlock 3 | 4 | esp8266: 5 | board: nodemcuv2 6 | 7 | external_components: 8 | - source: github://myoung34/esphome-components@main 9 | components: [plaato_airlock] 10 | 11 | wifi: 12 | ssid: !secret wifi_ssid 13 | password: !secret wifi_password 14 | domain: !secret wifi_domain 15 | 16 | logger: 17 | level: DEBUG 18 | 19 | web_server: 20 | port: 80 21 | 22 | api: 23 | encryption: 24 | key: !secret plaato_airlock_key 25 | 26 | ota: 27 | - platform: esphome 28 | password: !secret ota_password 29 | 30 | i2c: 31 | sda: GPIO14 32 | scl: GPIO12 33 | scan: false 34 | frequency: 100kHz 35 | id: bus_a 36 | 37 | sensor: 38 | - platform: plaato_airlock 39 | temp_sensor: 40 | name: "Temperature" 41 | bubble_sensor: 42 | name: "Number Of Bubbles" 43 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/plaato-keg.yaml: 
-------------------------------------------------------------------------------- 1 | esphome: 2 | name: plaato-keg 3 | libraries: 4 | - SPI 5 | - Wire 6 | - adafruit/Adafruit BusIO 7 | - adafruit/Adafruit PCT2075 8 | 9 | esp32: 10 | board: nodemcu-32s 11 | 12 | external_components: 13 | - source: github://myoung34/esphome-components@main 14 | components: [plaato_keg] 15 | 16 | web_server: 17 | port: 80 18 | 19 | wifi: 20 | ssid: !secret wifi_ssid 21 | password: !secret wifi_password 22 | domain: !secret wifi_domain 23 | 24 | logger: 25 | 26 | api: 27 | encryption: 28 | key: !secret plaato_keg_key 29 | 30 | ota: 31 | - platform: esphome 32 | password: !secret ota_password 33 | 34 | 35 | # This is the 3rd LED (closest to the plug) 36 | # https://esphome.io/components/status_led.html 37 | status_led: 38 | pin: GPIO27 39 | 40 | output: 41 | - platform: ledc 42 | pin: GPIO25 43 | id: plaato_keg_led1 44 | inverted: True 45 | - platform: ledc 46 | pin: GPIO26 47 | id: plaato_keg_led2 48 | inverted: True 49 | - platform: gpio 50 | pin: GPIO32 51 | id: plaato_keg_water_sensor_activation 52 | 53 | switch: 54 | - platform: output 55 | name: "Plaato Keg Water Activation" 56 | output: 'plaato_keg_water_sensor_activation' 57 | 58 | light: 59 | - platform: monochromatic 60 | output: plaato_keg_led1 61 | name: "Plaato Keg LED1" 62 | restore_mode: ALWAYS_ON 63 | - platform: monochromatic 64 | output: plaato_keg_led2 65 | name: "Plaato Keg LED2" 66 | 67 | binary_sensor: 68 | - platform: gpio 69 | id: plaato_keg_magnet_sensor 70 | name: "Plaato Keg Magnetic Sensor" 71 | pin: 72 | number: GPIO34 73 | inverted: True 74 | - platform: gpio 75 | id: plaato_keg_water_sensor 76 | name: "Plaato Keg Water Sensor" 77 | pin: 78 | number: GPIO35 79 | inverted: True 80 | 81 | http_request: 82 | useragent: esphome/device 83 | timeout: 60s 84 | verify_ssl: false 85 | 86 | sensor: 87 | - platform: plaato_keg 88 | temp_sensor: 89 | name: "Temperature" 90 | - platform: hx711 91 | name: "Plaato Keg 
HX711 Value" 92 | dout_pin: 17 93 | clk_pin: 16 94 | gain: 128 95 | update_interval: 60s 96 | filters: 97 | - multiply: -1.0 98 | - offset: -24255 # Probably want to calibrate this yourself 99 | - calibrate_linear: 100 | - 49712 -> 2.5 # Probably want to calibrate this yourself 101 | - 203270 -> 10 # Probably want to calibrate this yourself 102 | unit_of_measurement: lb 103 | on_value: 104 | # Turn on the 2nd LED when its submitting data 105 | - lambda: |- 106 | id(plaato_keg_led2).turn_on(); 107 | - http_request.post: 108 | url: !secret plaato_keg_hook_url 109 | headers: 110 | Content-Type: application/json 111 | # {"weight": "{value}"} 112 | json: |- 113 | root["weight"] = x; 114 | # Turn off the 2nd LED when its done submitting data 115 | - lambda: |- 116 | id(plaato_keg_led2).turn_off(); 117 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/plug1.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: plug1 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | baud_rate: 0 15 | 16 | uart: 17 | rx_pin: RX 18 | baud_rate: 4800 19 | parity: EVEN 20 | 21 | api: 22 | encryption: 23 | key: !secret plug1_key 24 | 25 | ota: 26 | - platform: esphome 27 | password: !secret ota_password 28 | 29 | 30 | binary_sensor: 31 | - platform: gpio 32 | pin: 33 | number: GPIO0 34 | mode: INPUT_PULLUP 35 | inverted: True 36 | name: "plug1_button" 37 | on_press: 38 | - switch.toggle: fakebutton 39 | - platform: template 40 | name: "plug1_running" 41 | filters: 42 | - delayed_off: 15s 43 | lambda: |- 44 | if (isnan(id(power).state)) { 45 | return {}; 46 | } else if (id(power).state > 4) { 47 | // Running 48 | return true; 49 | } else { 50 | // Not running 51 | return false; 52 | } 53 | 54 | switch: 55 | - platform: template 56 | name: 
"plug1_pow_relay" 57 | optimistic: true 58 | id: fakebutton 59 | turn_on_action: 60 | - switch.turn_on: relay 61 | - light.turn_on: led 62 | turn_off_action: 63 | - switch.turn_off: relay 64 | - light.turn_off: led 65 | - platform: gpio 66 | id: relay 67 | pin: GPIO12 68 | restore_mode: ALWAYS_ON 69 | 70 | output: 71 | - platform: esp8266_pwm 72 | id: pow_blue_led 73 | pin: 74 | number: GPIO13 75 | inverted: True 76 | 77 | light: 78 | - platform: monochromatic 79 | name: "plug1_blue_led" 80 | output: pow_blue_led 81 | id: led 82 | restore_mode: ALWAYS_ON 83 | 84 | sensor: 85 | - platform: wifi_signal 86 | name: "plug1_wifi_signal" 87 | update_interval: 60s 88 | - platform: cse7766 89 | current: 90 | name: "plug1_current" 91 | voltage: 92 | name: "plug1_voltage" 93 | power: 94 | name: "plug1_pow_power" 95 | id: power 96 | on_value_range: 97 | - above: 4.0 98 | then: 99 | - light.turn_on: led 100 | - below: 4.0 101 | then: 102 | - light.turn_off: led 103 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/plug2.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: plug2 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | baud_rate: 0 15 | 16 | uart: 17 | rx_pin: RX 18 | baud_rate: 4800 19 | parity: EVEN 20 | 21 | api: 22 | encryption: 23 | key: !secret plug2_key 24 | 25 | ota: 26 | - platform: esphome 27 | password: !secret ota_password 28 | 29 | 30 | binary_sensor: 31 | - platform: gpio 32 | pin: 33 | number: GPIO0 34 | mode: INPUT_PULLUP 35 | inverted: True 36 | name: "Button" 37 | on_press: 38 | - switch.toggle: fakebutton 39 | - platform: template 40 | name: "Running" 41 | filters: 42 | - delayed_off: 15s 43 | lambda: |- 44 | if (isnan(id(power).state)) { 45 | return {}; 46 | } else if (id(power).state > 4) { 47 | 
// Running 48 | return true; 49 | } else { 50 | // Not running 51 | return false; 52 | } 53 | 54 | switch: 55 | - platform: template 56 | name: "POW Relay" 57 | optimistic: true 58 | id: fakebutton 59 | turn_on_action: 60 | - switch.turn_on: relay 61 | - light.turn_on: led 62 | turn_off_action: 63 | - switch.turn_off: relay 64 | - light.turn_off: led 65 | - platform: gpio 66 | id: relay 67 | pin: GPIO12 68 | restore_mode: ALWAYS_ON 69 | 70 | output: 71 | - platform: esp8266_pwm 72 | id: pow_blue_led 73 | pin: 74 | number: GPIO13 75 | inverted: True 76 | 77 | light: 78 | - platform: monochromatic 79 | name: "Blue LED" 80 | output: pow_blue_led 81 | id: led 82 | restore_mode: ALWAYS_ON 83 | 84 | sensor: 85 | - platform: wifi_signal 86 | name: "WiFi Signal" 87 | update_interval: 60s 88 | - platform: uptime 89 | name: "Uptime" 90 | - platform: cse7766 91 | current: 92 | name: "Current" 93 | voltage: 94 | name: "Voltage" 95 | power: 96 | name: "POW Power" 97 | id: power 98 | on_value_range: 99 | - above: 4.0 100 | then: 101 | - light.turn_on: led 102 | - below: 4.0 103 | then: 104 | - light.turn_off: led 105 | 106 | text_sensor: 107 | - platform: version 108 | name: "ESPHome Version" 109 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/plug3.yaml: -------------------------------------------------------------------------------- 1 | esphome: 2 | name: plug3 3 | 4 | esp8266: 5 | board: esp01_1m 6 | 7 | wifi: 8 | ssid: !secret wifi_ssid 9 | password: !secret wifi_password 10 | domain: !secret wifi_domain 11 | 12 | # Enable logging 13 | logger: 14 | baud_rate: 0 15 | 16 | uart: 17 | rx_pin: RX 18 | baud_rate: 4800 19 | parity: EVEN 20 | 21 | api: 22 | encryption: 23 | key: !secret plug3_key 24 | 25 | ota: 26 | - platform: esphome 27 | password: !secret ota_password 28 | 29 | 30 | binary_sensor: 31 | - platform: gpio 32 | pin: 33 | number: GPIO0 34 | mode: INPUT_PULLUP 35 | inverted: True 36 | name: "Button" 37 | 
on_press: 38 | - switch.toggle: fakebutton 39 | - platform: template 40 | name: "Running" 41 | filters: 42 | - delayed_off: 15s 43 | lambda: |- 44 | if (isnan(id(power).state)) { 45 | return {}; 46 | } else if (id(power).state > 4) { 47 | // Running 48 | return true; 49 | } else { 50 | // Not running 51 | return false; 52 | } 53 | 54 | switch: 55 | - platform: template 56 | name: "POW Relay" 57 | optimistic: true 58 | id: fakebutton 59 | turn_on_action: 60 | - switch.turn_on: relay 61 | - light.turn_on: led 62 | turn_off_action: 63 | - switch.turn_off: relay 64 | - light.turn_off: led 65 | - platform: gpio 66 | id: relay 67 | pin: GPIO12 68 | restore_mode: ALWAYS_ON 69 | 70 | output: 71 | - platform: esp8266_pwm 72 | id: pow_blue_led 73 | pin: 74 | number: GPIO13 75 | inverted: True 76 | 77 | light: 78 | - platform: monochromatic 79 | name: "Blue LED" 80 | output: pow_blue_led 81 | id: led 82 | restore_mode: ALWAYS_ON 83 | 84 | sensor: 85 | - platform: wifi_signal 86 | name: "WiFi Signal" 87 | update_interval: 60s 88 | - platform: uptime 89 | name: "Uptime" 90 | - platform: cse7766 91 | current: 92 | name: "Current" 93 | voltage: 94 | name: "Voltage" 95 | power: 96 | name: "POW Power" 97 | id: power 98 | on_value_range: 99 | - above: 4.0 100 | then: 101 | - light.turn_on: led 102 | - below: 4.0 103 | then: 104 | - light.turn_off: led 105 | 106 | text_sensor: 107 | - platform: version 108 | name: "ESPHome Version" 109 | -------------------------------------------------------------------------------- /k8s/prod/esphome/devices/tubeszb-upstairs.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | human_devicename: Zigbee Coordinator Upstairs 3 | esphome: 4 | name: tubeszb-upstairs 5 | project: 6 | name: tubezb.cc2652-poe-2023 7 | version: "1.0" 8 | on_boot: 9 | priority: 600 10 | then: 11 | - switch.turn_on: zRST_gpio 12 | - delay: 15ms 13 | - switch.turn_off: zRST_gpio 14 | 15 | esp32: 16 | board: esp-wrover-kit 
17 | framework: 18 | type: arduino 19 | 20 | logger: 21 | 22 | api: 23 | encryption: 24 | key: !secret tubeszb_upstairs_key 25 | 26 | ota: 27 | - platform: esphome 28 | password: !secret ota_password 29 | 30 | external_components: 31 | - source: github://tube0013/esphome-stream-server-v2 32 | - source: github://syssi/esphome-zeroconf@main 33 | 34 | dashboard_import: 35 | package_import_url: github://tube0013/tube_gateways/models/current/tubeszb-cc2652-poe-2023/firmware/esphome/tubeszb-cc2652-poe-2023.yaml 36 | 37 | ethernet: 38 | type: LAN8720 39 | mdc_pin: GPIO23 40 | mdio_pin: GPIO18 41 | clk_mode: GPIO17_OUT 42 | phy_addr: 0 43 | power_pin: GPIO12 44 | domain: !secret wifi_domain 45 | 46 | web_server: 47 | 48 | preferences: 49 | flash_write_interval: 10min 50 | 51 | 52 | script: 53 | - id: fw_update_mode 54 | then: 55 | - switch.turn_on: zBSL 56 | - delay: 1s 57 | - switch.turn_on: zRST_gpio 58 | - delay: 1s 59 | - switch.turn_off: zRST_gpio 60 | - logger.log: "Delaying ~10 seconds for cc2652p2 to settle" 61 | - delay: 11s 62 | - switch.turn_off: zBSL 63 | - logger.log: "Please try update with cc-bsl tool now" 64 | - logger.log: "cc-bsl usage: cc2538-bsl.py -p socket://10.9.1.75:6638 -evw firmware.hex" 65 | 66 | switch: 67 | - platform: gpio 68 | pin: 5 69 | id: zRST_gpio 70 | inverted: yes 71 | restore_mode: ALWAYS_OFF 72 | 73 | - platform: gpio 74 | pin: 16 75 | name: "${human_devicename} Zigbee Module Bootloader Pin" 76 | id: zBSL 77 | inverted: yes 78 | restore_mode: ALWAYS_OFF 79 | disabled_by_default: true 80 | 81 | button: 82 | - platform: template 83 | name: "${human_devicename} Zigbee Module Reset" 84 | id: zRST 85 | on_press: 86 | - switch.turn_on: zRST_gpio 87 | - delay: 15ms 88 | - switch.turn_off: zRST_gpio 89 | 90 | - platform: template 91 | name: "${human_devicename} Trigger Zigbee Module Bootloader" 92 | disabled_by_default: true 93 | on_press: 94 | - script.execute: fw_update_mode 95 | 96 | uart: 97 | id: uart_bus 98 | rx_pin: GPIO36 99 | 
tx_pin: GPIO4 100 | baud_rate: 115200 101 | 102 | stream_server: 103 | uart_id: uart_bus 104 | port: 6638 105 | id: ss 106 | 107 | binary_sensor: 108 | - platform: stream_server 109 | stream_server: ss 110 | name: "${human_devicename} TubesZB Serial Connected" 111 | 112 | zeroconf: 113 | - service: "tubeszb" 114 | protocol: "tcp" 115 | port: 6638 116 | txt: 117 | version: 1.0 118 | name: TubesZB 119 | radio_type: znp 120 | baud_rate: 115200 121 | data_flow_control: software 122 | 123 | text_sensor: 124 | - platform: ethernet_info 125 | ip_address: 126 | name: "${human_devicename} IP Address" 127 | -------------------------------------------------------------------------------- /k8s/prod/esphome/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: esphome 4 | 5 | resources: 6 | - namespace.yaml 7 | - vault.yaml 8 | - esphome.yaml 9 | - tailscale.yaml 10 | 11 | configMapGenerator: 12 | - name: esphome 13 | files: 14 | - devices/front-porch-switch.yaml 15 | - devices/garage.yaml 16 | - devices/liam-room-starlights.yaml 17 | - devices/liamplug.yaml 18 | - devices/pergola.yaml 19 | - devices/plaato-airlock.yaml 20 | - devices/plaato-keg.yaml 21 | - devices/plug1.yaml 22 | - devices/plug2.yaml 23 | - devices/plug3.yaml 24 | - devices/tubeszb-upstairs.yaml 25 | - devices/ble_proxy.yaml 26 | -------------------------------------------------------------------------------- /k8s/prod/esphome/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: esphome 6 | labels: 7 | name: esphome 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/esphome/tailscale.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: esphome 5 | namespace: esphome 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: esphome 10 | port: 11 | number: 6052 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - esphome 16 | -------------------------------------------------------------------------------- /k8s/prod/esphome/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: esphome 6 | spec: 7 | vaultAuthRef: esphome 8 | mount: secret/ 9 | type: kv-v2 10 | path: esphome 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: esphome 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: esphome 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: esphome 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/hass/files/customize.yaml: -------------------------------------------------------------------------------- 1 | #todo 2 | -------------------------------------------------------------------------------- /k8s/prod/hass/files/groups.yaml: -------------------------------------------------------------------------------- 1 | #todo 2 | -------------------------------------------------------------------------------- /k8s/prod/hass/files/puck_js_bthome.js: -------------------------------------------------------------------------------- 1 | var slowTimeout; //< After 60s we revert to slow advertising 2 | 3 | // Update the data we're advertising here 4 | function updateAdvertising(buttonState) { 5 | NRF.setAdvertising(require("BTHome").getAdvertisement([ 6 | { 7 | type : "battery", 8 | v : E.getBattery() 9 | }, 10 | { 11 | type : "temperature", 12 | v : E.getTemperature() 13 | }, 14 | { 15 | type: "button_event", 16 | 
v: buttonState 17 | }, 18 | ]), { 19 | name : "Sensor", 20 | interval: (buttonState!="none")?20:2000, // fast when we have a button press, slow otherwise 21 | // not being connectable/scannable saves power (but you'll need to reboot to connect again with the IDE!) 22 | //connectable : false, scannable : false, 23 | }); 24 | /* After 60s, call updateAdvertising again to update battery/temp 25 | and to ensure we're advertising slowly */ 26 | if (slowTimeout) clearTimeout(slowTimeout); 27 | slowTimeout = setTimeout(function() { 28 | slowTimeout = undefined; 29 | updateAdvertising("none" /* no button pressed */); 30 | }, 60000); 31 | } 32 | 33 | // When a button is pressed, update advertising with the event 34 | setWatch(function(e) { 35 | var buttonState = ((e.time - e.lastTime) > 0.5) ? "long_press" : "press"; 36 | updateAdvertising(buttonState); 37 | digitalPulse(true?LED1:LED2,1,100); 38 | }, BTN, {edge:"falling", repeat:true}); 39 | 40 | // Update advertising now 41 | updateAdvertising("none"); 42 | 43 | // Enable highest power advertising (4 on nRF52, 8 on nRF52840) 44 | NRF.setTxPower(4); 45 | -------------------------------------------------------------------------------- /k8s/prod/hass/files/remove_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sleep 30 3 | if [[ -f "$1" ]]; then 4 | rm "$1" 5 | fi 6 | 7 | SLACK_WEBHOOK_URL=$(cat /config/secrets.yaml | grep slack_webhook_url | awk '{print $2}' | sed "s/'//g") 8 | dig hooks.zapier.com >/dev/null 2>&1 # sometimes this somehow fails unless you try to resolve it once before curl 9 | curl -XPOST "${SLACK_WEBHOOK_URL}" -d "{\"message\": \"Hass backup to minio complete.\"}" 10 | -------------------------------------------------------------------------------- /k8s/prod/hass/files/scenes.yaml: -------------------------------------------------------------------------------- 1 | #todo 2 | 
-------------------------------------------------------------------------------- /k8s/prod/hass/files/scripts.yaml: -------------------------------------------------------------------------------- 1 | front_door: 2 | alias: Front Door 3 | sequence: 4 | - if: 5 | - condition: state 6 | entity_id: lock.front_door 7 | state: unlocked 8 | then: 9 | - service: lock.lock 10 | data: {} 11 | target: 12 | entity_id: lock.front_door 13 | else: 14 | - service: lock.unlock 15 | data: {} 16 | target: 17 | entity_id: lock.front_door 18 | mode: single 19 | garage_door: 20 | alias: Garage Door 21 | sequence: 22 | - type: toggle 23 | device_id: 5cc3f414b31653c3b2a25597f11ffe0e 24 | entity_id: switch.garage 25 | domain: switch 26 | - delay: 27 | hours: 0 28 | minutes: 0 29 | seconds: 1 30 | milliseconds: 0 31 | - type: toggle 32 | device_id: 5cc3f414b31653c3b2a25597f11ffe0e 33 | entity_id: switch.garage 34 | domain: switch 35 | mode: single 36 | -------------------------------------------------------------------------------- /k8s/prod/hass/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: hass 4 | 5 | resources: 6 | - namespace.yaml 7 | - pv.yaml 8 | - pvc.yaml 9 | - vault.yaml 10 | - hass.yaml 11 | - tailscale.yaml 12 | 13 | configMapGenerator: 14 | - name: hass 15 | files: 16 | - files/automations.yaml 17 | - files/configuration.yaml 18 | - files/customize.yaml 19 | - files/groups.yaml 20 | - files/remove_backup.sh 21 | - files/scenes.yaml 22 | - files/scripts.yaml 23 | -------------------------------------------------------------------------------- /k8s/prod/hass/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: hass 6 | labels: 7 | name: hass 8 | goldilocks.fairwinds.com/enabled: "true" 9 | 
-------------------------------------------------------------------------------- /k8s/prod/hass/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: hass 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | capacity: 9 | storage: 128Mi 10 | hostPath: 11 | path: /var/mnt/storage/hass 12 | type: DirectoryOrCreate 13 | nodeAffinity: 14 | required: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: feature.node.kubernetes.io/usb-08_0bda_9210.present 18 | operator: Exists 19 | persistentVolumeReclaimPolicy: Retain 20 | storageClassName: local-path 21 | volumeMode: Filesystem 22 | -------------------------------------------------------------------------------- /k8s/prod/hass/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: hass 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: local-path 9 | resources: 10 | requests: 11 | storage: 128Mi 12 | volumeMode: Filesystem 13 | volumeName: hass 14 | -------------------------------------------------------------------------------- /k8s/prod/hass/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: hass 5 | namespace: hass 6 | annotations: 7 | tailscale.com/funnel: "true" 8 | spec: 9 | defaultBackend: 10 | service: 11 | name: hass-home-assistant 12 | port: 13 | number: 8123 14 | ingressClassName: tailscale 15 | tls: 16 | - hosts: 17 | - hass 18 | -------------------------------------------------------------------------------- /k8s/prod/hass/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: hass 6 | spec: 7 | 
vaultAuthRef: hass 8 | mount: secret/ 9 | type: kv-v2 10 | path: hass 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: hass 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: hass 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: hass 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/.gitignore: -------------------------------------------------------------------------------- 1 | myauthsecret.yaml 2 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/http-to-mqtt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: http-to-mqtt 6 | app.kubernetes.io/name: http-to-mqtt 7 | name: http-to-mqtt 8 | namespace: http-to-mqtt 9 | spec: 10 | ports: 11 | - name: http 12 | port: 3000 13 | protocol: TCP 14 | targetPort: http 15 | selector: 16 | app.kubernetes.io/instance: http-to-mqtt 17 | app.kubernetes.io/name: http-to-mqtt 18 | type: ClusterIP 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app.kubernetes.io/instance: http-to-mqtt 25 | app.kubernetes.io/name: http-to-mqtt 26 | name: http-to-mqtt 27 | namespace: http-to-mqtt 28 | spec: 29 | replicas: 1 30 | selector: 31 | matchLabels: 32 | app.kubernetes.io/instance: http-to-mqtt 33 | app.kubernetes.io/name: http-to-mqtt 34 | strategy: 35 | type: Recreate 36 | template: 37 | metadata: 38 | labels: 39 | app.kubernetes.io/instance: http-to-mqtt 40 | app.kubernetes.io/name: http-to-mqtt 41 | spec: 42 | automountServiceAccountToken: true 43 | containers: 44 | - env: 45 | - name: BIND_PORT 46 | value: "3000" 47 | - name: MQTT_HOST 48 | value: "mosquitto.consul.marcyoung.us" 49 | envFrom: 50 | - secretRef: 51 | name: 
http-to-mqtt 52 | image: "ghcr.io/myoung34/http-to-mqtt:latest" 53 | imagePullPolicy: Always 54 | resources: 55 | requests: 56 | cpu: 100m 57 | memory: 256M 58 | livenessProbe: 59 | failureThreshold: 3 60 | initialDelaySeconds: 0 61 | periodSeconds: 10 62 | tcpSocket: 63 | port: 3000 64 | timeoutSeconds: 1 65 | name: http-to-mqtt 66 | ports: 67 | - containerPort: 3000 68 | name: http 69 | protocol: TCP 70 | readinessProbe: 71 | failureThreshold: 3 72 | initialDelaySeconds: 0 73 | periodSeconds: 10 74 | tcpSocket: 75 | port: 3000 76 | timeoutSeconds: 1 77 | startupProbe: 78 | failureThreshold: 30 79 | initialDelaySeconds: 0 80 | periodSeconds: 5 81 | tcpSocket: 82 | port: 3000 83 | timeoutSeconds: 1 84 | dnsPolicy: ClusterFirstWithHostNet 85 | enableServiceLinks: true 86 | serviceAccountName: default 87 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: http-to-mqtt 4 | 5 | resources: 6 | - namespace.yaml 7 | - vault.yaml 8 | - http-to-mqtt.yaml 9 | - tailscale.yaml 10 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: http-to-mqtt 6 | labels: 7 | name: http-to-mqtt 8 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: http-to-mqtt 5 | namespace: http-to-mqtt 6 | annotations: 7 | tailscale.com/funnel: "true" 8 | spec: 9 | defaultBackend: 10 | service: 11 | name: http-to-mqtt 12 | 
port: 13 | number: 3000 14 | ingressClassName: tailscale 15 | tls: 16 | - hosts: 17 | - http-to-mqtt 18 | -------------------------------------------------------------------------------- /k8s/prod/http-to-mqtt/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: http-to-mqtt 6 | spec: 7 | vaultAuthRef: http-to-mqtt 8 | mount: secret/ 9 | type: kv-v2 10 | path: http-to-mqtt 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: http-to-mqtt 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: http-to-mqtt 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: http-to-mqtt 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/kube-state-metrics/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/kube-state-metrics/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kube-system 4 | 5 | helmCharts: 6 | - name: kube-state-metrics 7 | releaseName: kube-state-metrics 8 | namespace: kube-system 9 | version: 5.36.0 10 | includeCRDs: true 11 | repo: https://prometheus-community.github.io/helm-charts 12 | valuesInline: 13 | service: 14 | clusterIP: "10.96.0.255" 15 | -------------------------------------------------------------------------------- /k8s/prod/local-path-storage/chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: local-path-storage 5 | --- 6 | apiVersion: storage.k8s.io/v1 7 
| kind: StorageClass 8 | metadata: 9 | name: local-path 10 | provisioner: rancher.io/local-path 11 | reclaimPolicy: Delete 12 | volumeBindingMode: WaitForFirstConsumer 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: local-path-provisioner-service-account 18 | namespace: local-path-storage 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRole 22 | metadata: 23 | name: local-path-provisioner-role 24 | rules: 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - nodes 29 | - persistentvolumeclaims 30 | - configmaps 31 | verbs: 32 | - get 33 | - list 34 | - watch 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - endpoints 39 | - persistentvolumes 40 | - pods 41 | verbs: 42 | - '*' 43 | - apiGroups: 44 | - "" 45 | resources: 46 | - events 47 | verbs: 48 | - create 49 | - patch 50 | - apiGroups: 51 | - storage.k8s.io 52 | resources: 53 | - storageclasses 54 | verbs: 55 | - get 56 | - list 57 | - watch 58 | --- 59 | apiVersion: rbac.authorization.k8s.io/v1 60 | kind: ClusterRoleBinding 61 | metadata: 62 | name: local-path-provisioner-bind 63 | roleRef: 64 | apiGroup: rbac.authorization.k8s.io 65 | kind: ClusterRole 66 | name: local-path-provisioner-role 67 | subjects: 68 | - kind: ServiceAccount 69 | name: local-path-provisioner-service-account 70 | namespace: local-path-storage 71 | --- 72 | apiVersion: v1 73 | data: 74 | config.json: |- 75 | { 76 | "nodePathMap":[ 77 | { 78 | "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", 79 | "paths":["/opt/local-path-provisioner"] 80 | } 81 | ] 82 | } 83 | helperPod.yaml: |- 84 | apiVersion: v1 85 | kind: Pod 86 | metadata: 87 | name: helper-pod 88 | spec: 89 | containers: 90 | - name: helper-pod 91 | image: busybox 92 | imagePullPolicy: IfNotPresent 93 | setup: |- 94 | #!/bin/sh 95 | set -eu 96 | mkdir -m 0777 -p "$VOL_DIR" 97 | teardown: |- 98 | #!/bin/sh 99 | set -eu 100 | rm -rf "$VOL_DIR" 101 | kind: ConfigMap 102 | metadata: 103 | name: local-path-config 104 | namespace: 
local-path-storage 105 | --- 106 | apiVersion: apps/v1 107 | kind: Deployment 108 | metadata: 109 | name: local-path-provisioner 110 | namespace: local-path-storage 111 | spec: 112 | replicas: 1 113 | selector: 114 | matchLabels: 115 | app: local-path-provisioner 116 | template: 117 | metadata: 118 | labels: 119 | app: local-path-provisioner 120 | spec: 121 | containers: 122 | - command: 123 | - local-path-provisioner 124 | - --debug 125 | - start 126 | - --config 127 | - /etc/config/config.json 128 | env: 129 | - name: POD_NAMESPACE 130 | valueFrom: 131 | fieldRef: 132 | fieldPath: metadata.namespace 133 | image: rancher/local-path-provisioner:v0.0.31 134 | imagePullPolicy: IfNotPresent 135 | name: local-path-provisioner 136 | volumeMounts: 137 | - mountPath: /etc/config/ 138 | name: config-volume 139 | serviceAccountName: local-path-provisioner-service-account 140 | volumes: 141 | - configMap: 142 | name: local-path-config 143 | name: config-volume 144 | -------------------------------------------------------------------------------- /k8s/prod/local-path-storage/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: local-path-storage 4 | 5 | # kustomize build "github.com/rancher/local-path-provisioner/deploy?ref=v0.0.31" >chart.yaml 6 | resources: 7 | - chart.yaml 8 | 9 | patches: 10 | - target: 11 | kind: ConfigMap 12 | name: local-path-config 13 | patch: |- 14 | - op: replace 15 | path: /data 16 | value: 17 | config.json: |- 18 | { 19 | "nodePathMap":[ 20 | { 21 | "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES", 22 | "paths":["/var/mnt/storage"] 23 | } 24 | ] 25 | } 26 | helperPod.yaml: |- 27 | apiVersion: v1 28 | kind: Pod 29 | metadata: 30 | name: helper-pod 31 | spec: 32 | containers: 33 | - name: helper-pod 34 | image: busybox 35 | imagePullPolicy: IfNotPresent 36 | setup: |- 37 | #!/bin/sh 38 | set -eu 39 | mkdir -m 0777 -p
"$VOL_DIR" 40 | teardown: |- 41 | #!/bin/sh 42 | set -eu 43 | rm -rf "$VOL_DIR" 44 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/backuptargets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: longhorn.io/v1beta2 2 | kind: BackupTarget 3 | metadata: 4 | name: default 5 | spec: 6 | backupTargetURL: s3://longhorn@minio.consul.marcyoung.us/backups/ 7 | credentialSecret: longhorn # pragma: allowlist secret 8 | pollInterval: 5m0s 9 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: longhorn-system 4 | 5 | resources: 6 | - namespace.yaml 7 | - tailscale.yaml 8 | - vault.yaml 9 | - backuptargets.yaml 10 | - recurringjobs.yaml 11 | 12 | helmCharts: 13 | - name: longhorn 14 | releaseName: longhorn 15 | version: 1.9.0 16 | includeCRDs: true 17 | repo: https://charts.longhorn.io 18 | valuesInline: 19 | longhornUI: 20 | replicas: 1 21 | csi: 22 | resizerReplicaCount: 1 23 | snapshotterReplicaCount: 1 24 | provisionerReplicaCount: 1 25 | attacherReplicaCount: 1 26 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: longhorn 6 | labels: 7 | pod-security.kubernetes.io/enforce: privileged 8 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/recurringjobs.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: longhorn.io/v1beta2 3 | kind: RecurringJob 4 | metadata: 5 | name: snapshot-cleanup 6 | namespace: longhorn-system 7 | spec: 8 | concurrency: 1 9 | cron: 20 0 * * ? 10 | groups: 11 | - default 12 | labels: {} 13 | name: snapshot-cleanup 14 | parameters: {} 15 | retain: 0 16 | task: snapshot-cleanup 17 | --- 18 | apiVersion: longhorn.io/v1beta2 19 | kind: RecurringJob 20 | metadata: 21 | name: snapshot-delete 22 | namespace: longhorn-system 23 | spec: 24 | concurrency: 1 25 | cron: 30 0 * * ? 26 | groups: 27 | - default 28 | labels: {} 29 | name: snapshot-delete 30 | parameters: {} 31 | retain: 2 32 | task: snapshot-delete 33 | --- 34 | apiVersion: longhorn.io/v1beta2 35 | kind: RecurringJob 36 | metadata: 37 | name: snapshot 38 | namespace: longhorn-system 39 | spec: 40 | concurrency: 1 41 | cron: 0 0 * * ? 42 | groups: 43 | - default 44 | labels: {} 45 | name: snapshot 46 | parameters: {} 47 | retain: 3 48 | task: snapshot 49 | --- 50 | apiVersion: longhorn.io/v1beta2 51 | kind: RecurringJob 52 | metadata: 53 | name: backup 54 | namespace: longhorn-system 55 | spec: 56 | concurrency: 1 57 | cron: 15 0 * * ? 
58 | groups: 59 | - default 60 | labels: {} 61 | name: backup 62 | parameters: {} 63 | retain: 3 64 | task: backup 65 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: longhorn 5 | namespace: longhorn 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: longhorn-frontend 10 | port: 11 | number: 80 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - longhorn 16 | -------------------------------------------------------------------------------- /k8s/prod/longhorn/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: longhorn 6 | spec: 7 | vaultAuthRef: longhorn 8 | mount: secret/ 9 | type: kv-v2 10 | path: longhorn 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: longhorn 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: longhorn 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: longhorn 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/.gitignore: -------------------------------------------------------------------------------- 1 | myauthsecret.yaml 2 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: mailgun-to-paperless-ngx 4 | 5 | resources: 6 | - namespace.yaml 7 | - vault.yaml 8 | - mailgun-to-paperless-ngx.yaml 9 | - 
tailscale.yaml 10 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/mailgun-to-paperless-ngx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: mailgun-to-paperless-ngx 6 | app.kubernetes.io/name: mailgun-to-paperless-ngx 7 | name: mailgun-to-paperless-ngx 8 | namespace: mailgun-to-paperless-ngx 9 | spec: 10 | ports: 11 | - name: http 12 | port: 3000 13 | protocol: TCP 14 | targetPort: http 15 | selector: 16 | app.kubernetes.io/instance: mailgun-to-paperless-ngx 17 | app.kubernetes.io/name: mailgun-to-paperless-ngx 18 | type: ClusterIP 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app.kubernetes.io/instance: mailgun-to-paperless-ngx 25 | app.kubernetes.io/name: mailgun-to-paperless-ngx 26 | name: mailgun-to-paperless-ngx 27 | namespace: mailgun-to-paperless-ngx 28 | spec: 29 | replicas: 1 30 | selector: 31 | matchLabels: 32 | app.kubernetes.io/instance: mailgun-to-paperless-ngx 33 | app.kubernetes.io/name: mailgun-to-paperless-ngx 34 | strategy: 35 | type: Recreate 36 | template: 37 | metadata: 38 | labels: 39 | app.kubernetes.io/instance: mailgun-to-paperless-ngx 40 | app.kubernetes.io/name: mailgun-to-paperless-ngx 41 | spec: 42 | automountServiceAccountToken: true 43 | containers: 44 | - env: 45 | - name: BIND_PORT 46 | value: "3000" 47 | envFrom: 48 | - secretRef: 49 | name: mailgun-to-paperless-ngx 50 | image: "ghcr.io/myoung34/mailgun-to-paperless-ngx:latest" 51 | imagePullPolicy: Always 52 | resources: 53 | requests: 54 | cpu: 100m 55 | memory: 256M 56 | livenessProbe: 57 | failureThreshold: 3 58 | initialDelaySeconds: 0 59 | periodSeconds: 10 60 | tcpSocket: 61 | port: 3000 62 | timeoutSeconds: 1 63 | name: mailgun-to-paperless-ngx 64 | ports: 65 | - containerPort: 3000 66 | name: http 67 | protocol: TCP 68 | 
readinessProbe: 69 | failureThreshold: 3 70 | initialDelaySeconds: 0 71 | periodSeconds: 10 72 | tcpSocket: 73 | port: 3000 74 | timeoutSeconds: 1 75 | startupProbe: 76 | failureThreshold: 30 77 | initialDelaySeconds: 0 78 | periodSeconds: 5 79 | tcpSocket: 80 | port: 3000 81 | timeoutSeconds: 1 82 | dnsPolicy: ClusterFirstWithHostNet 83 | enableServiceLinks: true 84 | serviceAccountName: default 85 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: mailgun-to-paperless-ngx 6 | labels: 7 | name: mailgun-to-paperless-ngx 8 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mailgun-to-paperless-ngx 5 | namespace: mailgun-to-paperless-ngx 6 | annotations: 7 | tailscale.com/funnel: "true" 8 | spec: 9 | defaultBackend: 10 | service: 11 | name: mailgun-to-paperless-ngx 12 | port: 13 | number: 3000 14 | ingressClassName: tailscale 15 | tls: 16 | - hosts: 17 | - mailgun-to-paperless-ngx 18 | -------------------------------------------------------------------------------- /k8s/prod/mailgun-to-paperless-ngx/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: mailgun-to-paperless-ngx 6 | spec: 7 | vaultAuthRef: mailgun-to-paperless-ngx 8 | mount: secret/ 9 | type: kv-v2 10 | path: mailgun-to-paperless-ngx 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: mailgun-to-paperless-ngx 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | 
kind: VaultAuth 18 | metadata: 19 | name: mailgun-to-paperless-ngx 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: mailgun-to-paperless-ngx 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/mosquitto/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: mosquitto 4 | 5 | resources: 6 | - namespace.yaml 7 | - mosquitto.yaml 8 | -------------------------------------------------------------------------------- /k8s/prod/mosquitto/mosquitto.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | mosquitto.conf: | 4 | per_listener_settings true 5 | listener 1883 6 | allow_anonymous true 7 | kind: ConfigMap 8 | metadata: 9 | labels: 10 | app.kubernetes.io/instance: mosquitto 11 | app.kubernetes.io/managed-by: Helm 12 | app.kubernetes.io/name: mosquitto 13 | app.kubernetes.io/version: 2.0.14 14 | helm.sh/chart: mosquitto-4.8.2 15 | name: mosquitto-config 16 | namespace: mosquitto 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | labels: 22 | app.kubernetes.io/instance: mosquitto 23 | app.kubernetes.io/managed-by: Helm 24 | app.kubernetes.io/name: mosquitto 25 | app.kubernetes.io/version: 2.0.14 26 | helm.sh/chart: mosquitto-4.8.2 27 | name: mosquitto 28 | namespace: mosquitto 29 | spec: 30 | ports: 31 | - name: mqtt 32 | port: 1883 33 | protocol: TCP 34 | targetPort: mqtt 35 | selector: 36 | app.kubernetes.io/instance: mosquitto 37 | app.kubernetes.io/name: mosquitto 38 | type: ClusterIP 39 | --- 40 | apiVersion: apps/v1 41 | kind: Deployment 42 | metadata: 43 | labels: 44 | app.kubernetes.io/instance: mosquitto 45 | app.kubernetes.io/managed-by: Helm 46 | app.kubernetes.io/name: mosquitto 47 | app.kubernetes.io/version: 2.0.14 48 | helm.sh/chart: 
mosquitto-4.8.2 49 | name: mosquitto 50 | namespace: mosquitto 51 | spec: 52 | replicas: 1 53 | selector: 54 | matchLabels: 55 | app.kubernetes.io/instance: mosquitto 56 | app.kubernetes.io/name: mosquitto 57 | strategy: 58 | type: Recreate 59 | template: 60 | metadata: 61 | labels: 62 | app.kubernetes.io/instance: mosquitto 63 | app.kubernetes.io/name: mosquitto 64 | spec: 65 | automountServiceAccountToken: true 66 | containers: 67 | - name: mosquitto 68 | image: eclipse-mosquitto:2.0.21 69 | imagePullPolicy: IfNotPresent 70 | livenessProbe: 71 | failureThreshold: 3 72 | initialDelaySeconds: 0 73 | periodSeconds: 10 74 | tcpSocket: 75 | port: 1883 76 | timeoutSeconds: 1 77 | ports: 78 | - containerPort: 1883 79 | name: mqtt 80 | protocol: TCP 81 | readinessProbe: 82 | failureThreshold: 3 83 | initialDelaySeconds: 0 84 | periodSeconds: 10 85 | tcpSocket: 86 | port: 1883 87 | timeoutSeconds: 1 88 | startupProbe: 89 | failureThreshold: 30 90 | initialDelaySeconds: 0 91 | periodSeconds: 5 92 | tcpSocket: 93 | port: 1883 94 | timeoutSeconds: 1 95 | resources: 96 | requests: 97 | cpu: 15m 98 | memory: 105M 99 | limits: 100 | cpu: 15m 101 | memory: 105M 102 | volumeMounts: 103 | - mountPath: /mosquitto/config/mosquitto.conf 104 | name: mosquitto-config 105 | subPath: mosquitto.conf 106 | dnsPolicy: ClusterFirst 107 | enableServiceLinks: true 108 | nodeSelector: 109 | kubernetes.io/arch: arm64 110 | serviceAccountName: default 111 | volumes: 112 | - configMap: 113 | name: mosquitto-config 114 | name: mosquitto-config 115 | -------------------------------------------------------------------------------- /k8s/prod/mosquitto/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: mosquitto 6 | labels: 7 | name: mosquitto 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- 
/k8s/prod/node-feature-discovery/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/node-feature-discovery/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: node-feature-discovery 4 | 5 | resources: 6 | - namespace.yaml 7 | 8 | helmCharts: 9 | - name: node-feature-discovery 10 | includeCRDs: true 11 | namespace: node-feature-discovery 12 | releaseName: node-feature-discovery 13 | version: 0.17.3 14 | repo: https://kubernetes-sigs.github.io/node-feature-discovery/charts 15 | 16 | patches: 17 | - target: 18 | kind: ConfigMap 19 | name: node-feature-discovery-worker-conf 20 | patch: |- 21 | - op: replace 22 | path: /data/nfd-worker.conf 23 | value: |- 24 | sources: 25 | usb: 26 | deviceClassWhitelist: 27 | - "02" 28 | - "08" 29 | - "0e" 30 | - "ef" 31 | - "fe" 32 | - "ff" 33 | deviceLabelFields: 34 | - "class" 35 | - "vendor" 36 | - "device" 37 | pci: 38 | deviceLabelFields: 39 | - "class" 40 | - "vendor" 41 | - "device" 42 | - "subsystem_vendor" 43 | - "subsystem_device" 44 | -------------------------------------------------------------------------------- /k8s/prod/node-feature-discovery/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: node-feature-discovery 6 | labels: 7 | name: node-feature-discovery 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/obsidian/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: obsidian 4 | 5 | resources: 6 | - 
namespace.yaml 7 | - vault.yaml 8 | - obsidian.yaml 9 | - tailscale.yaml 10 | -------------------------------------------------------------------------------- /k8s/prod/obsidian/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: obsidian 6 | labels: 7 | name: obsidian 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/obsidian/obsidian.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: obsidian 6 | app.kubernetes.io/name: obsidian 7 | name: obsidian 8 | namespace: obsidian 9 | spec: 10 | ports: 11 | - name: http 12 | port: 8080 13 | protocol: TCP 14 | targetPort: http 15 | selector: 16 | app.kubernetes.io/instance: obsidian 17 | app.kubernetes.io/name: obsidian 18 | type: ClusterIP 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | annotations: 24 | configmap.reloader.stakater.com/reload: obsidian 25 | secret.reloader.stakater.com/reload: obsidian 26 | labels: 27 | app.kubernetes.io/instance: obsidian 28 | app.kubernetes.io/name: obsidian 29 | name: obsidian 30 | namespace: obsidian 31 | spec: 32 | replicas: 1 33 | selector: 34 | matchLabels: 35 | app.kubernetes.io/instance: obsidian 36 | app.kubernetes.io/name: obsidian 37 | strategy: 38 | type: Recreate 39 | template: 40 | metadata: 41 | labels: 42 | app.kubernetes.io/instance: obsidian 43 | app.kubernetes.io/name: obsidian 44 | spec: 45 | automountServiceAccountToken: true 46 | containers: 47 | - env: 48 | - name: AWS_REGION 49 | value: us-east 50 | - name: AWS_DEFAULT_REGION 51 | value: us-east 52 | - name: AWS_ACCESS_KEY_ID 53 | valueFrom: 54 | secretKeyRef: 55 | key: AWS_ACCESS_KEY_ID 56 | name: obsidian 57 | - name: AWS_SECRET_ACCESS_KEY 58 | valueFrom: 
59 | secretKeyRef: 60 | key: AWS_SECRET_ACCESS_KEY 61 | name: obsidian 62 | image: "y4m4/s3www:v0.9.0" 63 | imagePullPolicy: IfNotPresent 64 | command: 65 | - "/s3www" 66 | - "-endpoint" 67 | - "http://minio.consul.marcyoung.us:9000" 68 | - "-bucket" 69 | - "obsidian-rendered" 70 | - "-address" 71 | - "0.0.0.0:8080" 72 | livenessProbe: 73 | failureThreshold: 3 74 | initialDelaySeconds: 0 75 | periodSeconds: 10 76 | tcpSocket: 77 | port: 8080 78 | timeoutSeconds: 1 79 | name: obsidian 80 | ports: 81 | - containerPort: 8080 82 | name: http 83 | protocol: TCP 84 | readinessProbe: 85 | failureThreshold: 3 86 | initialDelaySeconds: 0 87 | periodSeconds: 10 88 | tcpSocket: 89 | port: 8080 90 | timeoutSeconds: 1 91 | startupProbe: 92 | failureThreshold: 30 93 | initialDelaySeconds: 0 94 | periodSeconds: 5 95 | tcpSocket: 96 | port: 8080 97 | timeoutSeconds: 1 98 | serviceAccountName: default 99 | affinity: 100 | nodeAffinity: 101 | requiredDuringSchedulingIgnoredDuringExecution: 102 | nodeSelectorTerms: 103 | - matchExpressions: 104 | - key: kubernetes.io/arch 105 | operator: In 106 | values: 107 | - amd64 108 | -------------------------------------------------------------------------------- /k8s/prod/obsidian/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: obsidian 5 | namespace: obsidian 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: obsidian 10 | port: 11 | number: 8080 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - obsidian 16 | -------------------------------------------------------------------------------- /k8s/prod/obsidian/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: obsidian 6 | spec: 7 | vaultAuthRef: obsidian 8 | mount: secret/ 9 | type: kv-v2 10 | path: 
obsidian 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: obsidian 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: obsidian 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: obsidian 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/README.md: -------------------------------------------------------------------------------- 1 | Paperless 2 | ========= 3 | 4 | 5 | ## First time set up 6 | 7 | ### PSQL 8 | 9 | Sanity check the user 10 | ``` 11 | k -n paperless-ngx get secrets/paperless-ngx -oyaml | grep PAPERLESS_DBUSER | awk '{print $2}' | base64 -d 12 | changeme% 13 | ``` 14 | 15 | ``` 16 | CREATE user "changeme" with password 'changeme'; 17 | create database paperless; 18 | ALTER database paperless owner to "changeme"; 19 | ALTER SCHEMA public OWNER TO "changeme"; 20 | GRANT CONNECT ON DATABASE paperless TO "changeme"; 21 | GRANT USAGE ON SCHEMA public TO "changeme"; 22 | GRANT CREATE ON SCHEMA public TO "changeme"; 23 | GRANT ALL ON SCHEMA public TO "changeme"; 24 | ``` 25 | 26 | ### user 27 | 28 | From inside the container 29 | 30 | ``` 31 | # python3 manage.py createsuperuser 32 | Username (leave blank to use 'root'): foo 33 | Email address: foo@email.com 34 | Password: 35 | Password (again): 36 | Superuser created successfully. 
37 | ``` 38 | 39 | 40 | ## Backup/restore 41 | 42 | **in progress** 43 | 44 | Restoring from psql + longhorn should be possible but it's not ready yet 45 | 46 | The current lazy way: 47 | 48 | * exec into the pod 49 | * `export _date=$(date '+%Y%m%d%H%M%S')` 50 | * `mkdir /opt/paperless/backup/${_date}` 51 | * `chown 1000:1000 /opt/paperless/backup/${_date}` 52 | * `document_exporter /opt/paperless/backup/${_date}` 53 | * `kubectl cp paperless-ngx/{podname}:/opt/paperless/backup .` 54 | * `tar czf $(date '+%Y%m%d%H%M%S').tgz {backup}` 55 | * upload that to s3 56 | 57 | To restore: 58 | 59 | * drop database and re-run psql above, `rm -rf /opt/paperless/media /opt/paperless/data`, restart pod 60 | * `kubectl cp {backup} paperless-ngx/{podname}:/opt/paperless/backup` 61 | * `document_importer /opt/paperless/backup/{backup}` 62 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: paperless-ngx 4 | 5 | resources: 6 | - namespace.yaml 7 | - paperless-ngx.yaml 8 | - redis.yaml 9 | - tailscale.yaml 10 | - vault.yaml 11 | - pv.yaml 12 | - pvc.yaml 13 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: paperless-ngx 6 | labels: 7 | name: paperless-ngx 8 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/pv.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: paperless 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | capacity: 10 | storage: 2Gi 11 | csi:
12 | driver: driver.longhorn.io 13 | fsType: ext4 14 | volumeAttributes: 15 | diskSelector: "" 16 | nodeSelector: "" 17 | numberOfReplicas: "2" 18 | staleReplicaTimeout: "20" 19 | volumeHandle: paperless 20 | persistentVolumeReclaimPolicy: Retain 21 | storageClassName: longhorn-static 22 | volumeMode: Filesystem 23 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: paperless 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 2Gi 12 | storageClassName: longhorn-static 13 | volumeMode: Filesystem 14 | volumeName: paperless 15 | status: 16 | accessModes: 17 | - ReadWriteOnce 18 | capacity: 19 | storage: 2Gi 20 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/redis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: redis 6 | app.kubernetes.io/name: redis 7 | name: redis 8 | spec: 9 | ports: 10 | - name: redis 11 | port: 6379 12 | protocol: TCP 13 | targetPort: redis 14 | selector: 15 | app.kubernetes.io/instance: redis 16 | app.kubernetes.io/name: redis 17 | type: ClusterIP 18 | --- 19 | apiVersion: apps/v1 20 | kind: Deployment 21 | metadata: 22 | labels: 23 | app.kubernetes.io/instance: redis 24 | app.kubernetes.io/name: redis 25 | name: redis 26 | spec: 27 | replicas: 1 28 | selector: 29 | matchLabels: 30 | app.kubernetes.io/instance: redis 31 | app.kubernetes.io/name: redis 32 | strategy: 33 | type: Recreate 34 | template: 35 | metadata: 36 | labels: 37 | app.kubernetes.io/instance: redis 38 | app.kubernetes.io/name: redis 39 | spec: 40 | automountServiceAccountToken: true 41 | containers: 42 | - env: 43 | - name: 
PUID 44 | value: "1000" 45 | image: "redis:8" 46 | imagePullPolicy: IfNotPresent 47 | livenessProbe: 48 | failureThreshold: 3 49 | initialDelaySeconds: 0 50 | periodSeconds: 10 51 | tcpSocket: 52 | port: 6379 53 | timeoutSeconds: 1 54 | name: redis 55 | ports: 56 | - containerPort: 6379 57 | name: redis 58 | protocol: TCP 59 | readinessProbe: 60 | failureThreshold: 3 61 | initialDelaySeconds: 0 62 | periodSeconds: 10 63 | tcpSocket: 64 | port: 6379 65 | timeoutSeconds: 1 66 | startupProbe: 67 | failureThreshold: 30 68 | initialDelaySeconds: 0 69 | periodSeconds: 5 70 | tcpSocket: 71 | port: 6379 72 | timeoutSeconds: 1 73 | serviceAccountName: default 74 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/tailscale.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: paperless-ngx 6 | namespace: paperless-ngx 7 | spec: 8 | defaultBackend: 9 | service: 10 | name: paperless-ngx 11 | port: 12 | number: 8000 13 | ingressClassName: tailscale 14 | tls: 15 | - hosts: 16 | - docs 17 | -------------------------------------------------------------------------------- /k8s/prod/paperless-ngx/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: paperless-ngx 6 | spec: 7 | vaultAuthRef: paperless-ngx 8 | mount: secret/ 9 | type: kv-v2 10 | path: paperless-ngx 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: paperless-ngx 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: paperless-ngx 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: paperless-ngx 25 | serviceAccount: default 26 | 
-------------------------------------------------------------------------------- /k8s/prod/reloader/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/reloader/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: reloader 4 | 5 | resources: 6 | - namespace.yaml 7 | 8 | helmCharts: 9 | - name: reloader 10 | includeCRDs: true 11 | namespace: reloader 12 | releaseName: reloader 13 | version: 2.1.3 14 | repo: https://stakater.github.io/stakater-charts 15 | valuesInline: 16 | reloader: 17 | reloadOnCreate: true 18 | ignoreSecrets: false 19 | patches: 20 | - target: 21 | group: apps 22 | version: v1 23 | kind: Deployment 24 | name: reloader-reloader 25 | namespace: reloader 26 | patch: |- 27 | - op: remove 28 | path: /spec/template/spec/containers/0/env 29 | -------------------------------------------------------------------------------- /k8s/prod/reloader/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: reloader 6 | labels: 7 | name: reloader 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/ser2net/common.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | name: ser2net 6 | name: ser2net 7 | -------------------------------------------------------------------------------- /k8s/prod/ser2net/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: ser2net 4 
| 5 | resources: 6 | - common.yaml 7 | - ser2net-zigbee.yaml 8 | - ser2net-zwave.yaml 9 | -------------------------------------------------------------------------------- /k8s/prod/ser2net/ser2net-zigbee.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: ser2net 5 | data: 6 | ser2net.yaml: | 7 | %YAML 1.1 8 | --- 9 | define: &confver 1.0 10 | define: &banner Connected to port \N(\d)\r\n 11 | default: 12 | name: local 13 | value: true 14 | class: serialdev 15 | default: 16 | name: mdns 17 | value: false 18 | default: 19 | name: mdns-sysattrs 20 | value: true 21 | default: 22 | name: speed 23 | value: 115200n81 24 | connection: &con01 25 | accepter: tcp,3001 26 | connector: serialdev,/dev/zig,115200n81,nobreak,local 27 | options: 28 | kickolduser: true 29 | --- 30 | kind: Deployment 31 | apiVersion: apps/v1 32 | metadata: 33 | name: ser2net 34 | namespace: home-automation 35 | labels: 36 | app.kubernetes.io/name: ser2net 37 | app.kubernetes.io/instance: rfxcom 38 | annotations: 39 | configmap.reloader.stakater.com/reload: "ser2net" 40 | spec: 41 | replicas: 1 42 | strategy: 43 | type: Recreate 44 | selector: 45 | matchLabels: 46 | app.kubernetes.io/name: ser2net 47 | app.kubernetes.io/instance: rfxcom 48 | template: 49 | metadata: 50 | labels: 51 | app.kubernetes.io/name: ser2net 52 | app.kubernetes.io/instance: rfxcom 53 | spec: 54 | containers: 55 | - name: ser2net 56 | image: jippi/ser2net:latest 57 | resources: {} 58 | securityContext: 59 | allowPrivilegeEscalation: true 60 | privileged: true 61 | ports: 62 | - containerPort: 3001 63 | protocol: TCP 64 | volumeMounts: 65 | - mountPath: /etc/ser2net 66 | name: configmap 67 | - name: usb-conbee 68 | mountPath: /dev/zig 69 | volumes: 70 | - configMap: 71 | name: ser2net 72 | name: configmap 73 | - name: usb-conbee 74 | hostPath: 75 | path: 
/dev/serial/by-id/usb-dresden_elektronik_ingenieurtechnik_GmbH_ConBee_II_DE2669726-if00 76 | affinity: 77 | nodeAffinity: 78 | requiredDuringSchedulingIgnoredDuringExecution: 79 | nodeSelectorTerms: 80 | - matchExpressions: 81 | - key: feature.node.kubernetes.io/usb-02_1cf1_0030.present 82 | operator: Exists 83 | --- 84 | kind: Service 85 | apiVersion: v1 86 | metadata: 87 | name: ser2net 88 | namespace: home-automation 89 | labels: 90 | app.kubernetes.io/name: ser2net 91 | app.kubernetes.io/instance: rfxcom 92 | spec: 93 | selector: 94 | app.kubernetes.io/name: ser2net 95 | app.kubernetes.io/instance: rfxcom 96 | ports: 97 | - name: http 98 | protocol: TCP 99 | port: 3001 100 | targetPort: 3001 101 | -------------------------------------------------------------------------------- /k8s/prod/ser2net/ser2net-zwave.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: ser2net-zwave 5 | data: 6 | ser2net.yaml: | 7 | %YAML 1.1 8 | --- 9 | define: &confver 1.0 10 | define: &banner Connected to port \N(\d)\r\n 11 | default: 12 | name: local 13 | value: true 14 | class: serialdev 15 | default: 16 | name: mdns 17 | value: false 18 | default: 19 | name: mdns-sysattrs 20 | value: true 21 | default: 22 | name: speed 23 | value: 115200n81 24 | connection: &con01 25 | accepter: tcp,3002 26 | connector: serialdev,/dev/zwave,115200N81 27 | options: 28 | kickolduser: true 29 | --- 30 | kind: Deployment 31 | apiVersion: apps/v1 32 | metadata: 33 | name: ser2net-zwave 34 | namespace: home-automation 35 | labels: 36 | app.kubernetes.io/name: ser2net-zwave 37 | app.kubernetes.io/instance: rfxcom 38 | annotations: 39 | configmap.reloader.stakater.com/reload: "ser2net-zwave" 40 | spec: 41 | replicas: 1 42 | strategy: 43 | type: Recreate 44 | selector: 45 | matchLabels: 46 | app.kubernetes.io/name: ser2net-zwave 47 | app.kubernetes.io/instance: rfxcom 48 | template: 49 | metadata: 50 | labels: 
51 | app.kubernetes.io/name: ser2net-zwave 52 | app.kubernetes.io/instance: rfxcom 53 | spec: 54 | containers: 55 | - name: ser2net-zwave 56 | image: jippi/ser2net:latest 57 | resources: {} 58 | securityContext: 59 | allowPrivilegeEscalation: true 60 | privileged: true 61 | ports: 62 | - containerPort: 3002 63 | protocol: TCP 64 | volumeMounts: 65 | - mountPath: /etc/ser2net 66 | name: configmap 67 | - name: usb-zooz 68 | mountPath: /dev/zwave 69 | volumes: 70 | - configMap: 71 | name: ser2net-zwave 72 | name: configmap 73 | - name: usb-zooz 74 | hostPath: 75 | path: /dev/serial/by-id/usb-Silicon_Labs_Zooz_ZST10_700_Z-Wave_Stick_baf11d5406caec11b51361a341be1031-if00-port0 76 | affinity: 77 | nodeAffinity: 78 | requiredDuringSchedulingIgnoredDuringExecution: 79 | nodeSelectorTerms: 80 | - matchExpressions: 81 | - key: feature.node.kubernetes.io/usb-ff_10c4_ea60.present #zooz 82 | operator: Exists 83 | --- 84 | kind: Service 85 | apiVersion: v1 86 | metadata: 87 | name: ser2net-zwave 88 | namespace: home-automation 89 | labels: 90 | app.kubernetes.io/name: ser2net-zwave 91 | app.kubernetes.io/instance: rfxcom 92 | spec: 93 | selector: 94 | app.kubernetes.io/name: ser2net-zwave 95 | app.kubernetes.io/instance: rfxcom 96 | ports: 97 | - name: http 98 | protocol: TCP 99 | port: 3002 100 | targetPort: 3002 101 | -------------------------------------------------------------------------------- /k8s/prod/tailscale/.gitignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/tailscale/cr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: 3vilpenguin@gmail.com 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cluster-admin 10 | subjects: 11 | - apiGroup: 
rbac.authorization.k8s.io 12 | kind: User 13 | name: 3vilpenguin@gmail.com 14 | -------------------------------------------------------------------------------- /k8s/prod/tailscale/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: tailscale 4 | 5 | resources: 6 | - namespace.yaml 7 | - vault.yaml 8 | - cr.yaml 9 | 10 | helmCharts: 11 | - name: tailscale-operator 12 | includeCRDs: true 13 | namespace: tailscale 14 | releaseName: tailscale 15 | version: 1.84.2 16 | repo: https://pkgs.tailscale.com/helmcharts 17 | valuesInline: 18 | apiServerProxyConfig: 19 | mode: "true" 20 | operatorConfig: 21 | hostname: "homelab-operator" 22 | -------------------------------------------------------------------------------- /k8s/prod/tailscale/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: tailscale 6 | labels: 7 | name: tailscale 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/tailscale/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: tailscale 6 | spec: 7 | vaultAuthRef: tailscale 8 | mount: secret/ 9 | type: kv-v2 10 | path: tailscale 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: operator-oauth 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: tailscale 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: tailscale 25 | serviceAccount: operator 26 | -------------------------------------------------------------------------------- /k8s/prod/vault-secrets-operator/.gitignore: 
-------------------------------------------------------------------------------- 1 | charts/ 2 | -------------------------------------------------------------------------------- /k8s/prod/vault-secrets-operator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: vault-secrets-operator 4 | 5 | resources: 6 | - namespace.yaml 7 | - sa.yaml 8 | 9 | helmCharts: 10 | - name: vault-secrets-operator 11 | includeCRDs: true 12 | namespace: vault-secrets-operator 13 | releaseName: vault-secrets-operator 14 | version: 0.10.0 15 | repo: https://helm.releases.hashicorp.com 16 | valuesInline: 17 | defaultVaultConnection: 18 | enabled: true 19 | address: "http://192.168.3.2:8200" 20 | skipTLSVerify: true 21 | -------------------------------------------------------------------------------- /k8s/prod/vault-secrets-operator/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: vault-secrets-operator 6 | labels: 7 | name: vault-secrets-operator 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/vault-secrets-operator/sa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: role-tokenreview-binding 6 | namespace: vault-secrets-operator 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:auth-delegator 11 | subjects: 12 | - kind: ServiceAccount 13 | name: vault-auth 14 | namespace: vault-secrets-operator 15 | --- 16 | apiVersion: v1 17 | kind: ServiceAccount 18 | metadata: 19 | name: vault-auth 20 | namespace: vault-secrets-operator 21 | --- 22 | apiVersion: v1 23 | kind: 
Secret 24 | metadata: 25 | name: vault-auth-token 26 | annotations: 27 | kubernetes.io/service-account.name: vault-auth 28 | type: kubernetes.io/service-account-token 29 | --- 30 | -------------------------------------------------------------------------------- /k8s/prod/vault-secrets-operator/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | kustomize build --enable-helm | kubectl apply -f - 3 | 4 | export K8S_VAULT_SA_SECRET="vault-auth-token" 5 | kubectl -n vault-secrets-operator get secret/${K8S_VAULT_SA_SECRET} -o json | jq -r '.data["ca.crt"]' | base64 -d > ca.crt 6 | kubectl -n vault-secrets-operator get secret/${K8S_VAULT_SA_SECRET} -o json | jq -r '.data["token"]' | base64 -d > sa.token 7 | vault write auth/kubernetes/config kubernetes_host=https://192.168.1.254:6443 kubernetes_ca_cert=@ca.crt token_reviewer_jwt=@sa.token disable_iss_validation=true 8 | 9 | # example 10 | #vault write auth/kubernetes/role/actions-runner \ 11 | # bound_service_account_names=actions-runner \ 12 | # bound_service_account_namespaces=actions-runner \ 13 | # policies=actions-runner \ 14 | # token_policies=actions-runner 15 | -------------------------------------------------------------------------------- /k8s/prod/workflows/default_sa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: default 6 | namespace: argocd 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - pods 12 | verbs: 13 | - create 14 | - get 15 | - watch 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "argoproj.io" 21 | resources: 22 | - workflowtaskresults 23 | verbs: 24 | - create 25 | - get 26 | - watch 27 | - update 28 | - patch 29 | - delete 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: RoleBinding 33 | metadata: 34 | name: default 35 | namespace: argocd 36 | roleRef: 37 | 
apiGroup: rbac.authorization.k8s.io 38 | kind: Role 39 | name: default 40 | subjects: 41 | - kind: ServiceAccount 42 | name: default 43 | namespace: argocd 44 | -------------------------------------------------------------------------------- /k8s/prod/workflows/etcdsnapshot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: CronWorkflow 4 | metadata: 5 | name: etcd-backup 6 | spec: 7 | schedule: "0 0 1 * *" 8 | startingDeadlineSeconds: 0 9 | suspend: false 10 | workflowSpec: 11 | volumes: 12 | - name: talos-secrets 13 | secret: 14 | secretName: talos-secrets 15 | entrypoint: main 16 | templates: 17 | - name: main 18 | inputs: {} 19 | script: 20 | image: alpine:latest 21 | command: 22 | - sh 23 | - -c 24 | - | 25 | apk add -U curl aws-cli 26 | curl -sL https://github.com/siderolabs/talos/releases/download/v1.4.7/talosctl-linux-`uname -m | sed 's/aarch64/arm64/g' | sed 's/x86_64/amd64/g'` -o /usr/local/bin/talosctl 27 | chmod +x /usr/local/bin/talosctl 28 | aws configure set default.s3.signature_version s3v4 29 | talosctl -n 192.168.1.22 etcd snapshot $(date '+%Y%m%d%H%M%S').snapshot 30 | aws --endpoint-url http://minio.consul.marcyoung.us:9000 s3 cp *.snapshot s3://backups/talos-etcd/ 31 | curl -XPOST -H 'Content-Type: application/json' ${ZAPIER_WEBHOOK_URL} -d '{"message": "etcd snapshot complete"}' 32 | env: 33 | - name: ZAPIER_WEBHOOK_URL 34 | valueFrom: 35 | secretKeyRef: 36 | key: ZAPIER_WEBHOOK_URL 37 | name: argowf 38 | - name: AWS_ACCESS_KEY_ID 39 | valueFrom: 40 | secretKeyRef: 41 | key: MINIO_ACCESS_KEY_ID 42 | name: argowf 43 | - name: AWS_SECRET_ACCESS_KEY 44 | valueFrom: 45 | secretKeyRef: 46 | key: MINIO_SECRET_ACCESS_KEY 47 | name: argowf 48 | volumeMounts: 49 | - mountPath: /var/run/secrets/talos.dev 50 | name: talos-secrets 51 | --- 52 | apiVersion: talos.dev/v1alpha1 53 | kind: ServiceAccount 54 | metadata: 55 | name: talos-secrets 56 | spec: 57 | roles: 58 | 
- os:admin 59 | -------------------------------------------------------------------------------- /k8s/prod/workflows/get-token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Bearer $(kubectl -n argocd get secret admin-user -oyaml | yq .data.token | base64 -d)" | pbcopy 3 | -------------------------------------------------------------------------------- /k8s/prod/workflows/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: argocd 4 | 5 | resources: 6 | - default_sa.yaml 7 | - vault.yaml 8 | - talos.yaml 9 | - etcdsnapshot.yaml 10 | - zwavejsui.yaml 11 | - zigbee2mqtt.yaml 12 | - psql-hass.yaml 13 | - psql-paperless.yaml 14 | -------------------------------------------------------------------------------- /k8s/prod/workflows/psql-hass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: CronWorkflow 4 | metadata: 5 | name: psql-backup-hass 6 | spec: 7 | schedule: "0 0 1 * *" 8 | startingDeadlineSeconds: 0 9 | suspend: false 10 | workflowSpec: 11 | entrypoint: backup 12 | templates: 13 | - name: backup 14 | container: 15 | name: backup 16 | command: 17 | - sh 18 | - '-c' 19 | - | 20 | apk add -U bash curl aws-cli pigz >/dev/null 2>&1 21 | apk add -U postgresql16-client --repository=https://dl-cdn.alpinelinux.org/alpine/edge/main 22 | 23 | aws configure set default.s3.signature_version s3v4 24 | 25 | cat <<EOF >run.sh # heredoc (was 'cat <run.sh', which reads a nonexistent file); \$-escaped vars expand when run.sh executes 26 | 27 | _name="hass" 28 | _filename=/tmp/\${_name}-\$(date '+%Y%m%d%H%M%S').gz 29 | pg_dump \${HASS_PSQL_CONNECTION_STRING} | pigz --best > \${_filename} 30 | aws --endpoint-url http://minio.consul.marcyoung.us:9000 s3 cp \${_filename} s3://backups/psql/ 31 | curl -XPOST -H 'Content-Type: application/json' \${ZAPIER_WEBHOOK_URL} -d '{"message": "psql backup complete"}' 32 | 
EOF 33 | 34 | cat run.sh 35 | bash run.sh 36 | image: alpine:latest 37 | env: 38 | - name: ZAPIER_WEBHOOK_URL 39 | valueFrom: 40 | secretKeyRef: 41 | key: ZAPIER_WEBHOOK_URL 42 | name: argowf 43 | - name: AWS_ACCESS_KEY_ID 44 | valueFrom: 45 | secretKeyRef: 46 | key: MINIO_ACCESS_KEY_ID 47 | name: argowf 48 | - name: AWS_SECRET_ACCESS_KEY 49 | valueFrom: 50 | secretKeyRef: 51 | key: MINIO_SECRET_ACCESS_KEY 52 | name: argowf 53 | - name: HASS_PSQL_CONNECTION_STRING 54 | valueFrom: 55 | secretKeyRef: 56 | key: HASS_PSQL_CONNECTION_STRING 57 | name: argowf 58 | 59 | - name: "psql-backup" 60 | steps: 61 | - - name: "backup" 62 | template: backup 63 | -------------------------------------------------------------------------------- /k8s/prod/workflows/psql-paperless.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: CronWorkflow 4 | metadata: 5 | name: psql-backup-paperless 6 | spec: 7 | schedule: "0 0 1 * *" 8 | startingDeadlineSeconds: 0 9 | suspend: false 10 | workflowSpec: 11 | entrypoint: backup 12 | templates: 13 | - name: backup 14 | container: 15 | name: backup 16 | command: 17 | - sh 18 | - '-c' 19 | - | 20 | apk add -U bash curl aws-cli pigz >/dev/null 2>&1 21 | apk add -U postgresql16-client --repository=https://dl-cdn.alpinelinux.org/alpine/edge/main 22 | 23 | aws configure set default.s3.signature_version s3v4 24 | 25 | cat <<EOF >run.sh # heredoc (was 'cat <run.sh', which reads a nonexistent file); \$-escaped vars expand when run.sh executes 26 | 27 | _name="paperless" 28 | _filename=/tmp/\${_name}-\$(date '+%Y%m%d%H%M%S').gz 29 | pg_dump \${PAPERLESS_PSQL_CONNECTION_STRING} | pigz --best > \${_filename} 30 | aws --endpoint-url http://minio.consul.marcyoung.us:9000 s3 cp \${_filename} s3://backups/psql/ 31 | curl -XPOST -H 'Content-Type: application/json' \${ZAPIER_WEBHOOK_URL} -d '{"message": "psql backup complete"}' 32 | EOF 33 | 34 | cat run.sh 35 | bash run.sh 36 | image: alpine:latest 37 | env: 38 | - name: ZAPIER_WEBHOOK_URL 39 | valueFrom: 40 | secretKeyRef: 41 | key: 
ZAPIER_WEBHOOK_URL 42 | name: argowf 43 | - name: AWS_ACCESS_KEY_ID 44 | valueFrom: 45 | secretKeyRef: 46 | key: MINIO_ACCESS_KEY_ID 47 | name: argowf 48 | - name: AWS_SECRET_ACCESS_KEY 49 | valueFrom: 50 | secretKeyRef: 51 | key: MINIO_SECRET_ACCESS_KEY 52 | name: argowf 53 | - name: PAPERLESS_PSQL_CONNECTION_STRING 54 | valueFrom: 55 | secretKeyRef: 56 | key: PAPERLESS_PSQL_CONNECTION_STRING 57 | name: argowf 58 | 59 | - name: "psql-backup" 60 | steps: 61 | - - name: "backup" 62 | template: backup 63 | -------------------------------------------------------------------------------- /k8s/prod/workflows/talos.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: WorkflowTemplate 3 | metadata: 4 | name: talos-backup-template 5 | spec: 6 | serviceAccountName: argo 7 | templates: 8 | - name: main 9 | inputs: 10 | parameters: 11 | - name: slack_message 12 | - name: talos_directory 13 | - name: minio_path 14 | script: 15 | image: alpine:latest 16 | command: 17 | - sh 18 | - -c 19 | - | 20 | cat /etc/resolv.conf 21 | sleep 60 # TODO removeme 22 | cat /etc/resolv.conf 23 | apk add -U bash curl aws-cli 24 | curl -vv github.com 25 | 26 | _arch=$(uname -m | sed 's/aarch64/arm64/g' | sed 's/x86_64/amd64/g') 27 | _url="https://github.com/siderolabs/talos/releases/download/v1.9.4/talosctl-linux-${_arch}" 28 | 29 | echo "downloading from ${_url}" 30 | mkdir -p /usr/local/bin/ 31 | echo curl -L ${_url} -o /usr/local/bin/talosctl 32 | curl -L ${_url} -o /usr/local/bin/talosctl 33 | chmod +x /usr/local/bin/talosctl 34 | 35 | aws configure set default.s3.signature_version s3v4 36 | mkdir /tmp/backup 37 | cat <<EOF >run.sh # heredoc (was 'cat <run.sh', which reads a nonexistent file); \$-escaped vars expand when run.sh executes 38 | hosts=\$(talosctl -n \${NODES} get disks | grep -v sda | grep -v loop | grep -v '\*\$' | grep -v NODE | awk '{print \$1}') 39 | for host in \${hosts}; do 40 | echo "got host \${host}" 41 | talosctl -n \${host} list {{inputs.parameters.talos_directory}} >/dev/null 2>&1 42 | if [[ \$? 
-eq 0 ]]; then 43 | 44 | talosctl -n \${host} ls -Hr {{inputs.parameters.talos_directory}} 2>/dev/null | \ 45 | grep -v '^NODE' | cut -d' ' -f2- | sed 's/^ *//' | grep -v '^\.' | \ 46 | while IFS= read -r i; do 47 | echo "copying \${i} from \${host}" 48 | talosctl -n \${host} read "{{inputs.parameters.talos_directory}}/\${i}" >/dev/null 2>&1 49 | if [[ \$? -eq 0 ]]; then 50 | mkdir -p "/tmp/backup/\$(dirname "\${i}")" >/dev/null 2>&1 51 | talosctl -n \${host} read "{{inputs.parameters.talos_directory}}/\${i}" >"/tmp/backup/\${i}" 52 | else 53 | echo "\${i} is a directory. skip it." # \$i escaped (was unescaped) so it expands when run.sh executes, not at heredoc time 54 | fi 55 | done 56 | tar czf /tmp/\$(date '+%Y%m%d%H%M%S').tgz /tmp/backup 57 | aws --endpoint-url http://minio.consul.marcyoung.us:9000 s3 cp /tmp/*.tgz s3://backups/{{inputs.parameters.minio_path}}/ 58 | curl -XPOST -H 'Content-Type: application/json' \${ZAPIER_WEBHOOK_URL} -d '{"message": "{{inputs.parameters.slack_message}}"}' 59 | else 60 | echo "directory {{inputs.parameters.talos_directory}} not on \${host}" 61 | fi 62 | done 63 | EOF 64 | cat run.sh 65 | bash run.sh 66 | env: 67 | - name: NODES 68 | value: "192.168.1.19,192.168.1.21,192.168.1.22,192.168.1.23,192.168.1.24,192.168.1.25,192.168.1.26,192.168.1.27" 69 | - name: ZAPIER_WEBHOOK_URL 70 | valueFrom: 71 | secretKeyRef: 72 | key: ZAPIER_WEBHOOK_URL 73 | name: argowf 74 | - name: AWS_ACCESS_KEY_ID 75 | valueFrom: 76 | secretKeyRef: 77 | key: MINIO_ACCESS_KEY_ID 78 | name: argowf 79 | - name: AWS_SECRET_ACCESS_KEY 80 | valueFrom: 81 | secretKeyRef: 82 | key: MINIO_SECRET_ACCESS_KEY 83 | name: argowf 84 | volumeMounts: 85 | - mountPath: /var/run/secrets/talos.dev 86 | name: talos-secrets 87 | -------------------------------------------------------------------------------- /k8s/prod/workflows/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: argowf 6 | spec: 7 | vaultAuthRef: 
argowf 8 | mount: secret/ 9 | type: kv-v2 10 | path: argowf 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: argowf 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: argowf 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: argowf 25 | serviceAccount: default 26 | --- 27 | -------------------------------------------------------------------------------- /k8s/prod/workflows/zigbee2mqtt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: CronWorkflow 4 | metadata: 5 | name: zigbee2mqtt-backup 6 | spec: 7 | schedule: "0 0 1 * *" 8 | startingDeadlineSeconds: 0 9 | suspend: false 10 | workflowSpec: 11 | volumes: 12 | - name: talos-secrets 13 | secret: 14 | secretName: talos-secrets 15 | entrypoint: main 16 | templates: 17 | - name: main 18 | steps: 19 | - - name: call-talos-template 20 | templateRef: 21 | name: talos-backup-template 22 | template: main 23 | arguments: 24 | parameters: 25 | - name: slack_message 26 | value: "zigbee2mqtt snapshot complete" 27 | - name: talos_directory 28 | value: "/var/mnt/storage/zigbee2mqtt" 29 | - name: minio_path 30 | value: "zigbee2mqtt" 31 | --- 32 | apiVersion: argoproj.io/v1alpha1 33 | kind: CronWorkflow 34 | metadata: 35 | name: zigbee2mqtt-upstairs-backup 36 | spec: 37 | schedule: "0 1 1 * *" 38 | startingDeadlineSeconds: 0 39 | suspend: false 40 | workflowSpec: 41 | volumes: 42 | - name: talos-secrets 43 | secret: 44 | secretName: talos-secrets 45 | entrypoint: main 46 | templates: 47 | - name: main 48 | steps: 49 | - - name: call-talos-template 50 | templateRef: 51 | name: talos-backup-template 52 | template: main 53 | arguments: 54 | parameters: 55 | - name: slack_message 56 | value: "zigbee2mqtt upstairs snapshot complete" 57 | - name: talos_directory 58 | value: "/var/mnt/storage/zigbee2mqtt-upstairs" 59 | - name: minio_path 60 | 
value: "zigbee2mqtt-upstairs" 61 | -------------------------------------------------------------------------------- /k8s/prod/workflows/zwavejsui.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: CronWorkflow 4 | metadata: 5 | name: zwave-backup 6 | spec: 7 | schedule: "0 0 1 * *" 8 | startingDeadlineSeconds: 0 9 | suspend: false 10 | workflowSpec: 11 | volumes: 12 | - name: talos-secrets 13 | secret: 14 | secretName: talos-secrets 15 | entrypoint: main 16 | templates: 17 | - name: main 18 | steps: 19 | - - name: call-talos-template 20 | templateRef: 21 | name: talos-backup-template 22 | template: main 23 | arguments: 24 | parameters: 25 | - name: slack_message 26 | value: "zwave-js-ui snapshot complete" 27 | - name: talos_directory 28 | value: "/var/mnt/storage/zwave/backups/store" 29 | - name: minio_path 30 | value: "zwave-js-ui" 31 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt-upstairs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: zigbee2mqtt 4 | 5 | resources: 6 | - configmap.yaml 7 | - zigbee2mqtt.yaml 8 | - pv.yaml 9 | - pvc.yaml 10 | - tailscale.yaml 11 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt-upstairs/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: zigbee2mqtt-upstairs 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | capacity: 9 | storage: 128Mi 10 | hostPath: 11 | path: /var/mnt/storage/zigbee2mqtt-upstairs 12 | type: DirectoryOrCreate 13 | nodeAffinity: 14 | required: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: feature.node.kubernetes.io/usb-08_0bda_9210.present 18 | operator: 
Exists 19 | persistentVolumeReclaimPolicy: Retain 20 | storageClassName: local-path 21 | volumeMode: Filesystem 22 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt-upstairs/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: zigbee2mqtt-upstairs 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: local-path 9 | resources: 10 | requests: 11 | storage: 128Mi 12 | volumeMode: Filesystem 13 | volumeName: zigbee2mqtt-upstairs 14 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt-upstairs/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: zigbee2mqtt-upstairs 5 | namespace: zigbee2mqtt 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: zigbee2mqtt-upstairs 10 | port: 11 | number: 8081 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - zigbee2mqtt-upstairs 16 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: zigbee2mqtt 4 | 5 | resources: 6 | - namespace.yaml 7 | - configmap.yaml 8 | - vault.yaml 9 | - zigbee2mqtt.yaml 10 | - pv.yaml 11 | - pvc.yaml 12 | - tailscale.yaml 13 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | name: zigbee2mqtt 6 | goldilocks.fairwinds.com/enabled: "true" 7 | name: zigbee2mqtt 8 | 
-------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: zigbee2mqtt 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | capacity: 9 | storage: 128Mi 10 | hostPath: 11 | path: /var/mnt/storage/zigbee2mqtt 12 | type: DirectoryOrCreate 13 | nodeAffinity: 14 | required: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: feature.node.kubernetes.io/usb-08_0bda_9210.present 18 | operator: Exists 19 | persistentVolumeReclaimPolicy: Retain 20 | storageClassName: local-path 21 | volumeMode: Filesystem 22 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: zigbee2mqtt 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: local-path 9 | resources: 10 | requests: 11 | storage: 128Mi 12 | volumeMode: Filesystem 13 | volumeName: zigbee2mqtt 14 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: zigbee2mqtt 5 | namespace: zigbee2mqtt 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: zigbee2mqtt 10 | port: 11 | number: 8080 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - zigbee2mqtt 16 | -------------------------------------------------------------------------------- /k8s/prod/zigbee2mqtt/vault.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: secrets.hashicorp.com/v1beta1 3 | kind: VaultStaticSecret 4 | metadata: 5 | name: zigbee2mqtt 6 
| spec: 7 | vaultAuthRef: zigbee2mqtt 8 | mount: secret/ 9 | type: kv-v2 10 | path: zigbee2mqtt 11 | refreshAfter: 60s 12 | destination: 13 | create: true 14 | name: zigbee2mqtt 15 | --- 16 | apiVersion: secrets.hashicorp.com/v1beta1 17 | kind: VaultAuth 18 | metadata: 19 | name: zigbee2mqtt 20 | spec: 21 | method: kubernetes 22 | mount: kubernetes 23 | kubernetes: 24 | role: zigbee2mqtt 25 | serviceAccount: default 26 | -------------------------------------------------------------------------------- /k8s/prod/zwave-js-ui/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: zwave-js-ui 4 | 5 | resources: 6 | - namespace.yaml 7 | - zwave.yaml 8 | - pv.yaml 9 | - pvc.yaml 10 | - tailscale.yaml 11 | -------------------------------------------------------------------------------- /k8s/prod/zwave-js-ui/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: zwave-js-ui 6 | labels: 7 | name: zwave-js-ui 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/prod/zwave-js-ui/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: zwave-js-ui 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | capacity: 9 | storage: 128Mi 10 | hostPath: 11 | path: /var/mnt/storage/zwave 12 | type: DirectoryOrCreate 13 | nodeAffinity: 14 | required: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: feature.node.kubernetes.io/usb-08_0bda_9210.present 18 | operator: Exists 19 | persistentVolumeReclaimPolicy: Retain 20 | storageClassName: local-path 21 | volumeMode: Filesystem 22 | -------------------------------------------------------------------------------- 
/k8s/prod/zwave-js-ui/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: zwave-js-ui 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: local-path 9 | resources: 10 | requests: 11 | storage: 128Mi 12 | volumeMode: Filesystem 13 | volumeName: zwave-js-ui 14 | -------------------------------------------------------------------------------- /k8s/prod/zwave-js-ui/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: zwave-js-ui 5 | namespace: zwave-js-ui 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: zwave-js-ui 10 | port: 11 | number: 8091 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - zwave-js-ui 16 | -------------------------------------------------------------------------------- /k8s/prod/zwave-js-ui/zwave.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: zwave-js-ui/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: zwave-js-ui 7 | labels: 8 | app.kubernetes.io/name: zwave-js-ui 9 | --- 10 | # Source: zwave-js-ui/templates/service.yaml 11 | apiVersion: v1 12 | kind: Service 13 | metadata: 14 | name: zwave-js-ui 15 | labels: 16 | app.kubernetes.io/name: zwave-js-ui 17 | spec: 18 | type: ClusterIP 19 | ports: 20 | - port: 8091 21 | targetPort: 8091 22 | protocol: TCP 23 | name: http-ui 24 | - port: 3000 25 | targetPort: 3000 26 | protocol: TCP 27 | name: http-websocket 28 | selector: 29 | app.kubernetes.io/name: zwave-js-ui 30 | --- 31 | # Source: zwave-js-ui/templates/deployment.yaml 32 | apiVersion: apps/v1 33 | kind: Deployment 34 | metadata: 35 | name: zwave-js-ui 36 | labels: 37 | app.kubernetes.io/name: zwave-js-ui 38 | spec: 39 | selector: 40 | matchLabels: 41 | 
app.kubernetes.io/name: zwave-js-ui 42 | template: 43 | metadata: 44 | labels: 45 | app.kubernetes.io/name: zwave-js-ui 46 | spec: 47 | serviceAccountName: zwave-js-ui 48 | securityContext: 49 | {} 50 | containers: 51 | - name: zwave-js-ui 52 | securityContext: 53 | {} 54 | image: "zwavejs/zwave-js-ui:10.6.1" 55 | imagePullPolicy: IfNotPresent 56 | ports: 57 | - name: http-ui 58 | containerPort: 8091 59 | protocol: TCP 60 | - name: http-websocket 61 | containerPort: 3000 62 | protocol: TCP 63 | livenessProbe: 64 | httpGet: 65 | path: /health 66 | port: http-ui 67 | initialDelaySeconds: 15 68 | periodSeconds: 30 69 | readinessProbe: 70 | httpGet: 71 | path: /health 72 | port: http-ui 73 | initialDelaySeconds: 5 74 | periodSeconds: 30 75 | startupProbe: 76 | httpGet: 77 | path: /health 78 | port: http-ui 79 | initialDelaySeconds: 5 80 | periodSeconds: 30 81 | resources: 82 | limits: 83 | cpu: 300m 84 | memory: 256Mi 85 | requests: 86 | cpu: 30m 87 | memory: 192Mi 88 | volumeMounts: 89 | - name: store 90 | mountPath: /usr/src/app/store 91 | volumes: 92 | - name: store 93 | persistentVolumeClaim: 94 | claimName: zwave-js-ui 95 | -------------------------------------------------------------------------------- /k8s/stage/bluey/README.md: -------------------------------------------------------------------------------- 1 | Bluey VM 2 | ======== 3 | 4 | First you have to get a custom ubuntu image 5 | 6 | For me this is a local disk using `local-path-storage` CSI 7 | The default images for ubuntu are 2Gi, which are too small. 8 | 9 | A custom CDI controller will use this DataVolume to create a PVC with the same name and proper spec/annotations so that an import-specific controller detects it and launches an importer pod. This pod will gather the image specified in the source field. 
10 | 11 | Because I'm copying a qcow2 .img I need two volumes (PV/PVC) 12 | * ubuntu-scratch (unpacking the image) 13 | * ubuntu (the raw image post unpack) 14 | 15 | ``` 16 | $ k apply -f dv_ubuntu.yaml 17 | ``` 18 | 19 | You can watch the status of the copy: 20 | 21 | ``` 22 | $ kubectl logs -f importer-ubuntu # importer-{name of importer} 23 | ``` 24 | 25 | When that's done, I can simply delete it all 26 | 27 | **NOTE** I use node local storage so deleting it retains the data. Make sure any set up does not clear the data. 28 | You might need to do a targeted delete. 29 | 30 | 31 | ``` 32 | $ k delete -f dv_ubuntu.yaml 33 | ``` 34 | 35 | Next I can simply point a PV/PVC at the directory (because NLD) and use that as a boot image + cloud-init 36 | 37 | ``` 38 | $ kustomize build | kubectl apply -f - 39 | ``` 40 | 41 | Next use virtctl to jump in and look around: 42 | 43 | ``` 44 | $ virtctl console testvm 45 | ``` 46 | 47 | 48 | USB Passthrough 49 | =============== 50 | 51 | You'll need to have a privileged container with /dev mounted to /dev from the host, 52 | 53 | Next you need to set spec.template.spec.domain.devices.clientPassthrough: {} on the VM 54 | 55 | You'll need RBAC to use the virtualmachine commands. I'm lazy and allowed * to * in namespace *. Don't do this. 56 | 57 | For my USB info, it is in 1-6.1, so: 58 | 59 | ``` 60 | $ echo $(talosctl --talosconfig $(pwd)/talosconfig read /sys/bus/usb/devices/1-6.1/idVendor):$(talosctl --talosconfig $(pwd)/talosconfig read /sys/bus/usb/devices/1-6.1/idProduct) 61 | 62 | 0a12:0001 63 | ``` 64 | 65 | Lastly install virtctl and run `$ virtctl usbredir 0a12:0001 testvm`, where the device ID is the vendor:product pair read from talos above: 66 | 67 | See usbredir.yaml for how this works in reality. That `usbredir` must stay up for as long as USB passthrough is needed. 68 | 69 | Now inside your VM you can utilize the usb as though it's native.
70 | -------------------------------------------------------------------------------- /k8s/stage/bluey/dv_ubuntu.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: scratch 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | capacity: 10 | storage: 9Gi 11 | hostPath: 12 | path: /var/mnt/storage/kubevirt/scratch 13 | type: DirectoryOrCreate 14 | persistentVolumeReclaimPolicy: Retain 15 | storageClassName: local-path 16 | volumeMode: Filesystem 17 | --- 18 | apiVersion: v1 19 | kind: PersistentVolumeClaim 20 | metadata: 21 | name: ubuntu-scratch 22 | spec: 23 | accessModes: 24 | - ReadWriteMany 25 | storageClassName: local-path 26 | resources: 27 | requests: 28 | storage: 8Gi 29 | volumeMode: Filesystem 30 | volumeName: scratch 31 | --- 32 | apiVersion: v1 33 | kind: PersistentVolume 34 | metadata: 35 | name: ubuntu 36 | spec: 37 | accessModes: 38 | - ReadWriteMany 39 | capacity: 40 | storage: 9Gi 41 | hostPath: 42 | path: /var/mnt/storage/kubevirt/ubuntu 43 | type: DirectoryOrCreate 44 | persistentVolumeReclaimPolicy: Retain 45 | storageClassName: local-path 46 | volumeMode: Filesystem 47 | --- 48 | apiVersion: cdi.kubevirt.io/v1beta1 49 | kind: DataVolume 50 | metadata: 51 | name: "ubuntu" 52 | spec: 53 | storage: 54 | accessModes: 55 | - ReadWriteMany 56 | storageClassName: local-path 57 | volumeMode: Filesystem 58 | volumeName: ubuntu 59 | resources: 60 | requests: 61 | storage: 8Gi 62 | source: 63 | http: 64 | url: "https://cloud-images.ubuntu.com/jammy/20240227/jammy-server-cloudimg-amd64.img" 65 | -------------------------------------------------------------------------------- /k8s/stage/bluey/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: default 4 | 5 | resources: 6 | - ubuntu.yaml 7 | - usbredir.yaml 8 | 
-------------------------------------------------------------------------------- /k8s/stage/bluey/usbredir.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | automountServiceAccountToken: true 4 | kind: ServiceAccount 5 | metadata: 6 | name: usbredir 7 | namespace: default 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRole 11 | metadata: 12 | name: usbredir-cluster-read 13 | namespace: default 14 | rules: 15 | - apiGroups: 16 | - '*' 17 | resources: 18 | - '*' 19 | verbs: 20 | - '*' 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: usbredir-cluster-read-binding 26 | namespace: default 27 | subjects: 28 | - kind: ServiceAccount 29 | name: usbredir 30 | namespace: default 31 | roleRef: 32 | kind: ClusterRole 33 | name: usbredir-cluster-read 34 | apiGroup: rbac.authorization.k8s.io 35 | --- 36 | apiVersion: apps/v1 37 | kind: Deployment 38 | metadata: 39 | name: usbredir 40 | spec: 41 | selector: 42 | matchLabels: 43 | app: usbredir 44 | template: 45 | metadata: 46 | labels: 47 | app: usbredir 48 | spec: 49 | serviceAccountName: usbredir 50 | containers: 51 | - name: app 52 | image: alpine:latest 53 | command: 54 | - /bin/sh 55 | - -c 56 | - apk add --no-cache -U usbredir; wget 57 | https://github.com/kubevirt/kubevirt/releases/download/v1.2.0/virtctl-v1.2.0-linux-amd64 58 | -O /usr/bin/virtctl; chmod +x /usr/bin/virtctl; 59 | virtctl usbredir 0a12:0001 testvm 60 | securityContext: 61 | privileged: true 62 | volumeMounts: 63 | - name: dev 64 | mountPath: /dev 65 | volumes: 66 | - name: dev 67 | hostPath: 68 | path: /dev 69 | -------------------------------------------------------------------------------- /k8s/stage/goldilocks/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: 
goldilocks 4 | 5 | resources: 6 | - namespace.yaml 7 | - tailscale.yaml 8 | 9 | helmCharts: 10 | - name: goldilocks 11 | includeCRDs: true 12 | namespace: goldilocks 13 | releaseName: goldilocks 14 | version: 9.0.2 15 | repo: https://charts.fairwinds.com/stable 16 | valuesInline: 17 | dashboard: 18 | replicaCount: 1 19 | -------------------------------------------------------------------------------- /k8s/stage/goldilocks/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: goldilocks 6 | labels: 7 | name: goldilocks 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /k8s/stage/goldilocks/tailscale.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: goldilocks 5 | namespace: goldilocks 6 | spec: 7 | defaultBackend: 8 | service: 9 | name: goldilocks-dashboard 10 | port: 11 | number: 80 12 | ingressClassName: tailscale 13 | tls: 14 | - hosts: 15 | - goldilocks 16 | -------------------------------------------------------------------------------- /k8s/stage/kubevirt/cdi-cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cdi.kubevirt.io/v1beta1 2 | kind: CDI 3 | metadata: 4 | name: cdi 5 | spec: 6 | config: 7 | featureGates: 8 | - HonorWaitForFirstConsumer 9 | imagePullPolicy: IfNotPresent 10 | infra: 11 | nodeSelector: 12 | kubernetes.io/os: linux 13 | tolerations: 14 | - key: CriticalAddonsOnly 15 | operator: Exists 16 | workload: 17 | nodeSelector: 18 | kubernetes.io/os: linux 19 | -------------------------------------------------------------------------------- /k8s/stage/kubevirt/kubevirt-cr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
kubevirt.io/v1 3 | kind: KubeVirt 4 | metadata: 5 | name: kubevirt 6 | namespace: kubevirt 7 | spec: 8 | certificateRotateStrategy: {} 9 | configuration: 10 | developerConfiguration: 11 | featureGates: [] 12 | customizeComponents: {} 13 | imagePullPolicy: IfNotPresent 14 | workloadUpdateStrategy: {} 15 | -------------------------------------------------------------------------------- /k8s/stage/kubevirt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kubevirt 4 | 5 | resources: 6 | - kubevirt-operator.yaml 7 | - kubevirt-cr.yaml 8 | - cdi-operator.yaml 9 | - cdi-cr.yaml 10 | -------------------------------------------------------------------------------- /k8s/stage/vpa/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: vpa 4 | 5 | resources: 6 | - namespace.yaml 7 | 8 | helmCharts: 9 | - name: vpa 10 | includeCRDs: true 11 | namespace: vpa 12 | releaseName: vpa 13 | version: 4.7.2 14 | repo: https://charts.fairwinds.com/stable 15 | valuesInline: {} 16 | -------------------------------------------------------------------------------- /k8s/stage/vpa/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: vpa 6 | labels: 7 | name: vpa 8 | goldilocks.fairwinds.com/enabled: "true" 9 | -------------------------------------------------------------------------------- /misc/netboot.xyz-rpi4-sdcard.img: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/myoung34/homelab/68f06eb1734d8d77bfbd3bca7b04c89613f65f73/misc/netboot.xyz-rpi4-sdcard.img -------------------------------------------------------------------------------- 
/misc/netboot.xyz.img: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/myoung34/homelab/68f06eb1734d8d77bfbd3bca7b04c89613f65f73/misc/netboot.xyz.img -------------------------------------------------------------------------------- /misc/vault-backup.sh: -------------------------------------------------------------------------------- 1 | temp_file=$(mktemp) 2 | echo "[" >${temp_file} 3 | for item in $(vault kv list -format=json secret/ | jq -r '.[]'); do 4 | vault kv list -format=json secret/${item} >/dev/null 2>&1 5 | if [[ $? -eq 2 ]]; then 6 | echo "{\"${item}\": " >>${temp_file} 7 | vault kv get -format=json secret/${item} | jq .data.data >>${temp_file} 8 | echo "}," >>${temp_file} 9 | else 10 | for item2 in $(vault kv list -format=json secret/${item} | jq -r '.[]'); do 11 | echo "going deeper for ${item}/${item2}" 12 | vault kv get -format=json secret/${item}/${item2} | jq .data.data >>${temp_file} 13 | echo "," >>${temp_file} 14 | done 15 | fi 16 | done 17 | # remove the last line of the file because itll have a trailing comma 18 | sed -i '$ d' ${temp_file} 19 | # put }] properly at the end since we removed the last line with the trailing comma 20 | echo "}]" >>${temp_file} 21 | jq . ${temp_file} # pretty-print the assembled backup; was `echo ${temp_file} | jq .`, which fed the *filename* (invalid JSON) to jq instead of the file contents
22 | #rm ${temp_file} 23 | -------------------------------------------------------------------------------- /nomad/fluentbit.hcl: -------------------------------------------------------------------------------- 1 | job "fluent-bit" { 2 | datacenters = ["dc1"] 3 | 4 | group "fluent-bit" { 5 | network { 6 | port "syslog" { 7 | static = "5044" 8 | } 9 | port "syslog2" { 10 | static = "5045" 11 | } 12 | port "syslog3" { 13 | static = "5046" 14 | } 15 | } 16 | task "fluent-bit" { 17 | 18 | driver = "docker" 19 | 20 | vault { 21 | policies = ["datadog"] 22 | } 23 | 24 | #resources { 25 | # cpu = 300 26 | # memory = 256 27 | #} 28 | 29 | env { 30 | } 31 | 32 | 33 | template { 34 | data = <[0-9]{1,5})\>(?[a-zA-Z]{3} [0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2})? ?(?.*) 39 | Time_Key time 40 | Time_Keep On 41 | EOH 42 | destination = "local/parsers.conf" 43 | } 44 | template { 45 | data = < range(start:-5m)' 51 | -------------------------------------------------------------------------------- /nomad/jackett.hcl: -------------------------------------------------------------------------------- 1 | job "jackett" { 2 | datacenters = ["dc1"] 3 | 4 | group "jackett" { 5 | network { 6 | port "http" { 7 | static = "9117" 8 | } 9 | } 10 | task "jackett" { 11 | driver = "docker" 12 | 13 | env { 14 | TZ = "America/Chicago" 15 | PGID = "100" 16 | PUID = "1026" 17 | } 18 | 19 | service { 20 | name = "jackett" 21 | port = "http" 22 | tags = [ 23 | "traefik.http.routers.jackett_https.entrypoints=https", 24 | "traefik.http.routers.jackett_https.tls=true", 25 | "traefik.http.routers.jackett_http.entrypoints=http", 26 | "traefik.http.routers.jackett_http.middlewares=jackett_https_redirect", 27 | "traefik.http.middlewares.jackett_https_redirect.redirectscheme.scheme=https", 28 | ] 29 | } 30 | 31 | config { 32 | ports = ["http"] 33 | image = "linuxserver/jackett:latest" 34 | force_pull = true 35 | volumes = [ 36 | "/volume1/torrent/jackett/downloads:/downloads", 37 | 
"/volume1/torrent/jackett/config:/config", 38 | ] 39 | } 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /nomad/minio.hcl: -------------------------------------------------------------------------------- 1 | job "minio" { 2 | datacenters = ["dc1"] 3 | 4 | group "minio" { 5 | network { 6 | port "api" { 7 | static = "9000" 8 | } 9 | port "http" { 10 | static = "9001" 11 | } 12 | } 13 | task "minio" { 14 | 15 | driver = "docker" 16 | resources { 17 | memory = 1024 18 | } 19 | 20 | env { 21 | TZ = "America/Chicago" 22 | } 23 | 24 | vault { 25 | policies = ["minio"] 26 | } 27 | 28 | template { 29 | data = <&1"] 32 | volumes = [ 33 | "/volume1/minio/obsidian:/opt/obsidian.git", 34 | ] 35 | privileged = true 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /nomad/postgresql.hcl: -------------------------------------------------------------------------------- 1 | job "postgresql" { 2 | datacenters = ["dc1"] 3 | 4 | group "postgresql" { 5 | network { 6 | port "postgresql" { 7 | static = "5432" 8 | } 9 | } 10 | task "postgresql" { 11 | driver = "docker" 12 | vault { 13 | policies = ["postgresql"] 14 | } 15 | template { 16 | data = < network -> copy as cURL for clients 11 | $ curl 'https://192.168.2.1/proxy/network/v2/api/site/default/clients/active?includeTrafficUsage=true' \ 12 | ..... 
>clients.json 13 | $ ( echo $'|name|mac|network|ip|\n|----|----|----|----|'; cat clients.json | jq -r '.[] | select(.display_name!=null) | select(.use_fixedip==true) | "|" + .display_name + "|" + .mac + "|" + .network_name + "|" + .ip + "|"' ) | pbcopy 14 | ``` 15 | 16 | |name|mac|network|ip| 17 | |----|----|----|----| 18 | |cluster22|e4:5f:01:58:2d:7d|cluster|192.168.1.25| 19 | |ecobee|44:61:32:9c:01:00|WIFI|192.168.2.105| 20 | |cluster23|e4:5f:01:58:de:82|cluster|192.168.1.26| 21 | |front-porch-switch|ec:fa:bc:57:20:7c|IoT|192.168.4.103| 22 | |bigNASty|00:11:32:97:da:3c|NAS|192.168.3.2| 23 | |cluster11|00:e0:4c:88:0b:85|cluster|192.168.1.19| 24 | |plaato-keg|84:0d:8e:e3:01:78|IoT|192.168.4.110| 25 | |plaato-airlock|2c:f4:32:0f:78:68|IoT|192.168.4.111| 26 | |cluster12|d8:3a:dd:28:50:51|cluster|192.168.1.21| 27 | |cluster14|d8:3a:dd:55:cb:5e|cluster|192.168.1.23| 28 | |cluster24|d8:3a:dd:55:c8:e0|cluster|192.168.1.27| 29 | |liam-room-starlights|84:0d:8e:5c:12:b7|IoT|192.168.4.104| 30 | |office brother|3c:2a:f4:14:4f:e9|printer|192.168.6.9| 31 | |cluster13|dc:a6:32:d3:86:35|cluster|192.168.1.22| 32 | |garage-switch|68:c6:3a:9f:bb:55|IoT|192.168.4.101| 33 | |cluster21|00:e0:4c:88:00:cd|cluster|192.168.1.24| 34 | |pergola-lights|c4:4f:33:81:ba:cf|IoT|192.168.4.100| 35 | |tubeszb-upstairs|08:b6:1f:71:18:b7|IoT|192.168.4.108| 36 | |PicoW|28:cd:c1:08:08:09|WIFI|192.168.2.145| 37 | |barcaderator|f8:e4:e3:75:0e:3b|WIFI|192.168.2.70| 38 | |ble-proxy|64:e8:33:84:06:98|WIFI|192.168.4.109| 39 | |neon-lights|40:22:d8:e3:f4:d8|WIFI|192.168.4.250| 40 | |plug1|e8:68:e7:f3:23:a8|IoT|192.168.4.113| 41 | |plug2|a4:cf:12:b7:ff:5b|IoT|192.168.4.107| 42 | |plug3|48:3f:da:2a:d7:01|IoT|192.168.4.114| 43 | |liamplug|48:3f:da:27:7c:c3|IoT|192.168.4.112| 44 | -------------------------------------------------------------------------------- /terraform/unifi/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" { 3 | bucket 
= "terraform" 4 | key = "unifi/terraform.tfstate" 5 | region = "us-east" 6 | endpoints = { 7 | s3 = "https://us-east.object.fastlystorage.app" 8 | } 9 | skip_credentials_validation = true 10 | skip_requesting_account_id = true 11 | skip_metadata_api_check = true 12 | skip_region_validation = true 13 | use_path_style = true 14 | } 15 | required_providers { 16 | unifi = { 17 | source = "paultyng/unifi" 18 | version = "~> 0.41" 19 | } 20 | } 21 | required_version = "1.12.1" 22 | } 23 | 24 | provider "unifi" { 25 | api_url = "https://192.168.1.1" 26 | allow_insecure = true 27 | } 28 | 29 | provider "aws" { 30 | endpoints { 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /terraform/unifi/device_garage_ap.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "garage_ap" { 2 | name = "garage ap" 3 | 4 | allow_adoption = false 5 | forget_on_destroy = false 6 | } 7 | -------------------------------------------------------------------------------- /terraform/unifi/device_living_room_u6_iw.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "living_room_u6_iw" { 2 | name = "U6-IW" 3 | 4 | allow_adoption = false 5 | forget_on_destroy = false 6 | } 7 | -------------------------------------------------------------------------------- /terraform/unifi/device_office_ap.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "office_ap" { 2 | name = "office ap" 3 | 4 | allow_adoption = false 5 | forget_on_destroy = false 6 | } 7 | -------------------------------------------------------------------------------- /terraform/unifi/device_office_usw_lite.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "office_usw_lite" { 2 | name = "office usw lite" 3 | 4 | allow_adoption = false 5 | 
forget_on_destroy = false 6 | 7 | port_override { 8 | name = "tubeszb_upstairs" 9 | number = 5 10 | op_mode = "switch" 11 | } 12 | port_override { 13 | name = "livingroom usw" 14 | number = 1 15 | } 16 | port_override { 17 | name = "office ap" 18 | number = 3 19 | } 20 | port_override { 21 | name = "printer" 22 | number = 4 23 | } 24 | port_override { 25 | name = "usw" 26 | number = 2 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /terraform/unifi/device_udm_pro.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "udm_pro" { 2 | name = "Dream Machine Pro" 3 | 4 | allow_adoption = false 5 | forget_on_destroy = false 6 | 7 | port_override { 8 | number = 1 9 | } 10 | 11 | port_override { 12 | name = "office usw" 13 | number = 2 14 | } 15 | 16 | port_override { 17 | name = "bignasty" 18 | number = 4 19 | op_mode = "switch" 20 | } 21 | 22 | port_override { 23 | name = "Port 5" 24 | number = 5 25 | } 26 | 27 | port_override { 28 | name = "usw" 29 | number = 7 30 | } 31 | 32 | port_override { 33 | number = 8 34 | } 35 | 36 | lifecycle { 37 | ignore_changes = [ 38 | "port_override" 39 | ] 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /terraform/unifi/device_usw.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_device" "usw" { 2 | name = "usw" 3 | 4 | allow_adoption = false 5 | forget_on_destroy = false 6 | 7 | port_override { 8 | name = "Port 11" 9 | number = 11 10 | op_mode = "switch" 11 | } 12 | 13 | port_override { 14 | name = "cluster 1,1" 15 | number = 7 16 | } 17 | port_override { 18 | name = "cluster 1,2" 19 | number = 3 20 | } 21 | port_override { 22 | name = "cluster 1,3" 23 | number = 13 24 | } 25 | port_override { 26 | name = "cluster 1,4" 27 | number = 16 28 | } 29 | port_override { 30 | name = "cluster 2,1" 31 | number = 6 32 | } 33 | 
port_override { 34 | name = "cluster 2,2" 35 | number = 4 36 | } 37 | port_override { 38 | name = "cluster 2,3" 39 | number = 14 40 | } 41 | port_override { 42 | name = "cluster 2,4" 43 | number = 5 44 | } 45 | port_override { 46 | name = "garage ap" 47 | number = 2 48 | } 49 | port_override { 50 | name = "udm" 51 | number = 23 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /terraform/unifi/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | nextdns_servers = [ 3 | "45.90.28.103", 4 | "45.90.30.103", 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /terraform/unifi/port_forward.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_port_forward" "http" { 2 | dst_port = "80" 3 | fwd_ip = "192.168.250.100" 4 | fwd_port = "80" 5 | name = "marcyoung.us" 6 | port_forward_interface = "wan" 7 | protocol = "tcp" 8 | } 9 | 10 | resource "unifi_port_forward" "https" { 11 | dst_port = "443" 12 | fwd_ip = "192.168.250.100" 13 | fwd_port = "443" 14 | name = "marcyoung.us ssl" 15 | port_forward_interface = "wan" 16 | protocol = "tcp" 17 | } 18 | -------------------------------------------------------------------------------- /terraform/unifi/radius.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_radius_profile" "default" { 2 | name = "Default" 3 | interim_update_interval = 0 4 | use_usg_auth_server = true 5 | auth_server { 6 | ip = "192.168.0.1" 7 | port = 1812 8 | xsecret = "" 9 | } 10 | 11 | 12 | } 13 | -------------------------------------------------------------------------------- /terraform/unifi/settings.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_setting_mgmt" "default" { 2 | site = unifi_site.default.name 3 | auto_upgrade = true 4 | ssh_enabled = 
false 5 | } 6 | -------------------------------------------------------------------------------- /terraform/unifi/site.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_site" "default" { 2 | description = "Default" 3 | } 4 | -------------------------------------------------------------------------------- /terraform/unifi/usg.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_setting_usg" "usg" { 2 | site = unifi_site.default.name 3 | dhcp_relay_servers = [ 4 | ] 5 | firewall_guest_default_log = false 6 | firewall_lan_default_log = false 7 | firewall_wan_default_log = false 8 | multicast_dns_enabled = false 9 | 10 | } 11 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_cluster.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "cluster" { 2 | dhcp_dns = local.nextdns_servers 3 | dhcp_enabled = true 4 | dhcp_relay_enabled = false 5 | dhcp_start = "192.168.1.100" 6 | dhcp_stop = "192.168.1.254" 7 | dhcp_v6_dns = [] 8 | dhcp_v6_dns_auto = true 9 | dhcp_v6_enabled = false 10 | dhcp_v6_lease = 86400 11 | dhcp_v6_start = "::2" 12 | dhcp_v6_stop = "::7d1" 13 | dhcpd_boot_enabled = true 14 | dhcpd_boot_filename = "netboot.xyz.kpxe" 15 | dhcpd_boot_server = "192.168.3.2" 16 | igmp_snooping = false 17 | ipv6_pd_start = "::2" 18 | ipv6_pd_stop = "::7d1" 19 | ipv6_ra_enable = true 20 | ipv6_ra_preferred_lifetime = 14400 21 | ipv6_ra_valid_lifetime = 0 22 | ipv6_ra_priority = "high" 23 | multicast_dns = true 24 | name = "cluster" 25 | purpose = "corporate" 26 | site = "default" 27 | subnet = "192.168.1.0/24" 28 | vlan_id = 5 29 | wan_dns = [] 30 | } 31 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_defaults.tf: -------------------------------------------------------------------------------- 1 | 
resource "unifi_network" "default" { 2 | dhcp_start = "192.168.0.6" 3 | dhcp_stop = "192.168.0.254" 4 | dhcpd_boot_enabled = false 5 | domain_name = "localdomain" 6 | ipv6_ra_enable = true 7 | multicast_dns = false 8 | subnet = "192.168.0.0/24" 9 | vlan_id = 0 10 | dhcp_dns = local.nextdns_servers 11 | dhcp_enabled = true 12 | dhcp_relay_enabled = false 13 | dhcp_v6_dns = [] 14 | dhcp_v6_dns_auto = true 15 | dhcp_v6_enabled = false 16 | dhcp_v6_lease = 86400 17 | dhcp_v6_start = "::2" 18 | dhcp_v6_stop = "::7d1" 19 | igmp_snooping = false 20 | ipv6_pd_start = "::2" 21 | ipv6_pd_stop = "::7d1" 22 | ipv6_ra_preferred_lifetime = 14400 23 | ipv6_ra_valid_lifetime = 0 24 | ipv6_ra_priority = "high" 25 | name = "Default" 26 | purpose = "corporate" 27 | site = "default" 28 | wan_dns = [] 29 | } 30 | 31 | resource "unifi_network" "default_wan1" { 32 | dhcp_dns = local.nextdns_servers 33 | dhcpd_boot_enabled = false 34 | ipv6_ra_enable = false 35 | multicast_dns = false 36 | name = "Default (WAN1)" 37 | purpose = "wan" 38 | vlan_id = 0 39 | wan_dns = local.nextdns_servers 40 | network_group = "" 41 | wan_networkgroup = "WAN" 42 | wan_type = "dhcp" 43 | wan_type_v6 = "disabled" 44 | dhcp_v6_dns_auto = false 45 | dhcp_v6_lease = 0 46 | ipv6_ra_preferred_lifetime = 0 47 | ipv6_ra_valid_lifetime = 0 48 | lifecycle { 49 | ignore_changes = [ 50 | ipv6_interface_type, 51 | ] 52 | } 53 | } 54 | 55 | resource "unifi_network" "default_wan2" { 56 | dhcp_dns = local.nextdns_servers 57 | name = "Backup (WAN2)" 58 | purpose = "wan" 59 | vlan_id = 0 60 | wan_dns = local.nextdns_servers 61 | network_group = "" 62 | wan_networkgroup = "WAN2" 63 | wan_type = "dhcp" 64 | wan_type_v6 = "disabled" 65 | dhcp_v6_dns_auto = false 66 | dhcp_v6_lease = 0 67 | ipv6_ra_preferred_lifetime = 0 68 | ipv6_ra_valid_lifetime = 0 69 | lifecycle { 70 | ignore_changes = [ 71 | ipv6_interface_type, 72 | ] 73 | } 74 | } 75 | -------------------------------------------------------------------------------- 
/terraform/unifi/vlan_iot.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "iot" { 2 | dhcp_dns = local.nextdns_servers 3 | 4 | purpose = "corporate" 5 | dhcp_start = "192.168.4.100" 6 | dhcp_stop = "192.168.4.254" 7 | dhcpd_boot_enabled = false 8 | multicast_dns = false 9 | name = "IoT" 10 | site = "default" 11 | subnet = "192.168.4.0/24" 12 | vlan_id = 4 13 | dhcp_enabled = true 14 | dhcp_v6_dns_auto = true 15 | dhcp_v6_lease = 86400 16 | dhcp_v6_start = "::2" 17 | dhcp_v6_stop = "::7d1" 18 | ipv6_pd_start = "::2" 19 | ipv6_pd_stop = "::7d1" 20 | ipv6_ra_enable = true 21 | ipv6_ra_preferred_lifetime = 14400 22 | ipv6_ra_priority = "high" 23 | ipv6_ra_valid_lifetime = 0 24 | 25 | } 26 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_nas.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "nas" { 2 | dhcp_dns = local.nextdns_servers 3 | dhcp_enabled = true 4 | dhcp_relay_enabled = false 5 | dhcp_start = "192.168.3.100" 6 | dhcp_stop = "192.168.3.254" 7 | dhcp_v6_dns = [] 8 | dhcp_v6_dns_auto = true 9 | dhcp_v6_enabled = false 10 | dhcp_v6_lease = 86400 11 | dhcp_v6_start = "::2" 12 | dhcp_v6_stop = "::7d1" 13 | dhcpd_boot_enabled = false 14 | igmp_snooping = false 15 | ipv6_pd_start = "::2" 16 | ipv6_pd_stop = "::7d1" 17 | ipv6_ra_enable = true 18 | ipv6_ra_preferred_lifetime = 14400 19 | ipv6_ra_valid_lifetime = 0 20 | ipv6_ra_priority = "high" 21 | multicast_dns = false 22 | name = "NAS" 23 | purpose = "corporate" 24 | site = "default" 25 | subnet = "192.168.3.0/24" 26 | vlan_id = 3 27 | wan_dns = [] 28 | } 29 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_printer.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "printer" { 2 | dhcp_dns = local.nextdns_servers 3 
| dhcp_enabled = true 4 | dhcp_relay_enabled = false 5 | dhcp_start = "192.168.6.100" 6 | dhcp_stop = "192.168.6.254" 7 | dhcp_v6_dns = [] 8 | dhcp_v6_dns_auto = true 9 | dhcp_v6_enabled = false 10 | dhcp_v6_lease = 86400 11 | dhcp_v6_start = "::2" 12 | dhcp_v6_stop = "::7d1" 13 | dhcpd_boot_enabled = false 14 | igmp_snooping = false 15 | ipv6_pd_start = "::2" 16 | ipv6_pd_stop = "::7d1" 17 | ipv6_ra_enable = true 18 | ipv6_ra_preferred_lifetime = 14400 19 | ipv6_ra_valid_lifetime = 0 20 | ipv6_ra_priority = "high" 21 | multicast_dns = false 22 | name = "printer" 23 | purpose = "corporate" 24 | site = "default" 25 | subnet = "192.168.6.0/24" 26 | vlan_id = 7 27 | wan_dns = [] 28 | 29 | } 30 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_wifi.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "wifi" { 2 | dhcp_dns = local.nextdns_servers 3 | dhcp_enabled = true 4 | dhcp_relay_enabled = false 5 | dhcp_v6_dns = [] 6 | dhcp_v6_dns_auto = true 7 | dhcp_v6_enabled = false 8 | dhcp_v6_lease = 86400 9 | dhcpd_boot_enabled = false 10 | dhcpd_boot_filename = "netboot.xyz.kpxe" 11 | dhcpd_boot_server = "192.168.3.2" 12 | igmp_snooping = false 13 | dhcp_v6_start = "::2" 14 | dhcp_v6_stop = "::7d1" 15 | ipv6_pd_start = "::2" 16 | ipv6_pd_stop = "::7d1" 17 | ipv6_ra_enable = true 18 | ipv6_ra_preferred_lifetime = 14400 19 | ipv6_ra_valid_lifetime = 0 20 | ipv6_ra_priority = "high" 21 | multicast_dns = true 22 | purpose = "corporate" 23 | site = "default" 24 | wan_dns = [] 25 | dhcp_start = "192.168.2.6" 26 | dhcp_stop = "192.168.2.254" 27 | name = "WIFI" 28 | subnet = "192.168.2.0/24" 29 | vlan_id = 2 30 | } 31 | -------------------------------------------------------------------------------- /terraform/unifi/vlan_your_mom.tf: -------------------------------------------------------------------------------- 1 | resource "unifi_network" "your_mom" { 2 | dhcp_dns 
= local.nextdns_servers 3 | dhcp_enabled = true 4 | dhcp_relay_enabled = false 5 | dhcp_start = "192.168.5.10" 6 | dhcp_stop = "192.168.5.32" 7 | dhcp_v6_dns = [] 8 | dhcp_v6_dns_auto = false 9 | dhcp_v6_enabled = false 10 | dhcp_v6_lease = 0 11 | dhcpd_boot_enabled = true 12 | dhcpd_boot_filename = "netboot.xyz.kpxe" 13 | dhcpd_boot_server = "192.168.3.2" 14 | igmp_snooping = false 15 | ipv6_pd_start = "::2" 16 | ipv6_pd_stop = "::7d1" 17 | ipv6_ra_enable = true 18 | ipv6_ra_preferred_lifetime = 0 19 | ipv6_ra_valid_lifetime = 0 20 | multicast_dns = true 21 | name = "yourmom" 22 | purpose = "corporate" 23 | site = "default" 24 | subnet = "192.168.5.0/26" # 192.168.5.1 -> 192.168.5.62 25 | # 192.168.5.0/26 192.168.5.1 -> 192.168.5.62 26 | # 192.168.5.64/26 192.168.5.65 -> 192.168.5.126 27 | # 192.168.5.128/26 192.168.5.129 -> 192.168.5.190 28 | # 192.168.5.192/26 192.168.5.193 -> 192.168.5.254 29 | vlan_id = 6 30 | wan_dns = [] 31 | } 32 | -------------------------------------------------------------------------------- /terraform/unifi/wlan.tf: -------------------------------------------------------------------------------- 1 | data "unifi_ap_group" "default" { 2 | } 3 | 4 | data "unifi_ap_group" "garage" { 5 | name = "Garage" 6 | } 7 | 8 | data "unifi_ap_group" "misc" { 9 | name = "misc" 10 | } 11 | 12 | data "unifi_user_group" "default" { 13 | } 14 | 15 | 16 | resource "unifi_wlan" "bill_wi_the_science_fi" { 17 | name = "Bill Wi The Science Fi" 18 | security = "wpapsk" 19 | no2ghz_oui = false 20 | wlan_band = "5g" 21 | multicast_enhance = true 22 | 23 | 24 | network_id = unifi_network.wifi.id 25 | ap_group_ids = [data.unifi_ap_group.default.id] 26 | user_group_id = data.unifi_user_group.default.id 27 | 28 | lifecycle { 29 | ignore_changes = [ 30 | passphrase 31 | ] 32 | } 33 | } 34 | 35 | resource "unifi_wlan" "fbi_van" { 36 | name = "FBI Van" 37 | security = "wpapsk" 38 | no2ghz_oui = false 39 | wlan_band = "both" 40 | 41 | network_id = unifi_network.your_mom.id
42 | ap_group_ids = [data.unifi_ap_group.default.id] 43 | user_group_id = data.unifi_user_group.default.id 44 | 45 | lifecycle { 46 | ignore_changes = [ 47 | passphrase 48 | ] 49 | } 50 | } 51 | 52 | resource "unifi_wlan" "it_burns_when_ip" { 53 | name = "It Burns When IP" 54 | security = "wpapsk" 55 | no2ghz_oui = false 56 | wlan_band = "both" 57 | 58 | network_id = unifi_network.iot.id 59 | ap_group_ids = [data.unifi_ap_group.default.id] 60 | user_group_id = data.unifi_user_group.default.id 61 | 62 | lifecycle { 63 | ignore_changes = [ 64 | passphrase 65 | ] 66 | } 67 | } 68 | 69 | resource "unifi_wlan" "the_lan_before_time" { 70 | name = "The LAN Before Time" 71 | security = "wpapsk" 72 | no2ghz_oui = false 73 | wlan_band = "both" 74 | 75 | network_id = unifi_network.wifi.id 76 | ap_group_ids = [ 77 | data.unifi_ap_group.default.id, 78 | ] 79 | user_group_id = data.unifi_user_group.default.id 80 | 81 | lifecycle { 82 | ignore_changes = [ 83 | passphrase 84 | ] 85 | } 86 | } 87 | --------------------------------------------------------------------------------