├── .gitattributes
├── .github
└── workflows
│ └── infracost.yml
├── .gitignore
├── .pre-commit-config.yaml
├── .salt-lint
├── .vscode
└── extensions.json
├── CHANGELOG.md
├── CNAME
├── LICENSE
├── README.md
├── TROUBLESHOOTING.md
├── UPGRADING.md
├── docs
├── _config.yml
├── accessing.md
├── cert-manager.md
├── concourse.md
├── diagram
│ ├── cluster_architecture_simple.png
│ ├── cluster_dns_architecture.png
│ ├── high_level_architecture.png
│ ├── kubernetes-cluster.png
│ ├── kubernetes-cluster.svg
│ ├── network_mesh_topology.png
│ └── service_layout.png
├── features.md
├── httpbin.md
├── index.md
├── keycloak-gatekeeper.md
├── keycloak.md
├── kubeless.md
├── mailhog.md
├── manifest.md
├── master.md
├── media
│ ├── argo.png
│ ├── azure.png
│ ├── bluprint.png
│ ├── calico.png
│ ├── canal.png
│ ├── ceph.png
│ ├── cilium.png
│ ├── cloudflare.png
│ ├── cni.png
│ ├── cockroach_labs.png
│ ├── concourse.png
│ ├── containerd.png
│ ├── contour.png
│ ├── coredns.png
│ ├── crio.png
│ ├── docker.png
│ ├── edgefs.png
│ ├── elasticsearch.png
│ ├── envoy.png
│ ├── export.png
│ ├── falco.png
│ ├── firewall.png
│ ├── fission.png
│ ├── flannel.png
│ ├── fluentd.png
│ ├── grafana.png
│ ├── haproxy.png
│ ├── harbor.png
│ ├── hetzner.png
│ ├── httpbin.png
│ ├── istio.png
│ ├── jenkins.png
│ ├── keycloak.png
│ ├── kibana.png
│ ├── knative.png
│ ├── kubeless.png
│ ├── kubernetes.png
│ ├── kubernetes_dashboard.png
│ ├── longhorn.png
│ ├── mailhog.png
│ ├── minio.png
│ ├── nats.png
│ ├── network_topology.png
│ ├── nginx.png
│ ├── nuclio.png
│ ├── octarinesec.png
│ ├── openebs.png
│ ├── openfaas.png
│ ├── ovh.png
│ ├── portworx.png
│ ├── prometheus.png
│ ├── proxyinjector.png
│ ├── rkt.png
│ ├── rook.png
│ ├── saltstack.png
│ ├── saltstack_server_topology.png
│ ├── saltstack_topology.png
│ ├── scaleway.png
│ ├── spinnaker.png
│ ├── tekton.png
│ ├── terraform.png
│ ├── tinyproxy.png
│ ├── traefik.png
│ ├── velero.png
│ ├── weaveworks.png
│ ├── wireguard.png
│ └── yugabytedb.png
├── minio-operator.md
├── node.md
├── passwords.md
├── prerequisits.md
├── prometheus_query.md
├── proxy.md
└── vistio.md
├── hack
├── README.md
├── kubeconfig.sh
├── libraries
│ └── custom-logger.sh
├── provision.sh
├── release.sh
└── terminate.sh
├── srv
├── pillar
│ ├── .gitignore
│ ├── cluster_config.sls.example
│ └── top.sls
└── salt
│ ├── _orchestrate
│ ├── 01-common.sls
│ ├── 02-load-balancer.sls
│ ├── 03-etcd.sls
│ ├── 04-control-plane_init.sls
│ ├── 05-control-plane_join.sls
│ ├── 06-node_join.sls
│ ├── 07-edge_join.sls
│ ├── 08-node-firewall.sls
│ ├── 09-kubernetes-apps.sls
│ └── init.sls
│ ├── _states
│ └── cloudflare.py
│ ├── common
│ ├── init.sls
│ ├── install.sls
│ └── repo.sls
│ ├── envoy
│ ├── config.sls
│ ├── defaults.yaml
│ ├── files
│ │ └── envoy.service
│ ├── init.sls
│ ├── install.sls
│ ├── map.jinja
│ ├── repo.sls
│ ├── scripts
│ │ └── envoy_repo_key.sh
│ ├── service.sls
│ └── templates
│ │ └── envoy.yaml.j2
│ ├── kubernetes
│ ├── addons
│ │ ├── default-cluster-role-binding
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── files
│ │ │ │ ├── kube-apiserver-crb.yaml
│ │ │ │ └── kubelet-crb.yaml
│ │ │ ├── init.sls
│ │ │ └── install.sls
│ │ ├── descheduler
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── rbac.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── templates
│ │ │ │ ├── configmap.yaml.j2
│ │ │ │ ├── cronjob.yaml.j2
│ │ │ │ └── job.yaml.j2
│ │ │ └── test.sls
│ │ ├── dns-horizontal-autoscaler
│ │ │ ├── config.sls
│ │ │ ├── files
│ │ │ │ ├── dha-deployment.yaml
│ │ │ │ └── dha-rbac.yaml
│ │ │ ├── init.sls
│ │ │ └── install.sls
│ │ ├── httpbin
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── service.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── templates
│ │ │ │ ├── deployment.yaml.j2
│ │ │ │ └── ingress.yaml.j2
│ │ │ └── test.sls
│ │ ├── init.sls
│ │ ├── kube-scan
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── templates
│ │ │ │ ├── deployment.yaml.j2
│ │ │ │ └── ingress.yaml.j2
│ │ │ └── test.sls
│ │ ├── node-problem-detector
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── node-problem-detector-config.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── templates
│ │ │ │ └── node-problem-detector.yaml.j2
│ │ │ └── test.sls
│ │ ├── open-policy-agent
│ │ │ ├── certs.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── required-labels-template.yaml
│ │ │ │ └── unique-ingress-host-template.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── policy.sls
│ │ │ └── templates
│ │ │ │ ├── blacklist-images-template.yaml.j2
│ │ │ │ └── gatekeeper.yaml.j2
│ │ ├── rook-cockroachdb
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ └── templates
│ │ │ │ ├── cluster.yaml.j2
│ │ │ │ └── operator.yaml.j2
│ │ ├── rook-yugabytedb
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ └── templates
│ │ │ │ ├── cluster.yaml.j2
│ │ │ │ └── operator.yaml.j2
│ │ ├── tekton
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── operator_v1alpha1_dashboard_cr.yaml
│ │ │ │ ├── operator_v1alpha1_pipeline_cr.yaml
│ │ │ │ └── operator_v1alpha1_trigger_cr.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── osprep.sls
│ │ │ ├── repo.sls
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ └── release.yaml.j2
│ │ │ └── test.sls
│ │ └── weave-scope
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── namespace.yaml
│ │ │ ├── psp.yaml
│ │ │ ├── sa.yaml
│ │ │ └── svc.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── templates
│ │ │ ├── deploy.yaml.j2
│ │ │ ├── ds-containerd.yaml.j2
│ │ │ ├── ds-crio.yaml.j2
│ │ │ ├── ds.yaml.j2
│ │ │ ├── ingress.yaml.j2
│ │ │ └── probe-deploy.yaml.j2
│ │ │ └── test.sls
│ ├── charts
│ │ ├── argo
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── oauth
│ │ │ │ └── keycloak
│ │ │ │ │ ├── init.sls
│ │ │ │ │ └── templates
│ │ │ │ │ └── workflows-secret.yaml.j2
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── cd-values.yaml.j2
│ │ │ │ ├── events-values.yaml.j2
│ │ │ │ ├── workflows-minio-values.yaml.j2
│ │ │ │ └── workflows-values.yaml.j2
│ │ ├── concourse
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── minio-values.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── coredns
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── falco
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── fission
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ ├── templates
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ ├── gitea
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── minio.sls
│ │ │ ├── namespace.sls
│ │ │ ├── oauth.sls
│ │ │ ├── oauth
│ │ │ │ └── keycloak
│ │ │ │ │ ├── files
│ │ │ │ │ ├── admins-group.json
│ │ │ │ │ ├── client-scopes.json
│ │ │ │ │ ├── groups-protocolmapper.json
│ │ │ │ │ ├── protocolmapper.json
│ │ │ │ │ ├── userid-protocolmapper.json
│ │ │ │ │ ├── username-protocolmapper.json
│ │ │ │ │ └── users-group.json
│ │ │ │ │ ├── scripts
│ │ │ │ │ ├── kc-clientsecret-gitea.sh
│ │ │ │ │ └── kc-config-gitea.sh
│ │ │ │ │ └── templates
│ │ │ │ │ ├── client.json.j2
│ │ │ │ │ └── realms.json.j2
│ │ │ ├── prometheus.sls
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ ├── minioinstance.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ ├── harbor
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── finalize.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── auth-oidc.json.j2
│ │ │ │ ├── certificates.yaml.j2
│ │ │ │ ├── minio-values.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── init.sls
│ │ ├── jenkins
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ └── rbac.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ ├── keycloak-gatekeeper
│ │ │ ├── README.md
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── alertmanager-protocolmapper.json
│ │ │ │ ├── client-scopes.json
│ │ │ │ ├── groups-protocolmapper.json
│ │ │ │ ├── keycloak-kubernetes-admins-group.json
│ │ │ │ ├── keycloak-kubernetes-users-group.json
│ │ │ │ ├── kubernetes-dashboard-protocolmapper.json
│ │ │ │ ├── prometheus-protocolmapper.json
│ │ │ │ ├── rook-ceph-protocolmapper.json
│ │ │ │ └── weave-scope-protocolmapper.json
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── kube-prometheus.sls
│ │ │ ├── kubernetes-dashboard.sls
│ │ │ ├── map.jinja
│ │ │ ├── rook-ceph.sls
│ │ │ ├── scripts
│ │ │ │ └── kcgk-injector.sh
│ │ │ ├── templates
│ │ │ │ ├── alertmanager.json.j2
│ │ │ │ ├── keycloak-kubernetes-rbac.yaml.j2
│ │ │ │ ├── kubernetes-dashboard.json.j2
│ │ │ │ ├── prometheus.json.j2
│ │ │ │ ├── realms.json.j2
│ │ │ │ ├── rook-ceph.json.j2
│ │ │ │ └── weave-scope.json.j2
│ │ │ ├── test.sls
│ │ │ └── weave-scope.sls
│ │ ├── keycloak
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── realms
│ │ │ │ └── default
│ │ │ │ │ ├── headers.j2
│ │ │ │ │ ├── init.sls
│ │ │ │ │ ├── map.jinja
│ │ │ │ │ ├── realm.sls
│ │ │ │ │ └── templates
│ │ │ │ │ ├── realm-export.json
│ │ │ │ │ └── realms.json.j2
│ │ │ ├── repos.sls
│ │ │ ├── scripts
│ │ │ │ └── kc-clientsecret.sh
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── kube-prometheus
│ │ │ ├── config.sls
│ │ │ ├── dashboards.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── cert-manager-grafana-dashboard-configmap.yaml
│ │ │ │ ├── cockroachdb-grafana-dashboard-configmap.yaml
│ │ │ │ ├── falco-grafana-dashboard-configmap.yaml
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── nats-grafana-dashboard-configmap.yaml
│ │ │ │ ├── nginx-ingress-grafana-dashboard-configmap.yaml
│ │ │ │ ├── openfaas-grafana-dashboard-configmap.yaml
│ │ │ │ ├── portworx-grafana-dashboard-configmap.yaml
│ │ │ │ ├── rook-ceph-grafana-dashboard-configmap.yaml
│ │ │ │ └── traefik-grafana-dashboard-configmap.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── mailhog
│ │ │ ├── README.md
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ └── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── metrics-server
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── nats-operator
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── nats-cluster.yaml.j2
│ │ │ │ ├── stan-cluster.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── nuclio
│ │ │ ├── charts.sls
│ │ │ ├── clean.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ ├── registry.sls
│ │ │ └── templates
│ │ │ │ ├── dockerhhub-secret.yaml.j2
│ │ │ │ ├── harbor-secret.yaml.j2
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ ├── quay-secret.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── openfaas
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── gateway-metrics-service.yaml
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── servicemonitor.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── oauth.sls
│ │ │ ├── oauth
│ │ │ │ └── keycloak
│ │ │ │ │ ├── files
│ │ │ │ │ ├── admins-group.json
│ │ │ │ │ ├── client-scopes.json
│ │ │ │ │ ├── groups-protocolmapper.json
│ │ │ │ │ ├── protocolmapper.json
│ │ │ │ │ ├── userid-protocolmapper.json
│ │ │ │ │ ├── username-protocolmapper.json
│ │ │ │ │ └── users-group.json
│ │ │ │ │ ├── scripts
│ │ │ │ │ ├── kc-clientsecret-openfaas.sh
│ │ │ │ │ └── kc-config-openfaas.sh
│ │ │ │ │ └── templates
│ │ │ │ │ ├── client.json.j2
│ │ │ │ │ └── realms.json.j2
│ │ │ ├── prometheus.sls
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ ├── nats-connector-deployment.yaml.j2
│ │ │ │ ├── secrets.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ ├── ory
│ │ │ ├── README.md
│ │ │ ├── charts.sls
│ │ │ ├── clean.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── kratos-selfservice-ui-node.sls
│ │ │ ├── kratos.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── oathkeeper.sls
│ │ │ ├── prometheus.sls
│ │ │ ├── templates
│ │ │ │ ├── hydra-cockroachdb-values.yaml.j2
│ │ │ │ ├── hydra-cockroachdb.yaml.j2
│ │ │ │ ├── hydra-ingress.yaml.j2
│ │ │ │ ├── hydra-secrets.yaml.j2
│ │ │ │ ├── hydra-values.yaml.j2
│ │ │ │ ├── idp-values.yaml.j2
│ │ │ │ ├── kratos-cockroachdb-values.yaml.j2
│ │ │ │ ├── kratos-cockroachdb.yaml.j2
│ │ │ │ ├── kratos-ingress.yaml.j2
│ │ │ │ ├── kratos-mailslurper.yaml.j2
│ │ │ │ ├── kratos-secrets.yaml.j2
│ │ │ │ ├── kratos-selfservice-ui-node-values.yaml.j2
│ │ │ │ ├── kratos-values.yaml.j2
│ │ │ │ ├── oathkeeper-ingress.yaml.j2
│ │ │ │ └── oathkeeper-values.yaml.j2
│ │ │ └── test.sls
│ │ ├── proxyinjector
│ │ │ ├── README.md
│ │ │ ├── charts.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── oauth.sls
│ │ │ ├── oauth
│ │ │ │ └── keycloak
│ │ │ │ │ ├── files
│ │ │ │ │ ├── admins-group.json
│ │ │ │ │ ├── client-scopes.json
│ │ │ │ │ ├── groups-protocolmapper.json
│ │ │ │ │ ├── protocolmapper.json
│ │ │ │ │ └── users-group.json
│ │ │ │ │ ├── scripts
│ │ │ │ │ ├── kc-clientsecret-demo.sh
│ │ │ │ │ └── kc-config-demo.sh
│ │ │ │ │ └── templates
│ │ │ │ │ ├── client.json.j2
│ │ │ │ │ └── realms.json.j2
│ │ │ ├── patch
│ │ │ │ └── deployment.yaml
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ ├── kubehttpbin-values.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ ├── spinnaker
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── minio-values.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── velero
│ │ │ ├── README.md
│ │ │ ├── charts.sls
│ │ │ ├── clean.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── minio.sls
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ ├── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ ├── minioinstance.yaml.j2
│ │ │ │ ├── secrets.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ │ └── test.sls
│ │ └── vistio
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ ├── values-mesh-only.yaml
│ │ │ └── values-with-ingress.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── templates
│ │ │ └── ingress.yaml.j2
│ │ │ └── test.sls
│ ├── cni
│ │ ├── 99-loopback.conf
│ │ ├── calico
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── cilium
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── flannel
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ └── templates
│ │ │ │ └── flannel.yaml.j2
│ │ ├── init.sls
│ │ └── weave
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ └── templates
│ │ │ └── weave.yaml.j2
│ ├── cri
│ │ ├── containerd
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── containerd.conf
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repo.sls
│ │ │ └── templates
│ │ │ │ └── config.toml.j2
│ │ ├── crictl.yaml
│ │ ├── crio
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── crio-shutdown.service
│ │ │ │ ├── crio.conf
│ │ │ │ ├── crio.service
│ │ │ │ ├── policy.json
│ │ │ │ └── seccomp.json
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── pkg.sls
│ │ │ └── repo.sls
│ │ ├── docker
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── daemon.json
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ └── repo.sls
│ │ └── init.sls
│ ├── csi
│ │ ├── init.sls
│ │ ├── longhorn
│ │ │ ├── README.md
│ │ │ ├── clean.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── node-label.sls
│ │ │ ├── storageclass.sls
│ │ │ └── templates
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ └── longhorn.yaml.j2
│ │ ├── minio
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ └── values.yaml.j2
│ │ ├── openebs
│ │ │ ├── README.md
│ │ │ ├── blockdevice.sls
│ │ │ ├── clean.sls
│ │ │ ├── config.sls
│ │ │ ├── cstor-storageclass.sls
│ │ │ ├── cstor.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── driver.sls
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── jiva.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ └── templates
│ │ │ │ ├── csi-operator.yaml.j2
│ │ │ │ ├── cstor-pool-claim.yaml.j2
│ │ │ │ ├── cstor-storage-class.yaml.j2
│ │ │ │ ├── jiva-csi.yaml.j2
│ │ │ │ ├── ndm-blockdevice.yaml.j2
│ │ │ │ ├── openebs-operator.yaml.j2
│ │ │ │ └── operator.yaml.j2
│ │ ├── portworx
│ │ │ ├── README.md
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── external-etcd-cert.sls
│ │ │ ├── files
│ │ │ │ ├── namespace.yaml
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── service-monitor.yaml
│ │ │ ├── firewall.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── namespace.sls
│ │ │ ├── prometheus.sls
│ │ │ ├── storageclass.sls
│ │ │ └── templates
│ │ │ │ └── storage-class.yaml.j2
│ │ └── rook-ceph
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ └── namespace.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ ├── cluster-values.yaml.j2
│ │ │ └── values.yaml.j2
│ ├── helm
│ │ ├── README.md
│ │ ├── config.sls
│ │ ├── defaults.yaml
│ │ ├── init.sls
│ │ ├── install.sls
│ │ ├── map.jinja
│ │ └── repo.sls
│ ├── ingress
│ │ ├── ambassador
│ │ │ ├── init.sls
│ │ │ └── templates
│ │ │ │ └── ambassador-rbac.yaml.j2
│ │ ├── cert-manager
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ ├── prometheus-k8s-rbac.yaml
│ │ │ │ └── servicemonitor.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── cloudflare.yaml.j2
│ │ │ │ ├── clusterissuer.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── contour
│ │ │ ├── certificate.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ │ ├── certificate.yaml.j2
│ │ │ │ └── values.yaml.j2
│ │ ├── init.sls
│ │ ├── istio
│ │ │ ├── README.md
│ │ │ ├── certificate.sls
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── demo.sls
│ │ │ ├── files
│ │ │ │ └── namespace.yaml
│ │ │ ├── ingress.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── monitoring.sls
│ │ │ ├── namespace.sls
│ │ │ └── templates
│ │ │ │ ├── bookinfo-ingress.yaml.j2
│ │ │ │ ├── certificate.yaml.j2
│ │ │ │ ├── ingress.yaml.j2
│ │ │ │ └── istio-config.yaml.j2
│ │ └── metallb
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── map.jinja
│ │ │ ├── repos.sls
│ │ │ └── templates
│ │ │ ├── ipaddresspool.yaml.j2
│ │ │ └── values.yaml.j2
│ ├── map.jinja
│ └── role
│ │ ├── edge
│ │ ├── files
│ │ │ └── acme.json
│ │ ├── init.sls
│ │ └── kubeadm
│ │ │ ├── defaults.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── kubeadm-join.sls
│ │ │ ├── map.jinja
│ │ │ ├── osprep.sls
│ │ │ ├── repo.sls
│ │ │ └── templates
│ │ │ ├── kubeadm-node.v1beta2.yaml.j2
│ │ │ └── kubeadm-node.v1beta3.yaml.j2
│ │ ├── etcd
│ │ ├── ca.sls
│ │ ├── certs.sls
│ │ ├── clean.sls
│ │ ├── config.sls
│ │ ├── defaults.yaml
│ │ ├── etcdadm
│ │ │ ├── config.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── etcdadm-init.sls
│ │ │ ├── etcdadm-join.sls
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ └── map.jinja
│ │ ├── files
│ │ │ └── etcd.service
│ │ ├── init.sls
│ │ ├── install.sls
│ │ ├── map.jinja
│ │ ├── templates
│ │ │ └── etcd.env.j2
│ │ └── test.sls
│ │ ├── master
│ │ ├── init.sls
│ │ └── kubeadm
│ │ │ ├── README.md
│ │ │ ├── audit-policy.sls
│ │ │ ├── defaults.yaml
│ │ │ ├── encryption.sls
│ │ │ ├── external-etcd-cert.sls
│ │ │ ├── files
│ │ │ └── kube-apiserver-audit-policy.yaml
│ │ │ ├── init.sls
│ │ │ ├── install.sls
│ │ │ ├── kubeadm-init.sls
│ │ │ ├── kubeadm-join.sls
│ │ │ ├── map.jinja
│ │ │ ├── osprep.sls
│ │ │ ├── refresh-token.sls
│ │ │ ├── repo.sls
│ │ │ └── templates
│ │ │ ├── encryption-config.yaml.j2
│ │ │ ├── kubeadm-config.v1beta2.yaml.j2
│ │ │ ├── kubeadm-config.v1beta3.yaml.j2
│ │ │ ├── kubeadm-controlplane.v1beta2.yaml.j2
│ │ │ └── kubeadm-controlplane.v1beta3.yaml.j2
│ │ └── node
│ │ ├── init.sls
│ │ └── kubeadm
│ │ ├── defaults.yaml
│ │ ├── init.sls
│ │ ├── install.sls
│ │ ├── kubeadm-join.sls
│ │ ├── map.jinja
│ │ ├── osprep.sls
│ │ ├── repo.sls
│ │ └── templates
│ │ ├── kubeadm-node.v1beta2.yaml.j2
│ │ └── kubeadm-node.v1beta3.yaml.j2
│ └── loopback-iscsi
│ ├── clean.sls
│ ├── config.sls
│ ├── defaults.yaml
│ ├── init.sls
│ ├── install.sls
│ ├── label.sls
│ ├── map.jinja
│ ├── mount.sls
│ ├── osprep.sls
│ └── templates
│ ├── loopback-iscsi.conf.j2
│ └── tgt.service.j2
└── terraform
├── .gitignore
├── .terraform.lock.hcl
├── backend.tf.exemple
├── dns
├── cloudflare
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
└── ovh
│ ├── main.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
├── encryption
└── cfssl
│ ├── README.md
│ ├── main.tf
│ ├── scripts
│ └── cfssl.sh
│ ├── templates
│ ├── admin-csr.json
│ ├── ca-config.json
│ ├── ca-csr.json
│ ├── dashboard-csr.json
│ ├── etcd-ca-config.json
│ ├── etcd-ca-csr.json
│ ├── etcd-csr.json
│ ├── fanneld-csr.json
│ ├── kube-apiserver-csr.json
│ ├── kube-proxy-csr.json
│ ├── master-csr.json
│ └── node-csr.json
│ ├── variables.tf
│ └── versions.tf
├── main.tf
├── management
├── salt-master
│ ├── README.md
│ ├── main.tf
│ ├── templates
│ │ └── master.conf
│ ├── variables.tf
│ └── versions.tf
└── salt-minion
│ ├── README.md
│ ├── main.tf
│ ├── templates
│ └── master.conf
│ ├── variables.tf
│ └── versions.tf
├── outputs.tf
├── provider
├── cloud-init
│ ├── edge_user-data.yaml
│ ├── etcd_user-data.yaml
│ ├── master_user-data.yaml
│ └── node_user-data.yaml
├── hcloud
│ ├── README.md
│ ├── edge.tf
│ ├── etcd.tf
│ ├── master.tf
│ ├── network.tf
│ ├── node.tf
│ ├── outputs.tf
│ ├── variables.tf
│ └── versions.tf
└── scaleway
│ ├── README.md
│ ├── edge.tf
│ ├── etcd.tf
│ ├── master.tf
│ ├── node.tf
│ ├── outputs.tf
│ ├── scaleway.tf
│ ├── variables.tf
│ └── versions.tf
├── routing
├── README.md
├── main.tf
├── scripts
│ └── wireguard_config.sh
├── templates
│ └── default-route-vpn.service
├── variables.tf
└── versions.tf
├── security
├── proxy-exceptions
│ ├── README.md
│ ├── main.tf
│ ├── variables.tf
│ └── versions.tf
├── ufw
│ ├── edge
│ │ ├── README.md
│ │ ├── files
│ │ │ └── edge.ufw
│ │ ├── main.tf
│ │ ├── scripts
│ │ │ └── ufw.sh
│ │ └── variables.tf
│ ├── etcd
│ │ ├── README.md
│ │ ├── files
│ │ │ └── etcd.ufw
│ │ ├── main.tf
│ │ ├── scripts
│ │ │ └── ufw.sh
│ │ ├── variables.tf
│ │ └── versions.tf
│ ├── master
│ │ ├── README.md
│ │ ├── files
│ │ │ └── master.ufw
│ │ ├── main.tf
│ │ ├── scripts
│ │ │ └── ufw.sh
│ │ ├── variables.tf
│ │ └── versions.tf
│ └── node
│ │ ├── README.md
│ │ ├── files
│ │ └── node.ufw
│ │ ├── main.tf
│ │ ├── scripts
│ │ └── ufw.sh
│ │ ├── variables.tf
│ │ └── versions.tf
└── wireguard
│ ├── README.md
│ ├── main.tf
│ ├── outputs.tf
│ ├── scripts
│ ├── gen_keys.sh
│ └── install-kernel-headers.sh
│ ├── templates
│ ├── interface.conf
│ ├── overlay-route.service
│ └── peer.conf
│ ├── variables.tf
│ └── versions.tf
├── terraform.tfvars.example
└── variables.tf
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.sh text eol=lf
2 | *.ps1 text eol=crlf
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | host.map
2 | certs/*.csr
3 | certs/*.crt
4 | certs/*.pem
5 | *.ppm
6 | *.mp4
7 | .vscode/*
8 | /.gtm/
9 | gource.cfg
10 | # *.mp4 already ignored above (duplicate entry)
11 | fetch_gravatar.pl
12 | bendy.png
13 | gravatar.sh
14 | !.vscode/extensions.json
15 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # For use with pre-commit.
4 | # See usage instructions at http://pre-commit.com
5 |
6 | - id: salt-lint
7 | name: Salt-lint
8 | description: This hook runs salt-lint.
9 | entry: salt-lint
10 | language: python3
11 | files: \.(sls)$
--------------------------------------------------------------------------------
/.salt-lint:
--------------------------------------------------------------------------------
1 | ---
2 | exclude_paths:
3 | - docs/
4 | - terraform/
5 | skip_list:
6 | - 207
7 | - 208
8 | tags:
9 | - formatting
10 | verbosity: 1
11 | rules:
12 | formatting:
13 | ignore: |
14 | ignore/this/directory/*.sls
15 | *.jinja
16 | *.j2
17 | 210:
18 | ignore: 'exclude_this_file.sls'
19 | severity: True
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "warpnet.salt-lint",
4 | "warpnet.saltstack-extension-pack",
5 | "ms-python.python",
6 | "donjayamanne.python-extension-pack",
7 | "samuelcolvin.jinjahtml",
8 | "artymaury.template-finder"
9 | ]
10 | }
--------------------------------------------------------------------------------
/CNAME:
--------------------------------------------------------------------------------
1 | saltstack-kubernetes.swiftsure.io
--------------------------------------------------------------------------------
/docs/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-slate
--------------------------------------------------------------------------------
/docs/diagram/cluster_architecture_simple.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/cluster_architecture_simple.png
--------------------------------------------------------------------------------
/docs/diagram/cluster_dns_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/cluster_dns_architecture.png
--------------------------------------------------------------------------------
/docs/diagram/high_level_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/high_level_architecture.png
--------------------------------------------------------------------------------
/docs/diagram/kubernetes-cluster.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/kubernetes-cluster.png
--------------------------------------------------------------------------------
/docs/diagram/network_mesh_topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/network_mesh_topology.png
--------------------------------------------------------------------------------
/docs/diagram/service_layout.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/diagram/service_layout.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Welcome to saltstack-kubernetes
--------------------------------------------------------------------------------
/docs/manifest.md:
--------------------------------------------------------------------------------
1 | # Introduction
2 |
3 | The following provides the manifest of components deployed by this project.
--------------------------------------------------------------------------------
/docs/media/argo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/argo.png
--------------------------------------------------------------------------------
/docs/media/azure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/azure.png
--------------------------------------------------------------------------------
/docs/media/bluprint.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/bluprint.png
--------------------------------------------------------------------------------
/docs/media/calico.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/calico.png
--------------------------------------------------------------------------------
/docs/media/canal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/canal.png
--------------------------------------------------------------------------------
/docs/media/ceph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/ceph.png
--------------------------------------------------------------------------------
/docs/media/cilium.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/cilium.png
--------------------------------------------------------------------------------
/docs/media/cloudflare.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/cloudflare.png
--------------------------------------------------------------------------------
/docs/media/cni.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/cni.png
--------------------------------------------------------------------------------
/docs/media/cockroach_labs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/cockroach_labs.png
--------------------------------------------------------------------------------
/docs/media/concourse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/concourse.png
--------------------------------------------------------------------------------
/docs/media/containerd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/containerd.png
--------------------------------------------------------------------------------
/docs/media/contour.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/contour.png
--------------------------------------------------------------------------------
/docs/media/coredns.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/coredns.png
--------------------------------------------------------------------------------
/docs/media/crio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/crio.png
--------------------------------------------------------------------------------
/docs/media/docker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/docker.png
--------------------------------------------------------------------------------
/docs/media/edgefs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/edgefs.png
--------------------------------------------------------------------------------
/docs/media/elasticsearch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/elasticsearch.png
--------------------------------------------------------------------------------
/docs/media/envoy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/envoy.png
--------------------------------------------------------------------------------
/docs/media/export.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/export.png
--------------------------------------------------------------------------------
/docs/media/falco.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/falco.png
--------------------------------------------------------------------------------
/docs/media/firewall.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/firewall.png
--------------------------------------------------------------------------------
/docs/media/fission.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/fission.png
--------------------------------------------------------------------------------
/docs/media/flannel.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/flannel.png
--------------------------------------------------------------------------------
/docs/media/fluentd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/fluentd.png
--------------------------------------------------------------------------------
/docs/media/grafana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/grafana.png
--------------------------------------------------------------------------------
/docs/media/haproxy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/haproxy.png
--------------------------------------------------------------------------------
/docs/media/harbor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/harbor.png
--------------------------------------------------------------------------------
/docs/media/hetzner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/hetzner.png
--------------------------------------------------------------------------------
/docs/media/httpbin.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/httpbin.png
--------------------------------------------------------------------------------
/docs/media/istio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/istio.png
--------------------------------------------------------------------------------
/docs/media/jenkins.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/jenkins.png
--------------------------------------------------------------------------------
/docs/media/keycloak.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/keycloak.png
--------------------------------------------------------------------------------
/docs/media/kibana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/kibana.png
--------------------------------------------------------------------------------
/docs/media/knative.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/knative.png
--------------------------------------------------------------------------------
/docs/media/kubeless.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/kubeless.png
--------------------------------------------------------------------------------
/docs/media/kubernetes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/kubernetes.png
--------------------------------------------------------------------------------
/docs/media/kubernetes_dashboard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/kubernetes_dashboard.png
--------------------------------------------------------------------------------
/docs/media/longhorn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/longhorn.png
--------------------------------------------------------------------------------
/docs/media/mailhog.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/mailhog.png
--------------------------------------------------------------------------------
/docs/media/minio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/minio.png
--------------------------------------------------------------------------------
/docs/media/nats.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/nats.png
--------------------------------------------------------------------------------
/docs/media/network_topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/network_topology.png
--------------------------------------------------------------------------------
/docs/media/nginx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/nginx.png
--------------------------------------------------------------------------------
/docs/media/nuclio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/nuclio.png
--------------------------------------------------------------------------------
/docs/media/octarinesec.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/octarinesec.png
--------------------------------------------------------------------------------
/docs/media/openebs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/openebs.png
--------------------------------------------------------------------------------
/docs/media/openfaas.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/openfaas.png
--------------------------------------------------------------------------------
/docs/media/ovh.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/ovh.png
--------------------------------------------------------------------------------
/docs/media/portworx.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/portworx.png
--------------------------------------------------------------------------------
/docs/media/prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/prometheus.png
--------------------------------------------------------------------------------
/docs/media/proxyinjector.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/proxyinjector.png
--------------------------------------------------------------------------------
/docs/media/rkt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/rkt.png
--------------------------------------------------------------------------------
/docs/media/rook.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/rook.png
--------------------------------------------------------------------------------
/docs/media/saltstack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/saltstack.png
--------------------------------------------------------------------------------
/docs/media/saltstack_server_topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/saltstack_server_topology.png
--------------------------------------------------------------------------------
/docs/media/saltstack_topology.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/saltstack_topology.png
--------------------------------------------------------------------------------
/docs/media/scaleway.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/scaleway.png
--------------------------------------------------------------------------------
/docs/media/spinnaker.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/spinnaker.png
--------------------------------------------------------------------------------
/docs/media/tekton.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/tekton.png
--------------------------------------------------------------------------------
/docs/media/terraform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/terraform.png
--------------------------------------------------------------------------------
/docs/media/tinyproxy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/tinyproxy.png
--------------------------------------------------------------------------------
/docs/media/traefik.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/traefik.png
--------------------------------------------------------------------------------
/docs/media/velero.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/velero.png
--------------------------------------------------------------------------------
/docs/media/weaveworks.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/weaveworks.png
--------------------------------------------------------------------------------
/docs/media/wireguard.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/wireguard.png
--------------------------------------------------------------------------------
/docs/media/yugabytedb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/docs/media/yugabytedb.png
--------------------------------------------------------------------------------
/docs/prometheus_query.md:
--------------------------------------------------------------------------------
1 | # Prometheus Query
2 |
3 | sum by (container_name) (rate(container_cpu_usage_seconds_total{namespace!="kube-system", container_name!=""}[1m]))
--------------------------------------------------------------------------------
/hack/kubeconfig.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | source hack/libraries/custom-logger.sh
6 |
7 | PUBLIC_DOMAIN=${PUBLIC_DOMAIN:-$(cat srv/pillar/*.sls | grep 'public-domain' | cut -d ' ' -f 2)}
8 |
9 | scp master01:/etc/kubernetes/admin.conf ~/.kube/config
10 |
11 | public-domain: testruction.io
--------------------------------------------------------------------------------
/hack/provision.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | source hack/libraries/custom-logger.sh
6 |
7 | pushd terraform/
8 | terraform init
9 | terraform plan
10 | terraform apply -auto-approve
11 | popd
12 | eok "Provisioned infrastructure."
13 |
--------------------------------------------------------------------------------
/hack/release.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -euo pipefail
3 |
4 | source hack/libraries/custom-logger.sh
5 |
6 | PUBLIC_DOMAIN=${PUBLIC_DOMAIN:-$(cat srv/pillar/*.sls | grep 'public-domain' | cut -d ' ' -f 2)}
7 |
8 | rsync -viva ./srv/salt edge:/srv/
9 | rsync -viva ./srv/pillar edge:/srv/
10 |
11 | eok "Published salt pillar and states configuration to Salt-Master."
12 |
--------------------------------------------------------------------------------
/hack/terminate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -euo pipefail
4 |
5 | source hack/libraries/custom-logger.sh
6 |
7 | pushd terraform/
8 | terraform destroy -auto-approve
9 | popd
10 | eok "Terminated infrastructure."
11 |
--------------------------------------------------------------------------------
/srv/pillar/.gitignore:
--------------------------------------------------------------------------------
1 | cluster_config.sls
2 | cloudflare.sls
3 | cluster_config.sls.backup
4 |
--------------------------------------------------------------------------------
/srv/pillar/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 |   '*':
3 |     {% if "etcd" in grains.get('role', []) or "master" in grains.get('role', []) or "node" in grains.get('role', []) %}
4 |     - cluster_config
5 |     {% endif %}
6 |
--------------------------------------------------------------------------------
/srv/salt/_orchestrate/01-common.sls:
--------------------------------------------------------------------------------
1 | common:
2 | salt.state:
3 | - tgt: 'G@role:edge or G@role:etcd or G@role:master or G@role:node'
4 | - tgt_type: compound
5 | - sls: common
6 | - queue: True
7 |
8 | helm:
9 | salt.state:
10 | - tgt: 'G@role:master'
11 | - tgt_type: compound
12 | - sls: kubernetes.helm
13 | - queue: True
14 |
15 | loopback-iscsi:
16 | salt.state:
17 | - tgt: 'G@role:node'
18 | - tgt_type: compound
19 | - sls: loopback-iscsi
20 | - queue: True
21 | - require:
22 | - salt: common
23 |
--------------------------------------------------------------------------------
/srv/salt/_orchestrate/02-load-balancer.sls:
--------------------------------------------------------------------------------
1 | envoy:
2 | salt.state:
3 | - tgt: 'role:edge'
4 | - tgt_type: grain
5 | - sls: envoy
6 | - queue: True
7 |
--------------------------------------------------------------------------------
/srv/salt/_orchestrate/08-node-firewall.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import storage with context -%}
2 |
3 | {%- if storage.get('portworx', {'enabled': False}).enabled %}
4 | compute_allow_portworx:
5 |   salt.state:
6 |     - tgt: 'G@role:node'
7 |     - tgt_type: compound
8 |     - sls: kubernetes.csi.portworx.firewall
9 |     - queue: True
10 | {%- endif %}
--------------------------------------------------------------------------------
/srv/salt/_orchestrate/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .01-common
3 | - .02-load-balancer
4 | - .03-etcd
5 | - .04-control-plane_init
6 | - .05-control-plane_join
7 | - .06-node_join
8 | - .07-edge_join
9 | - .08-node-firewall
10 | - .09-kubernetes-apps
11 |
--------------------------------------------------------------------------------
/srv/salt/common/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .repo
3 | - .install
--------------------------------------------------------------------------------
/srv/salt/common/repo.sls:
--------------------------------------------------------------------------------
1 | azure-cli:
2 | pkgrepo.managed:
3 | - name: deb https://packages.microsoft.com/repos/azure-cli/ jammy main
4 | - dist: jammy
5 | - file: /etc/apt/sources.list.d/azure-cli.list
6 | - gpgcheck: 1
7 | - key_url: https://packages.microsoft.com/keys/microsoft.asc
--------------------------------------------------------------------------------
/srv/salt/envoy/defaults.yaml:
--------------------------------------------------------------------------------
1 | envoy:
2 | enabled: true
3 | version: latest
4 | distribution: jammy
5 | kubernetes_api_port: 6443
6 | overwrite: True
7 | service: envoy
8 | admin_port: 58080
9 | gpg_key: https://deb.dl.getenvoy.io/public/gpg.8115BA8E629CC074.key
10 | gpg_key_hash: 26f75d4df6c08a9e26593910ca0948325e742b05b5fe70369fa2cfdc3cb57ce4
--------------------------------------------------------------------------------
/srv/salt/envoy/files/envoy.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Envoy Proxy
3 | Requires=network-online.target
4 | After=network-online.target
5 |
6 | [Service]
7 | ExecStart=/usr/bin/envoy -c /etc/envoy/envoy.yaml
8 | Restart=always
9 | RestartSec=5
10 | KillMode=mixed
11 | SyslogIdentifier=envoy
12 | LimitNOFILE=640000
13 |
14 | [Install]
15 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/srv/salt/envoy/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .repo
3 | - .install
4 | - .config
5 | - .service
--------------------------------------------------------------------------------
/srv/salt/envoy/install.sls:
--------------------------------------------------------------------------------
1 | {%- from tpldir ~ "/map.jinja" import envoy with context -%}
2 |
3 |
4 | getenvoy-envoy:
5 |   pkg.installed:
6 |     - version: {{ envoy.version | safe }}
7 |     - require:
8 |       - pkg: envoy-package-repository
9 |
10 |
--------------------------------------------------------------------------------
/srv/salt/envoy/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set envoy = salt['pillar.get']('envoy', default=defaults['envoy'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/envoy/scripts/envoy_repo_key.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | curl -sL 'https://deb.dl.getenvoy.io/public/gpg.8115BA8E629CC074.key' \
4 | | sudo gpg --dearmor -o /usr/share/keyrings/getenvoy-keyring.gpg
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/default-cluster-role-binding/README.md:
--------------------------------------------------------------------------------
1 | *
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/default-cluster-role-binding/files/kubelet-crb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | annotations:
5 | rbac.authorization.kubernetes.io/autoupdate: "true"
6 | labels:
7 | kubernetes.io/bootstrapping: rbac-defaults
8 | name: system:node
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: system:node
13 | subjects:
14 | - apiGroup: rbac.authorization.k8s.io
15 | kind: Group
16 | name: system:nodes
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/default-cluster-role-binding/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.default-cluster-role-binding.config
3 | - kubernetes.addons.default-cluster-role-binding.install
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/descheduler/defaults.yaml:
--------------------------------------------------------------------------------
1 | descheduler:
2 | cron_schedule: "*/2 * * * *"
3 | image: eu.gcr.io/k8s-artifacts-prod/descheduler/descheduler:v0.18.0
4 | remove_duplicates: True
5 | remove_pods_violating_inter_pod_anti_affinity: True
6 | low_node_utilization:
7 | enabled: True
8 | cpu_threshold: 20
9 | cpu_target: 50
10 | mem_threshold: 20
11 | mem_target: 50
12 | pod_threshold: 20
13 | pod_target: 30
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/descheduler/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.descheduler.config
3 | - kubernetes.addons.descheduler.install
4 | - kubernetes.addons.descheduler.test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/descheduler/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set descheduler = salt['pillar.get']('kubernetes:common:addons:descheduler', default=defaults['descheduler'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/descheduler/test.sls:
--------------------------------------------------------------------------------
1 | descheduler-job:
2 | cmd.run:
3 | - watch:
4 | - cmd: descheduler-rbac
5 | - cmd: descheduler-configmap
6 | - file: /srv/kubernetes/manifests/descheduler/job.yaml
7 | - runas: root
8 | - name: |
9 | kubectl apply -f /srv/kubernetes/manifests/descheduler/job.yaml
10 | - onlyif: http --verify false https://localhost:6443/livez?verbose
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/dns-horizontal-autoscaler/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/dha-rbac.yaml:
2 | file.managed:
3 | - source: salt://{{ tpldir }}/files/dha-rbac.yaml
4 | - user: root
5 | - template: jinja
6 | - group: root
7 | - mode: "0644"
8 | - context:
9 | tpldir: {{ tpldir }}
10 |
11 | /srv/kubernetes/manifests/dha-deployment.yaml:
12 | file.managed:
13 | - source: salt://{{ tpldir }}/files/dha-deployment.yaml
14 | - user: root
15 | - template: jinja
16 | - group: root
17 | - mode: "0644"
18 | - context:
19 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/dns-horizontal-autoscaler/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.dns-horizontal-autoscaler.config
3 | - kubernetes.addons.dns-horizontal-autoscaler.install
4 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/README.md:
--------------------------------------------------------------------------------
1 | # HTTPBIN
2 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/defaults.yaml:
--------------------------------------------------------------------------------
1 | httpbin:
2 | image: docker.io/kennethreitz/httpbin
3 | ingress_host: httpbin
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/ingress.sls:
--------------------------------------------------------------------------------
1 | httpbin-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/httpbin/ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - require:
6 | - file: /srv/kubernetes/manifests/httpbin
7 | - user: root
8 | - template: jinja
9 | - group: root
10 | - mode: "0644"
11 | - context:
12 | tpldir: {{ tpldir }}
13 | cmd.run:
14 | - watch:
15 | - file: /srv/kubernetes/manifests/httpbin/ingress.yaml
16 | - runas: root
17 | - name: kubectl apply -f /srv/kubernetes/manifests/httpbin/ingress.yaml
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.httpbin.config
3 | - kubernetes.addons.httpbin.install
4 | - kubernetes.addons.httpbin.ingress
5 | - kubernetes.addons.httpbin.test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/install.sls:
--------------------------------------------------------------------------------
1 | httpbin:
2 | cmd.run:
3 | - watch:
4 | - file: /srv/kubernetes/manifests/httpbin/deployment.yaml
5 | - file: /srv/kubernetes/manifests/httpbin/service.yaml
6 | - name: |
7 | kubectl apply -f /srv/kubernetes/manifests/httpbin/deployment.yaml
8 | kubectl apply -f /srv/kubernetes/manifests/httpbin/service.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set httpbin = salt['pillar.get']('kubernetes:common:addons:httpbin', default=defaults['httpbin'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/httpbin/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import httpbin with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | query-httpbin:
9 | http.wait_for_successful_query:
10 | - watch:
11 | - cmd: httpbin
12 | - cmd: httpbin-ingress
13 | - name: https://{{ httpbin.ingress_host }}.{{ public_domain }}
14 | - wait_for: 120
15 | - request_interval: 5
16 | - status: 200
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/README.md:
--------------------------------------------------------------------------------
1 | # Kube-Scan
2 |
3 | Kubernetes workload security scanner
4 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/kube-scan:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/kube-scan/deployment.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/kube-scan
12 | - source: salt://{{ tpldir }}/templates/deployment.yaml.j2
13 | - user: root
14 | - group: root
15 | - mode: "0644"
16 | - template: jinja
17 | - context:
18 | tpldir: {{ tpldir }}
19 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/defaults.yaml:
--------------------------------------------------------------------------------
1 | kube_scan:
2 | scanner_image: docker.io/octarinesec/kubescan-scanner:20.5
3 | ui_image: docker.io/octarinesec/kubescan-scanner-ui:20.5
4 | ingress_host: kube-scan
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: kube-scan
5 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .namespace
4 | - .install
5 | - .ingress
6 | - .test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/install.sls:
--------------------------------------------------------------------------------
1 | kube-scan:
2 | cmd.run:
3 | - watch:
4 | - file: /srv/kubernetes/manifests/kube-scan/deployment.yaml
5 | - name: |
6 | kubectl apply -f /srv/kubernetes/manifests/kube-scan/deployment.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set kube_scan = salt['pillar.get']('kubernetes:common:addons:kube_scan', default=defaults['kube_scan'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/kube-scan/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import kube_scan with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | query-kube-scan:
9 | http.wait_for_successful_query:
10 | - watch:
11 | - cmd: kube-scan
12 | - cmd: kube-scan-ingress
13 | - name: https://{{ kube_scan.ingress_host }}.{{ public_domain }}
14 | - wait_for: 120
15 | - request_interval: 5
16 | - status: 200
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/node-problem-detector/defaults.yaml:
--------------------------------------------------------------------------------
1 | node_problem_detector:
2 | image: k8s.gcr.io/node-problem-detector/node-problem-detector:v0.8.7
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/node-problem-detector/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.node-problem-detector.config
3 | - kubernetes.addons.node-problem-detector.install
4 | - kubernetes.addons.node-problem-detector.test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/node-problem-detector/install.sls:
--------------------------------------------------------------------------------
1 | node-problem-detector:
2 | cmd.run:
3 | - watch:
4 | - file: /srv/kubernetes/manifests/node-problem-detector/node-problem-detector-config.yaml
5 | - file: /srv/kubernetes/manifests/node-problem-detector/node-problem-detector.yaml
6 | - name: |
7 | kubectl apply -f /srv/kubernetes/manifests/node-problem-detector/
8 | - onlyif: http --verify false https://localhost:6443/livez?verbose
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/node-problem-detector/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set node_problem_detector = salt['pillar.get']('kubernetes:common:addons:node_problem_detector', default=defaults['node_problem_detector'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/node-problem-detector/test.sls:
--------------------------------------------------------------------------------
1 | node-problem-detector-wait:
2 | cmd.run:
3 | - require:
4 | - cmd: node-problem-detector
5 | - runas: root
6 | - name: |
7 | kubectl -n kube-system wait pod --for=condition=Ready -l app=node-problem-detector
8 | - timeout: 180
9 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/open-policy-agent/defaults.yaml:
--------------------------------------------------------------------------------
1 | opa:
2 | gatekeeper_image: docker.io/openpolicyagent/gatekeeper:v3.3.0
3 | trusted_registries:
4 | - docker.io
5 | - quay.io
6 |     - k8s.gcr.io
7 | - docker.elastic.co
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/open-policy-agent/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | admission.gatekeeper.sh/ignore: no-self-managing
6 | control-plane: controller-manager
7 | gatekeeper.sh/system: "yes"
8 | name: gatekeeper-system
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/open-policy-agent/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .config
5 | - .namespace
6 | - .install
7 | - .policy
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/open-policy-agent/install.sls:
--------------------------------------------------------------------------------
1 | open-policy-agent:
2 | cmd.run:
3 | - watch:
4 | - file: /srv/kubernetes/manifests/open-policy-agent/gatekeeper.yaml
5 | - cmd: open-policy-agent-namespace
6 | - runas: root
7 | - name: |
8 | kubectl apply -f /srv/kubernetes/manifests/open-policy-agent/gatekeeper.yaml
9 | - onlyif: http --verify false https://localhost:6443/livez?verbose
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/open-policy-agent/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set opa = salt['pillar.get']('kubernetes:common:addons:opa', default=defaults['opa'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-cockroachdb/README.md:
--------------------------------------------------------------------------------
1 | # Rook-Cockroachdb
2 |
3 | ## References
4 |
5 | *
6 | *
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-cockroachdb/defaults.yaml:
--------------------------------------------------------------------------------
1 | rook_cockroachdb:
2 | version: 1.5.8
3 | node_count: 3
4 | capacity: 4Gi
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-cockroachdb/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: rook-cockroachdb-system
6 | ---
7 | apiVersion: v1
8 | kind: Namespace
9 | metadata:
10 | name: rook-cockroachdb
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-cockroachdb/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .config
5 | - .namespace
6 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
7 | - .prometheus
8 | {%- endif %}
9 | - .install
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-cockroachdb/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set rook_cockroachdb = salt['pillar.get']('kubernetes:common:addons:rook_cockroachdb', default=defaults['rook_cockroachdb'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-yugabytedb/defaults.yaml:
--------------------------------------------------------------------------------
1 | rook_yugabytedb:
2 | version: 1.5.8
3 | node_count: 3
4 | capacity: 4Gi
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-yugabytedb/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: rook-yugabytedb-system
6 | ---
7 | apiVersion: v1
8 | kind: Namespace
9 | metadata:
10 | name: rook-yugabytedb
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-yugabytedb/files/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | kind: ServiceMonitor
2 | apiVersion: monitoring.coreos.com/v1
3 | metadata:
4 | name: rook-yugabytedb
5 | namespace: rook-yugabytedb
6 | spec:
7 | endpoints:
8 | - interval: 30s
9 | port: http
10 |     path: /metrics
11 | selector:
12 | matchLabels:
13 |       app.kubernetes.io/instance: rook-yugabytedb
14 |       app.kubernetes.io/name: rook-yugabytedb
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-yugabytedb/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .config
5 | - .namespace
6 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
7 | - .prometheus
8 | {%- endif %}
9 | - .install
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/rook-yugabytedb/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set rook_yugabytedb = salt['pillar.get']('kubernetes:common:addons:rook_yugabytedb', default=defaults['rook_yugabytedb'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/defaults.yaml:
--------------------------------------------------------------------------------
1 | tekton:
2 | version: 0.23.0-1
3 | operator_image: gcr.io/tekton-releases/github.com/tektoncd/operator/cmd/kubernetes/operator:0.23.0-1
4 | proxy_image: gcr.io/tekton-releases/github.com/tektoncd/operator/cmd/kubernetes/webhook:0.23.0-1
5 | ingress_host: tekton
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: tekton-operator
6 | ---
7 | apiVersion: v1
8 | kind: Namespace
9 | metadata:
10 | name: tekton-pipelines
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .repo
5 | - .osprep
6 | - .config
7 | - .namespace
8 | # {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
9 | # - .prometheus
10 | # {%- endif %}
11 | - .install
12 | - .ingress
13 | - .test
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set tekton = salt['pillar.get']('kubernetes:common:addons:tekton', default=defaults['tekton'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/osprep.sls:
--------------------------------------------------------------------------------
1 | tektoncd-cli:
2 | pkg.latest
3 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/repo.sls:
--------------------------------------------------------------------------------
1 | tekton-repo:
2 | pkgrepo.managed:
3 | - name: deb http://ppa.launchpad.net/tektoncd/cli/ubuntu jammy main
4 | - dist: jammy
5 | - file: /etc/apt/sources.list.d/tektoncd-ubuntu-cli.list
6 | - gpgcheck: 1
7 | - keyserver: keyserver.ubuntu.com
8 | - keyid: a40e52296e4cf9f90dd1e3bb3efe0e0a2f2f60aa
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/tekton/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import tekton with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | query-tekton:
9 | http.wait_for_successful_query:
10 | - watch:
11 | - cmd: tekton
12 | - cmd: tekton-ingress
13 | - name: https://{{ tekton.ingress_host }}.{{ public_domain }}
14 | - wait_for: 120
15 | - request_interval: 5
16 | - status: 200
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/README.md:
--------------------------------------------------------------------------------
1 | # Weave scope
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/defaults.yaml:
--------------------------------------------------------------------------------
1 | weave_scope:
2 | image: docker.io/weaveworks/scope:1.13.2
3 | ingress_host: scope
4 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/files/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | # borrowed from https://cloud.weave.works/k8s/scope.yaml?k8s-version=1.9.3
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: weave-scope
6 | labels:
7 | name: weave-scope
8 | namespace: weave
9 | roleRef:
10 | kind: ClusterRole
11 | name: weave-scope
12 | apiGroup: rbac.authorization.k8s.io
13 | subjects:
14 | - kind: ServiceAccount
15 | name: weave-scope
16 | namespace: weave
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | # borrowed from https://cloud.weave.works/k8s/scope.yaml?k8s-version=1.9.3
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: weave
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/files/psp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: weave-scope
5 | spec:
6 | privileged: true
7 | hostPID: true
8 | hostNetwork: true
9 | allowedCapabilities:
10 | - 'NET_ADMIN'
11 | fsGroup:
12 | rule: RunAsAny
13 | runAsUser:
14 | rule: RunAsAny
15 | seLinux:
16 | rule: RunAsAny
17 | supplementalGroups:
18 | rule: RunAsAny
19 | volumes:
20 | - secret
21 | - hostPath
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/files/sa.yaml:
--------------------------------------------------------------------------------
1 | # borrowed from https://cloud.weave.works/k8s/scope.yaml?k8s-version=1.9.3
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: weave-scope
6 | labels:
7 | name: weave-scope
8 | namespace: weave
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/files/svc.yaml:
--------------------------------------------------------------------------------
1 | # borrowed from https://cloud.weave.works/k8s/scope.yaml?k8s-version=1.9.3
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: weave-scope-app
6 | labels:
7 | name: weave-scope-app
8 | app: weave-scope
9 | weave-cloud-component: scope
10 | weave-scope-component: app
11 | namespace: weave
12 | spec:
13 | ports:
14 | - name: app
15 | port: 80
16 | protocol: TCP
17 | targetPort: 4040
18 | selector:
19 | name: weave-scope-app
20 | app: weave-scope
21 | weave-cloud-component: scope
22 | weave-scope-component: app
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.addons.weave-scope.config
3 | - kubernetes.addons.weave-scope.namespace
4 | - kubernetes.addons.weave-scope.install
5 | - kubernetes.addons.weave-scope.ingress
6 | - kubernetes.addons.weave-scope.test
7 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/addons/weave-scope/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set weave_scope = salt['pillar.get']('kubernetes:common:addons:weave_scope', default=defaults['weave_scope'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/argo/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/argo/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set argo = salt['pillar.get']('kubernetes:charts:argo', default=defaults['argo'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/argo/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import argo with context %}
5 |
6 | argo-repos:
7 | helm.repo_managed:
8 | {%- if argo.enabled %}
9 | - present:
10 | - name: argo
11 | url: {{ argo.url }}
12 | {%- else %}
13 | - absent:
14 | - argo
15 | {%- endif %}
16 |
17 | {%- if argo.enabled %}
18 | argo-repos-update:
19 | helm.repo_updated:
20 | - name: argo
21 | {%- endif %}
22 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/concourse/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import concourse with context %}
5 |
6 | {% set file_state = 'absent' %}
7 | {% if concourse.enabled -%}
8 | {% set file_state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/concourse:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
18 | /srv/kubernetes/manifests/concourse:
19 | file.directory:
20 | - user: root
21 | - group: root
22 | - dir_mode: "0750"
23 | - makedirs: True
24 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/concourse/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/concourse/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set concourse = salt['pillar.get']('kubernetes:charts:concourse', default=defaults['concourse'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/concourse/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import concourse with context %}
5 |
6 | concourse-repos:
7 | helm.repo_managed:
8 | {%- if concourse.enabled %}
9 | - present:
10 | - name: concourse
11 | url: {{ concourse.url }}
12 | {%- else %}
13 | - absent:
14 | - concourse
15 | {%- endif %}
16 |
17 | {%- if concourse.enabled %}
18 | concourse-repos-update:
19 | helm.repo_updated:
20 | - name: concourse
21 | {%- endif %}
22 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/coredns/defaults.yaml:
--------------------------------------------------------------------------------
1 | coredns:
2 | enabled: true
3 | version: 1.10.1
4 | chart_version: 1.22.0
5 | url: https://coredns.github.io/helm/
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/coredns/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/coredns/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set coredns = salt['pillar.get']('kubernetes:charts:coredns', default=defaults['coredns'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/coredns/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import coredns with context %}
5 |
6 | coredns-repos:
7 | helm.repo_managed:
8 | {%- if coredns.enabled %}
9 | - present:
10 | - name: coredns
11 | url: {{ coredns.url }}
12 | {%- else %}
13 | - absent:
14 | - coredns
15 | {%- endif %}
16 |
17 | {%- if coredns.enabled %}
18 | coredns-repos-update:
19 | helm.repo_updated:
20 | - name: coredns
21 | {%- endif %}
22 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/coredns/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import coredns with context %}
5 |
6 | image:
7 |   tag: "{{ coredns.version }}"
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/falco/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import falco with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if falco.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/falco:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/falco/defaults.yaml:
--------------------------------------------------------------------------------
1 | falco:
2 | enabled: false
3 | version: 0.34.1
4 | chart_version: 3.1.5
5 | url: https://falcosecurity.github.io/charts/
6 | pod_security_policy: false
7 | http_proxy: ""
8 | https_proxy: ""
9 | no_proxy: ""
10 | ebpf:
11 | enable: false
12 | audit_log:
13 | enable: false
14 | dynamic_backend_enable: false
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/falco/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/falco/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set falco = salt['pillar.get']('kubernetes:charts:falco', default=defaults['falco'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/falco/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import falco with context %}
5 |
6 | falcosecrutiy-repos:
7 |   helm.repo_managed:
8 |     {%- if falco.enabled %}
9 |     - present:
10 |       - name: falcosecurity
11 |         url: {{ falco.url }}
12 |     {%- else %}
13 |     - absent:
14 |       - falcosecurity
15 |     {%- endif %}
16 |
17 | {%- if falco.enabled %}
18 | falcosecrutiy-repos-update:
19 |   helm.repo_updated:
20 |     - name: falcosecurity
21 |     {%- endif %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/fission:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/fission/values.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/fission
12 | - source: salt://{{ tpldir }}/templates/values.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
19 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/defaults.yaml:
--------------------------------------------------------------------------------
1 | fission:
2 | version: 1.12.0
3 | client_version: 1.12.0
4 | workflows_version: 0.6.0
5 | fluentd_version: 1.0.4
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: fission
6 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/init.sls:
--------------------------------------------------------------------------------
1 | {%- set public_domain = pillar['public-domain'] -%}
2 | {%- from "kubernetes/map.jinja" import charts with context -%}
3 | {%- from "kubernetes/map.jinja" import common with context -%}
4 |
5 | include:
6 | - .config
7 | - .charts
8 | - .namespace
9 | - .install
10 | # - .ingress
11 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
12 | - .prometheus
13 | {%- endif %}
14 | - .test
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set fission = salt['pillar.get']('kubernetes:charts:fission', default=defaults['fission'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/fission/namespace.sls:
--------------------------------------------------------------------------------
1 | fission-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/fission
5 | - name: /srv/kubernetes/manifests/fission/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/fission/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/fission/namespace.yaml
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/charts.sls:
--------------------------------------------------------------------------------
1 | gitea-remove-charts:
2 | file.absent:
3 | - name: /srv/kubernetes/manifests/gitea/gitea
4 |
5 | gitea-fetch-charts:
6 | cmd.run:
7 | - runas: root
8 | - require:
9 | - file: gitea-remove-charts
10 | - file: /srv/kubernetes/manifests/gitea
11 | - cwd: /srv/kubernetes/manifests/gitea
12 | - name: |
13 | helm repo add gitea https://dl.gitea.io/charts/
14 | helm fetch --untar gitea/gitea
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/gitea:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/gitea/values.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/gitea
12 | - source: salt://{{ tpldir }}/templates/values.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
19 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/defaults.yaml:
--------------------------------------------------------------------------------
1 | gitea:
2 | version: 1.13.2
3 | username: gitea
4 | password: V3ry1ns3cur3P4ssw0rd
5 | useremail: gitea@example.com
6 | ingress_host: gitea
7 | minio_image: docker.io/minio/minio:latest
8 | # oauth:
9 | # provider: keycloak
10 | # keycloak:
11 | # realm: default
12 | # s3:
13 | # accesskey: argo
14 | # secretkey: P4ssw0rd
15 | # bucket: argo-artifacts
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: gitea
6 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/files/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: gitea
5 | namespace: gitea
6 | spec:
7 | namespaceSelector:
8 | matchNames:
9 | - gitea
10 | selector:
11 | matchLabels:
12 | app: gitea
13 | endpoints:
14 | - port: prometheus
15 | path: /
16 | interval: 5s
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set gitea = salt['pillar.get']('kubernetes:charts:gitea', default=defaults['gitea'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/namespace.sls:
--------------------------------------------------------------------------------
1 | gitea-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/gitea
5 | - name: /srv/kubernetes/manifests/gitea/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/gitea/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/gitea/namespace.yaml
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/admins-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gitea-admins",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/client-scopes.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "allowed-services",
3 | "description": "List of approved client ids",
4 | "protocol": "openid-connect",
5 | "attributes": {
6 | "include.in.token.scope": "true",
7 | "display.on.consent.screen": "true"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/groups-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "groups",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-group-membership-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "full.path": "true",
8 | "id.token.claim": "true",
9 | "access.token.claim": "true",
10 | "claim.name": "groups",
11 | "userinfo.token.claim": "true"
12 | }
13 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-gitea",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "gitea",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/userid-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "userid",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-usermodel-property-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "userinfo.token.claim": "true",
8 | "user.attribute": "id",
9 | "id.token.claim": "true",
10 | "access.token.claim": "true",
11 | "claim.name": "userid",
12 | "jsonType.label": "String"
13 | }
14 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/username-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "username",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-usermodel-property-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "userinfo.token.claim": "true",
8 | "user.attribute": "username",
9 | "id.token.claim": "true",
10 | "access.token.claim": "true",
11 | "claim.name": "username",
12 | "jsonType.label": "String"
13 | }
14 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/oauth/keycloak/files/users-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "gitea-users",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/gitea/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Import merged gitea settings (defaults.yaml overridden by pillar) from map.jinja #}
5 | {% from tpldir ~ "/map.jinja" import gitea with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 | {%- from "kubernetes/map.jinja" import charts with context -%}
8 |
9 | query-gitea:
10 |   http.wait_for_successful_query:
11 |     - name: "https://{{ gitea.ingress_host }}.{{ public_domain }}/"
12 |     - wait_for: 180
13 |     - request_interval: 5
14 |     - status: 200
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/harbor/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import harbor with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if harbor.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/harbor:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
18 | /srv/kubernetes/manifests/harbor:
19 | file.directory:
20 | - user: root
21 | - group: root
22 | - dir_mode: "0750"
23 | - makedirs: True
24 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/harbor/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import charts with context -%}
2 |
3 | include:
4 | - .config
5 | - .repos
6 | - .install
7 | {%- if charts.get('keycloak', {'enabled': False}).enabled %}
8 | - .finalize
9 | {% endif %}
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/harbor/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set harbor = salt['pillar.get']('kubernetes:charts:harbor', default=defaults['harbor'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/harbor/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import harbor with context %}
5 |
6 | harbor-repos:
7 |   helm.repo_managed:
8 |     {%- if harbor.enabled %}
9 |     - present:
10 |       - name: harbor
11 |         url: {{ harbor.url }}
12 |     - repo_update: True
13 |     {%- else %}
14 |     - absent:
15 |       - harbor
16 |     {%- endif %}
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/charts.sls:
--------------------------------------------------------------------------------
1 | jenkins-remove-charts:
2 | file.absent:
3 | - name: /srv/kubernetes/manifests/jenkins/jenkins
4 |
5 | jenkins-fetch-charts:
6 | cmd.run:
7 | - runas: root
8 | - require:
9 | - file: jenkins-remove-charts
10 | - file: /srv/kubernetes/manifests/jenkins
11 | - cwd: /srv/kubernetes/manifests/jenkins
12 | - name: |
13 | helm repo add jenkinsci https://charts.jenkins.io
14 | helm fetch --untar jenkinsci/jenkins
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/jenkins:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/jenkins/values.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/jenkins
12 | - source: salt://{{ tpldir }}/templates/values.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
19 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/defaults.yaml:
--------------------------------------------------------------------------------
1 | jenkins:
2 | version: 11.0.2
3 | username: jenkins
4 | password: V3ry1ns3cur3P4ssw0rd
5 | ingress_host: jenkins
6 | plugins:
7 | - kubernetes:1.25.7
8 | - workflow-job:2.39
9 | - workflow-aggregator:2.6
10 | - credentials-binding:1.23
11 | - git:4.2.2
12 | - configuration-as-code:1.43
13 | - oic-auth:1.8
14 | - openid:2.4
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: jenkins
6 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .charts
4 | - .namespace
5 | - .install
6 | - .ingress
7 | - .test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set jenkins = salt['pillar.get']('kubernetes:charts:jenkins', default=defaults['jenkins'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/namespace.sls:
--------------------------------------------------------------------------------
1 | jenkins-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/jenkins
5 | - name: /srv/kubernetes/manifests/jenkins/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/jenkins/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/jenkins/namespace.yaml
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/jenkins/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Import merged jenkins settings (defaults.yaml overridden by pillar) from map.jinja #}
5 | {% from tpldir ~ "/map.jinja" import jenkins with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 | {%- from "kubernetes/map.jinja" import charts with context -%}
8 |
9 | query-jenkins:
10 |   http.wait_for_successful_query:
11 |     - name: "https://{{ jenkins.ingress_host }}.{{ public_domain }}/"
12 |     - wait_for: 180
13 |     - request_interval: 5
14 |     - status: 200
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/README.md:
--------------------------------------------------------------------------------
1 | # Keycloak GateKeeper
2 |
3 | Insert Keycloak Gatekeeper authentication proxy to services that do not support OIDC authentication.
4 |
5 | * https://www.keycloak.org/docs-api/5.0/rest-api/index.html
6 | * https://httpie.org/doc
7 | * https://www.ctl.io/developers/blog/post/curl-vs-httpie-http-apis
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/charts.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Import merged keycloak_gatekeeper settings (defaults.yaml overridden by pillar) from map.jinja #}
5 | {% from tpldir ~ "/map.jinja" import keycloak_gatekeeper with context %}
6 |
7 | helm-charts:
8 |   git.latest:
9 |     - name: https://github.com/fjudith/charts
10 |     - target: /srv/kubernetes/charts
11 |     - force_reset: True
12 |     - rev: {{ keycloak_gatekeeper.version }}
13 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/defaults.yaml:
--------------------------------------------------------------------------------
1 | keycloak_gatekeeper:
2 | version: "master"
3 | realm: "default"
4 | groups:
5 | - "kubernetes-admins,cluster-admin"
6 | - "kubernetes-users,view"
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/alertmanager-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-alertmanager",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "alertmanager",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/client-scopes.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "allowed-services",
3 | "description": "List of approved client ids",
4 | "protocol": "openid-connect",
5 | "attributes": {
6 | "include.in.token.scope": "true",
7 | "display.on.consent.screen": "true"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/groups-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "groups",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-group-membership-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "full.path": "true",
8 | "id.token.claim": "true",
9 | "access.token.claim": "true",
10 | "claim.name": "groups",
11 | "userinfo.token.claim": "true"
12 | }
13 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/keycloak-kubernetes-admins-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kubernetes-admins",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/keycloak-kubernetes-users-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "kubernetes-users",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/kubernetes-dashboard-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-kubernetes",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "kubernetes",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/prometheus-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-prometheus",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "prometheus",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/rook-ceph-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-rook-ceph",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "rook-ceph",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/files/weave-scope-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-weave-scope",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "weave-scope",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set keycloak_gatekeeper = salt['pillar.get']('kubernetes:charts:keycloak_gatekeeper', default=defaults['keycloak_gatekeeper'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak-gatekeeper/templates/keycloak-kubernetes-rbac.yaml.j2:
--------------------------------------------------------------------------------
1 | {% from tpldir ~ "/map.jinja" import keycloak_gatekeeper with context %}
2 |
3 | {%- for item in keycloak_gatekeeper.groups -%}
4 | {%- set entry = item.split(',') -%}
5 | kind: ClusterRoleBinding
6 | apiVersion: rbac.authorization.k8s.io/v1
7 | metadata:
8 | name: keycloak-{{ entry[0] }}-group
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: {{ entry[1] }}
13 | subjects:
14 | - kind: Group
15 | name: /{{ entry[0] }}
16 | {%- if not loop.last %}
17 | ---
18 | {% endif %}
19 | {%- endfor -%}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/defaults.yaml:
--------------------------------------------------------------------------------
1 | keycloak:
2 | enabled: true
3 | version: 21.1.1
4 | chart_version: 15.0.1
5 | url: https://charts.bitnami.com/bitnami
6 | admin_password: 1nS3cur3P4ssw0rd
7 | ingress:
8 | enabled: true
9 | host: keycloak
10 | realms:
11 | - name: default
12 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set keycloak = salt['pillar.get']('kubernetes:charts:keycloak', default=defaults['keycloak'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/realms/default/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .realm
3 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/realms/default/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/../../defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set keycloak = salt['pillar.get']('kubernetes:charts:keycloak', default=defaults['keycloak'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/keycloak/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import keycloak with context %}
5 |
6 | bitnami-repos:
7 |   helm.repo_managed:
8 |     {%- if keycloak.enabled %}
9 |     - present:
10 |       - name: bitnami
11 |         url: {{ keycloak.url }}
12 |     {%- else %}
13 |     - absent:
14 |       - bitnami
15 |     {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/kube-prometheus/defaults.yaml:
--------------------------------------------------------------------------------
1 | kube_prometheus:
2 | enabled: True
3 | version: 0.63.0
4 | chart_version: 45.25.0
5 | url: https://prometheus-community.github.io/helm-charts/
6 | ingress:
7 | alertmanager:
8 | enabled: False
9 | host: alertmanager
10 | prometheus:
11 | enabled: False
12 | host: prometheus
13 | grafana:
14 | enabled: True
15 | host: grafana
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/kube-prometheus/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: v1
4 | kind: Namespace
5 | metadata:
6 | name: monitoring
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/kube-prometheus/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .dashboards
4 | - .repos
5 | - .install
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/kube-prometheus/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set kube_prometheus = salt['pillar.get']('kubernetes:charts:kube_prometheus', default=defaults['kube_prometheus'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/kube-prometheus/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import kube_prometheus with context %}
5 |
6 | kube-prometheus-repos:
7 |   helm.repo_managed:
8 |     {%- if kube_prometheus.enabled %}
9 |     - present:
10 |       - name: prometheus-community
11 |         url: {{ kube_prometheus.url }}
12 |     {%- else %}
13 |     - absent:
14 |       - prometheus-community
15 |     {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/README.md:
--------------------------------------------------------------------------------
1 | # Mailhog
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/charts.sls:
--------------------------------------------------------------------------------
1 | mailhog-remove-charts:
2 | file.absent:
3 | - name: /srv/kubernetes/manifests/mailhog/mailhog
4 |
5 | mailhog-fetch-charts:
6 | cmd.run:
7 | - runas: root
8 | - require:
9 | - file: mailhog-remove-charts
10 | - file: /srv/kubernetes/manifests/mailhog
11 | - cwd: /srv/kubernetes/manifests/mailhog
12 | - name: |
13 | helm repo add codecentric https://codecentric.github.io/helm-charts
14 | helm fetch --untar codecentric/mailhog
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/mailhog:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/mailhog/values.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/mailhog
12 | - source: salt://{{ tpldir }}/templates/values.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/defaults.yaml:
--------------------------------------------------------------------------------
1 | mailhog:
2 |   version: 1.0.1
3 |   ingress_host: mail
4 |   users:  # basic-auth users; placeholder passwords — override via pillar kubernetes:charts:mailhog (merged in map.jinja)
5 |     admin: V3ry1nS3cur3P4ssW0rd
6 |     user1: Us3r1
7 |     user2: Us3r2
8 |     user3: Us3r3
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: mailhog
6 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/ingress.sls:
--------------------------------------------------------------------------------
1 | mailhog-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/mailhog/ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - watch:
13 | - file: /srv/kubernetes/manifests/mailhog/ingress.yaml
14 | - cmd: mailhog-namespace
15 | - runas: root
16 | - name: kubectl apply -f /srv/kubernetes/manifests/mailhog/ingress.yaml
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.charts.mailhog.config
3 | - kubernetes.charts.mailhog.charts
4 | - kubernetes.charts.mailhog.namespace
5 | - kubernetes.charts.mailhog.install
6 | - kubernetes.charts.mailhog.ingress
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set mailhog = salt['pillar.get']('kubernetes:charts:mailhog', default=defaults['mailhog'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/namespace.sls:
--------------------------------------------------------------------------------
1 | mailhog-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/mailhog
5 | - name: /srv/kubernetes/manifests/mailhog/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/mailhog/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/mailhog/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/mailhog/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import mailhog with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | auth:
9 | enabled: True
10 | existingSecret: mailhog-users
11 | fileName: auth.txt
12 |
13 | extraEnv:
14 | - name: MH_HOSTNAME
15 | value: "{{ mailhog.ingress_host }}.{{ public_domain }}"
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import metrics_server with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if metrics_server.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/metrics-server:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/defaults.yaml:
--------------------------------------------------------------------------------
1 | metrics_server:
2 | enabled: true
3 | version: 0.6.3
4 | chart_version: 3.10.0
5 | url: https://kubernetes-sigs.github.io/metrics-server/
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set metrics_server = salt['pillar.get']('kubernetes:charts:metrics_server', default=defaults['metrics_server'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import metrics_server with context %}
5 | 
6 | {#- Register the metrics-server chart repo while the addon is
7 |     enabled; remove the repo again once the addon is disabled. #}
8 | {%- if metrics_server.enabled %}
9 | metrics-server-repos:
10 |   helm.repo_managed:
11 |     - present:
12 |       - name: metrics-server
13 |         url: {{ metrics_server.url }}
14 | {%- else %}
15 | metrics-server-repos:
16 |   helm.repo_managed:
17 |     - absent:
18 |       - metrics-server
19 | {%- endif %}
20 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/metrics-server/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import metrics_server with context %}
5 |
6 | image:
7 | tag: v{{ metrics_server.version }}
8 |
9 | args:
10 | - --kubelet-insecure-tls
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/defaults.yaml:
--------------------------------------------------------------------------------
1 | nats_operator:
2 | enabled: true
3 | version: 0.8.3
4 | chart_version: 0.8.3
5 | api_version: nats.io/v1alpha2
6 | url: https://nats-io.github.io/k8s/helm/charts/
7 | nats:
8 | version: 2.9.16
9 | stan:
10 | version: 0.24.1
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: v1
4 | kind: Namespace
5 | metadata:
6 | name: nats-io
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set nats_operator = salt['pillar.get']('kubernetes:charts:nats_operator', default=defaults['nats_operator'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import nats_operator with context %}
5 | 
6 | {#- Register the nats chart repo while the operator is enabled;
7 |     remove the repo again once it is disabled. #}
8 | {%- if nats_operator.enabled %}
9 | nats-repos:
10 |   helm.repo_managed:
11 |     - present:
12 |       - name: nats
13 |         url: {{ nats_operator.url }}
14 | {%- else %}
15 | nats-repos:
16 |   helm.repo_managed:
17 |     - absent:
18 |       - nats
19 | {%- endif %}
20 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/templates/nats-cluster.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import nats_operator with context %}
6 |
7 | apiVersion: nats.io/v1alpha2
8 | kind: NatsCluster
9 | metadata:
10 | name: nats-cluster
11 | namespace: nats-io
12 | spec:
13 | size: 3
14 | version: {{ nats_operator.nats.version }}
15 | pod:
16 | enableMetrics: true
17 | metricsImage: "synadia/prometheus-nats-exporter"
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/templates/stan-cluster.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import nats_operator with context %}
6 |
7 | apiVersion: streaming.nats.io/v1alpha1
8 | kind: NatsStreamingCluster
9 | metadata:
10 | name: stan-cluster
11 | namespace: nats-io
12 | spec:
13 | size: 3
14 | natsSvc: nats-cluster
15 | version: {{ nats_operator.stan.version }}
16 | config:
17 | ftgroup: "stan"
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nats-operator/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import nats_operator with context %}
5 | {%- from "kubernetes/map.jinja" import common with context -%}
6 |
7 | image:
8 | tag: {{ nats_operator.version }}
9 |
10 | metrics:
11 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
12 | enabled: true
13 | serviceMonitor:
14 | enabled: true
15 | {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/charts.sls:
--------------------------------------------------------------------------------
1 | # Remove any previously fetched chart copy so the fetch below starts clean.
2 | nuclio-remove-charts:
3 |   file.absent:
4 |     - name: /srv/kubernetes/manifests/nuclio/nuclio
5 | 
6 | # Add the upstream repo and fetch (untarred) the nuclio chart.
7 | # Fixed: dropped a stray bare `helm` command that only printed usage text.
8 | nuclio-fetch-charts:
9 |   cmd.run:
10 |     - runas: root
11 |     - require:
12 |       - file: nuclio-remove-charts
13 |       - file: /srv/kubernetes/manifests/nuclio
14 |     - cwd: /srv/kubernetes/manifests/nuclio
15 |     - name: |
16 |         helm repo add nuclio https://nuclio.github.io/nuclio/charts
17 |         helm fetch --untar nuclio/nuclio
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/clean.sls:
--------------------------------------------------------------------------------
1 | # Tear down the nuclio Helm release together with its CRDs and the
2 | # registry secrets created for it.
3 | nuclio-teardown:
4 |   cmd.run:
5 |     - runas: root
6 |     - group: root
7 |     - name: |
8 |         helm delete -n nuclio nuclio
9 |         kubectl delete crd nucliofunctionevents.nuclio.io
10 |         kubectl delete crd nucliofunctions.nuclio.io
11 |         kubectl delete crd nuclioprojects.nuclio.io
12 |         kubectl -n nuclio delete secret registry-harbor-local
13 |         kubectl -n nuclio delete secret registry-docker-hub
14 |         kubectl -n nuclio delete secret registry-quay
15 |         # NOTE(review): "registy-harbor" looks like a typo of "registry-harbor";
16 |         # confirm the secret's actual name before changing this command.
17 |         kubectl -n nuclio delete secret registy-harbor
18 | 
19 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/config.sls:
--------------------------------------------------------------------------------
1 | # Base directory holding the nuclio chart checkout and rendered values.
2 | /srv/kubernetes/manifests/nuclio:
3 |   file.directory:
4 |     - user: root
5 |     - group: root
6 |     - dir_mode: "0750"
7 |     - makedirs: True
8 | 
9 | # Rendered Helm values for the nuclio release.
10 | # Fixed: mode was "0755" (executable data file); use "0644" like the
11 | # values files of the other chart states (e.g. mailhog).
12 | /srv/kubernetes/manifests/nuclio/values.yaml:
13 |   file.managed:
14 |     - require:
15 |       - file: /srv/kubernetes/manifests/nuclio
16 |     - source: salt://{{ tpldir }}/templates/values.yaml.j2
17 |     - user: root
18 |     - group: root
19 |     - mode: "0644"
20 |     - template: jinja
21 |     - context:
22 |         tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: nuclio
5 | ---
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/files/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: nuclio-system
5 | namespace: nuclio
6 | spec:
7 | namespaceSelector:
8 | matchNames:
9 | - nuclio
10 | selector:
11 | matchLabels:
12 | nuclio.io/class: function
13 | endpoints:
14 | - port: metrics
15 | path: /metrics
16 | interval: 5s
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/ingress.sls:
--------------------------------------------------------------------------------
1 | nuclio-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/nuclio/ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - watch:
13 | - file: /srv/kubernetes/manifests/nuclio/ingress.yaml
14 | - cmd: nuclio-namespace
15 | - runas: root
16 | - name: kubectl apply -f /srv/kubernetes/manifests/nuclio/ingress.yaml
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .config
5 | - .charts
6 | - .namespace
7 | - .ingress
8 | - .registry
9 | - .install
10 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
11 | - .prometheus
12 | {%- endif %}
13 |
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/install.sls:
--------------------------------------------------------------------------------
1 | nuclio:
2 | cmd.run:
3 | - runas: root
4 | - require:
5 | - cmd: nuclio-namespace
6 | - watch:
7 | - file: /srv/kubernetes/manifests/nuclio/values.yaml
8 | - cmd: nuclio-fetch-charts
9 | - cwd: /srv/kubernetes/manifests/nuclio/nuclio
10 | - name: |
11 | helm upgrade --install nuclio --namespace nuclio \
12 | --values /srv/kubernetes/manifests/nuclio/values.yaml \
13 | "./" --wait --timeout 5m
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set nuclio = salt['pillar.get']('kubernetes:charts:nuclio', default=defaults['nuclio'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/nuclio/namespace.sls:
--------------------------------------------------------------------------------
1 | nuclio-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/nuclio
5 | - name: /srv/kubernetes/manifests/nuclio/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/nuclio/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/nuclio/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/defaults.yaml:
--------------------------------------------------------------------------------
1 | openfaas:
2 |   client_version: 0.13.1
3 |   client_source_hash: 3c4da3cd862ae606f61b4e725357f805145fd64cc4a8860cc244cd424f0ee666
4 |   oauth2_image: docker.io/openfaas/openfaas-oidc-plugin:0.3.8
5 |   nats_connector_image: docker.io/openfaas/nats-connector:0.2.2
6 |   stan_image: docker.io/amd64/nats-streaming:0.20.0-scratch
7 |   ingress_host: openfaas
8 |   username: admin
9 |   password: V3ry1ns3cur3  # placeholder — override via pillar kubernetes:charts:openfaas (merged in map.jinja)
10 |   nats_topics: "nats_test,faas-request,"  # NOTE(review): trailing comma may yield an empty topic — confirm intended
11 |   oauth:
12 |     enabled: False
13 |     provider: keycloak
14 |     keycloak:
15 |       realm: default
16 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/files/gateway-metrics-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: gateway-metrics
5 | namespace: openfaas
6 | labels:
7 | app: openfaas
8 | component: gateway
9 | spec:
10 | ports:
11 | - name: metrics
12 | port: 8082
13 | protocol: TCP
14 | targetPort: 8082
15 | selector:
16 | app: gateway
17 | sessionAffinity: None
18 | type: ClusterIP
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: openfaas
5 | labels:
6 | role: openfaas-system
7 | access: openfaas-system
8 | ---
9 | apiVersion: v1
10 | kind: Namespace
11 | metadata:
12 | name: openfaas-fn
13 | labels:
14 | role: openfaas-fn
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/files/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: gateway
5 | namespace: openfaas
6 | labels:
7 | app: gateway
8 | spec:
9 | endpoints:
10 | - port: metrics
11 | path: /metrics
12 | interval: 5s
13 | scrapeTimeout: 3s
14 | selector:
15 | matchLabels:
16 | app: openfaas
17 | component: gateway
18 | namespaceSelector:
19 | matchNames:
20 | - openfaas
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/ingress.sls:
--------------------------------------------------------------------------------
1 | openfaas-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/openfaas/ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - watch:
13 | - file: /srv/kubernetes/manifests/openfaas/ingress.yaml
14 | - cmd: openfaas-namespace
15 | - runas: root
16 | - name: kubectl apply -f /srv/kubernetes/manifests/openfaas/ingress.yaml
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set openfaas = salt['pillar.get']('kubernetes:charts:openfaas', default=defaults['openfaas'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/namespace.sls:
--------------------------------------------------------------------------------
1 | openfaas-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/openfaas
5 | - name: /srv/kubernetes/manifests/openfaas/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/openfaas/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/openfaas/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/admins-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openfaas-admins",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/client-scopes.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "allowed-services",
3 | "description": "List of approved client ids",
4 | "protocol": "openid-connect",
5 | "attributes": {
6 | "include.in.token.scope": "true",
7 | "display.on.consent.screen": "true"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/groups-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "groups",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-group-membership-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "full.path": "true",
8 | "id.token.claim": "true",
9 | "access.token.claim": "true",
10 | "claim.name": "groups",
11 | "userinfo.token.claim": "true"
12 | }
13 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-openfaas",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "openfaas",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/userid-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "userid",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-usermodel-property-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "userinfo.token.claim": "true",
8 | "user.attribute": "id",
9 | "id.token.claim": "true",
10 | "access.token.claim": "true",
11 | "claim.name": "userid",
12 | "jsonType.label": "String"
13 | }
14 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/username-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "username",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-usermodel-property-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "userinfo.token.claim": "true",
8 | "user.attribute": "username",
9 | "id.token.claim": "true",
10 | "access.token.claim": "true",
11 | "claim.name": "username",
12 | "jsonType.label": "String"
13 | }
14 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/oauth/keycloak/files/users-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "openfaas-users",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/templates/secrets.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import openfaas with context %}
6 |
7 | apiVersion: v1
8 | kind: Secret
9 | metadata:
10 | name: basic-auth
11 | namespace: openfaas
12 | labels:
13 | app: openfaas
14 | component: gateway
15 | type: Opaque
16 | data:
17 | basic-auth-user: {{ openfaas.username | base64_encode }}
18 | basic-auth-password: {{ openfaas.password | base64_encode }}
19 |
20 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/openfaas/test.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import openfaas with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | query-openfaas-web:
9 | http.wait_for_successful_query:
10 | - watch:
11 | - cmd: openfaas
12 | - cmd: openfaas-ingress
13 | - name: https://{{ openfaas.ingress_host }}.{{ public_domain }}/healthz
14 | - match: OK
15 | - match_type: string
16 | - wait_for: 120
17 | - request_interval: 5
18 | - status: 200
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/README.md:
--------------------------------------------------------------------------------
1 | # Kratos
2 |
3 | ## Known issue
4 |
5 | This state leverages the CockroachDB Helm Chart instead of the Rook-CockroachDB operator.
6 | The reason is that the statefulset name is static with the Rook operator, leading to conflicts between Hydra and other instances located in the same namespace.
7 |
8 | ## References
9 |
10 | *
11 | *
12 | *
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: ory
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/ingress.sls:
--------------------------------------------------------------------------------
1 | hydra-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/hydra-ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/hydra-ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - watch:
13 | - file: /srv/kubernetes/manifests/hydra-ingress.yaml
14 | - cmd: ory-namespace
15 | - runas: root
16 | - name: kubectl apply -f /srv/kubernetes/manifests/hydra-ingress.yaml
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/kratos-selfservice-ui-node.sls:
--------------------------------------------------------------------------------
1 | kratos-selfservice-ui-node:
2 | cmd.run:
3 | - runas: root
4 | - watch:
5 | - file: /srv/kubernetes/manifests/ory/kratos-selfservice-ui-node-values.yaml
6 | - cmd: ory-namespace
7 | - cmd: kratos-selfservice-ui-node-fetch-charts
8 | - cwd: /srv/kubernetes/manifests/ory/kratos-selfservice-ui-node
9 | - name: |
10 | helm upgrade --install kratos-selfservice-ui-node --namespace ory \
11 | --values /srv/kubernetes/manifests/ory/kratos-selfservice-ui-node-values.yaml \
12 | "./" --wait --timeout 3m
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set ory = salt['pillar.get']('kubernetes:charts:ory', default=defaults['ory'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/namespace.sls:
--------------------------------------------------------------------------------
1 | ory-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/ory
5 | - name: /srv/kubernetes/manifests/ory/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/ory/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/ory/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/templates/hydra-cockroachdb-values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import ory with context %}
6 |
7 | image:
8 | tag: v{{ ory.hydra.cockroachdb.version }}
9 |
10 | statefulset:
11 | replicas: {{ ory.hydra.cockroachdb.node_count }}
12 |
13 | storage:
14 | persistentVolume:
15 | enabled: true
16 | size: {{ ory.hydra.cockroachdb.capacity }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/templates/idp-values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import ory with context %}
6 | {%- set public_domain = pillar['public-domain'] -%}
7 |
8 | image:
9 | tag: v{{ ory.hydra.version }}
10 |
11 | hydraAdminUrl: https://{{ ory.hydra.ingress_host }}-admin.{{ public_domain }}
12 | hydraPublicUrl: https://{{ ory.hydra.ingress_host }}-public.{{ public_domain }}
13 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/ory/templates/kratos-cockroachdb-values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import ory with context %}
6 |
7 | image:
8 | tag: v{{ ory.hydra.cockroachdb.version }}
9 |
10 | statefulset:
11 | replicas: {{ ory.hydra.cockroachdb.node_count }}
12 |
13 | storage:
14 | persistentVolume:
15 | enabled: true
16 | size: {{ ory.hydra.cockroachdb.capacity }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/README.md:
--------------------------------------------------------------------------------
1 | # Proxy Injector
2 |
3 | A Kubernetes controller addon that injects a Keycloak Gatekeeper authentication proxy container into relevant pods.
4 |
5 | * https://github.com/stakater/ProxyInjector
6 | * https://www.keycloak.org/docs-api/7.0/rest-api/index.html
7 | * https://httpie.org/doc
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/defaults.yaml:
--------------------------------------------------------------------------------
1 | proxyinjector:
2 | version: 0.0.23
3 | image: keycloak/keycloak-gatekeeper:7.0.0
4 | ingress_host: kubehttpbin
5 | oauth:
6 | provider: keycloak
7 | keycloak:
8 | realm: default
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: kubehttpbin
6 | ---
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import charts with context -%}
2 |
3 | include:
4 | - .config
5 | - .charts
6 | {%- if charts.get('keycloak', {'enabled': False}).enabled %}
7 | - .oauth
8 | {%- endif %}
9 | - .namespace
10 | - .install
11 | - .ingress
12 | - .test
13 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/install.sls:
--------------------------------------------------------------------------------
1 | # Install or upgrade the ProxyInjector helm release into the `default`
2 | # namespace from the vendored chart checkout.
3 | proxyinjector:
4 |   cmd.run:
5 |     # Re-run the upgrade whenever the rendered values file changes.
6 |     - watch:
7 |       - file: /srv/kubernetes/manifests/proxyinjector/values.yaml
8 |     # Chart directory — presumably populated by this formula's `.charts`
9 |     # state (see init.sls); TODO confirm the checkout layout.
10 |     - cwd: /srv/kubernetes/manifests/proxyinjector/helm/deployments/kubernetes/chart/proxyinjector
11 |     - runas: root
12 |     - name: |
13 |         helm upgrade --install proxyinjector \
14 |         --namespace default \
15 |         --values /srv/kubernetes/manifests/proxyinjector/values.yaml \
16 |         ./ --wait --timeout 3m
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set proxyinjector = salt['pillar.get']('kubernetes:charts:proxyinjector', default=defaults['proxyinjector'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/oauth/keycloak/files/admins-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "demo-admins",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/oauth/keycloak/files/client-scopes.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "allowed-services",
3 | "description": "List of approved client ids",
4 | "protocol": "openid-connect",
5 | "attributes": {
6 | "include.in.token.scope": "true",
7 | "display.on.consent.screen": "true"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/oauth/keycloak/files/groups-protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "groups",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-group-membership-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "full.path": "true",
8 | "id.token.claim": "true",
9 | "access.token.claim": "true",
10 | "claim.name": "groups",
11 | "userinfo.token.claim": "true"
12 | }
13 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/oauth/keycloak/files/protocolmapper.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "audience-demo",
3 | "protocol": "openid-connect",
4 | "protocolMapper": "oidc-audience-mapper",
5 | "consentRequired": false,
6 | "config": {
7 | "included.client.audience": "demo",
8 | "id.token.claim": "false",
9 | "access.token.claim": "true"
10 | }
11 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/oauth/keycloak/files/users-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "demo-users",
3 | "attributes": {},
4 | "realmRoles": [],
5 | "clientRoles": {},
6 | "subGroups": []
7 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/proxyinjector/templates/kubehttpbin-values.yaml.j2:
--------------------------------------------------------------------------------
1 | containerPort: 8080
2 | namespace: kubehttpbin
3 | numReplicas: 3
4 | dockerTag: devel
5 | dockerOrg: arschles
6 | dockerPullPolicy: Always
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/spinnaker/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import spinnaker with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if spinnaker.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/spinnaker:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/spinnaker/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/spinnaker/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set spinnaker = salt['pillar.get']('kubernetes:charts:spinnaker', default=defaults['spinnaker'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/spinnaker/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import spinnaker with context %}
5 | 
6 | # Keep the spinnaker helm repository registered while the chart is
7 | # enabled; unregister it once the chart is disabled.
8 | spinnaker-repos:
9 |   helm.repo_managed:
10 | {%- if spinnaker.enabled %}
11 |     - present:
12 |       - name: spinnaker
13 |         url: {{ spinnaker.url }}
14 | {%- else %}
15 |     - absent:
16 |       - spinnaker
17 | {%- endif %}
18 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/README.md:
--------------------------------------------------------------------------------
1 | # Velero
2 |
3 | ## References
4 |
5 | * https://velero.io/docs/
6 | * https://github.com/vmware-tanzu/velero
7 | * https://github.com/vmware-tanzu/helm-charts
8 | * https://velero.io/docs/main/backup-reference/
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/charts.sls:
--------------------------------------------------------------------------------
1 | velero-remove-charts:
2 | file.absent:
3 | - name: /srv/kubernetes/manifests/velero/velero
4 |
5 | velero-fetch-charts:
6 | cmd.run:
7 | - runas: root
8 | - require:
9 | - file: velero-remove-charts
10 | - file: /srv/kubernetes/manifests/velero
11 | - cwd: /srv/kubernetes/manifests/velero
12 | - name: |
13 | helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
14 | helm fetch --untar vmware-tanzu/velero
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/clean.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import velero with context %}
6 |
7 | velero-teardown:
8 | cmd.run:
9 | - runas: root
10 | - cwd: /srv/kubernetes/manifests/velero/velero
11 | - name: |
12 | velero backup delete nginx-backup --confirm
13 | velero backup delete nginx-daily --confirm
14 | helm delete -n velero velero
15 | kubectl delete -f crds/
16 |
17 |
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/config.sls:
--------------------------------------------------------------------------------
1 | # Directory that receives every velero manifest rendered by salt.
2 | /srv/kubernetes/manifests/velero:
3 |   file.directory:
4 |     - user: root
5 |     - group: root
6 |     - dir_mode: "0750"
7 |     - makedirs: True
8 | 
9 | # Helm values file rendered from the jinja template. It is plain data,
10 | # so it gets mode 0644; the previous 0755 marked it executable and was
11 | # inconsistent with the other managed files in this formula.
12 | /srv/kubernetes/manifests/velero/values.yaml:
13 |   file.managed:
14 |     - require:
15 |       - file: /srv/kubernetes/manifests/velero
16 |     - source: salt://{{ tpldir }}/templates/values.yaml.j2
17 |     - user: root
18 |     - group: root
19 |     - mode: "0644"
20 |     - template: jinja
21 |     - context:
22 |         tpldir: {{ tpldir }}
23 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: velero
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/files/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | kind: ServiceMonitor
2 | apiVersion: monitoring.coreos.com/v1
3 | metadata:
4 | name: velero
5 | namespace: velero
6 | spec:
7 | endpoints:
8 | - interval: 30s
9 | port: monitoring
10 | selector:
11 | matchLabels:
12 | app.kubernetes.io/instance: velero
13 | app.kubernetes.io/name: velero
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/ingress.sls:
--------------------------------------------------------------------------------
1 | velero-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/velero-ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - watch:
13 | - file: /srv/kubernetes/manifests/velero-ingress.yaml
14 | - cmd: velero-namespace
15 | - runas: root
16 | - name: kubectl apply -f /srv/kubernetes/manifests/velero-ingress.yaml
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set velero = salt['pillar.get']('kubernetes:charts:velero', default=defaults['velero'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/namespace.sls:
--------------------------------------------------------------------------------
1 | velero-namespace:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/velero
5 | - name: /srv/kubernetes/manifests/velero/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - runas: root
14 | - watch:
15 | - file: /srv/kubernetes/manifests/velero/namespace.yaml
16 | - name: |
17 | kubectl apply -f /srv/kubernetes/manifests/velero/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/velero/templates/secrets.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import velero with context %}
6 | 
7 | {#- Build the body of an AWS-style shared-credentials file
8 |     ("[default]" section with key id and secret) from the
9 |     velero s3 pillar settings. #}
10 | {% set credentials = "[default]" ~ "\n" ~ "aws_access_key_id = " ~ velero.s3.accesskey ~ "\n" ~ "aws_secret_access_key = " ~ velero.s3.secretkey %}
11 | 
12 | # Secret consumed by velero as its object-storage credentials file.
13 | apiVersion: v1
14 | kind: Secret
15 | metadata:
16 |   name: cloud-credentials
17 |   namespace: velero
18 |   labels:
19 |     app: velero
20 | type: Opaque
21 | data:
22 |   # base64_encode is salt's jinja filter; Secret data must be base64.
23 |   cloud: >-
24 |     {{ credentials | base64_encode }}
25 | 
26 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/vistio/defaults.yaml:
--------------------------------------------------------------------------------
1 | vistio:
2 | version: 0.2.1
3 | ingress_host: vistio
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/vistio/ingress.sls:
--------------------------------------------------------------------------------
1 | vistio-ingress:
2 | file.managed:
3 | - name: /srv/kubernetes/manifests/vistio-ingress.yaml
4 | - source: salt://{{ tpldir }}/templates/ingress.yaml.j2
5 | - user: root
6 | - template: jinja
7 | - group: root
8 | - mode: "0644"
9 | - context:
10 | tpldir: {{ tpldir }}
11 | cmd.run:
12 | - require:
13 | - cmd: vistio
14 | - watch:
15 | - file: /srv/kubernetes/manifests/vistio-ingress.yaml
16 | - runas: root
17 | - name: kubectl apply -f /srv/kubernetes/manifests/vistio-ingress.yaml
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/vistio/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.charts.vistio.config
3 | - kubernetes.charts.vistio.install
4 | - kubernetes.charts.vistio.ingress
5 | - kubernetes.charts.vistio.test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/charts/vistio/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set vistio = salt['pillar.get']('kubernetes:charts:vistio', default=defaults['vistio'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/99-loopback.conf:
--------------------------------------------------------------------------------
1 | {
2 | "cniVersion": "0.3.1",
3 | "type": "loopback"
4 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/calico/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | /srv/kubernetes/charts/calico:
5 | file.directory:
6 | - user: root
7 | - group: root
8 | - dir_mode: "0750"
9 | - makedirs: True
10 |
11 | /srv/kubernetes/manifests/calico:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/calico/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/calico/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set calico = salt['pillar.get']('kubernetes:common:cni:calico', default=defaults['calico'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/calico/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import calico with context %}
5 | 
6 | # Keep the projectcalico helm repository registered while calico is
7 | # enabled; unregister it once calico is disabled.
8 | calico-repos:
9 |   helm.repo_managed:
10 | {%- if calico.enabled %}
11 |     - present:
12 |       - name: projectcalico
13 |         url: {{ calico.url }}
14 | {%- else %}
15 |     - absent:
16 |       - projectcalico
17 | {%- endif %}
18 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | /srv/kubernetes/charts/cilium:
5 | file.directory:
6 | - user: root
7 | - group: root
8 | - dir_mode: "0750"
9 | - makedirs: True
10 |
11 | /srv/kubernetes/manifests/cilium:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/defaults.yaml:
--------------------------------------------------------------------------------
1 | cilium:
2 | enabled: true
3 | version: 1.13.2
4 | chart_version: 1.13.2
5 | cli_version: 0.14.1
6 | url: https://helm.cilium.io/
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/init.sls:
--------------------------------------------------------------------------------
1 |
2 |
3 | include:
4 | - .config
5 | - .repos
6 | - .install
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set cilium = salt['pillar.get']('kubernetes:common:cni:cilium', default=defaults['cilium'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import cilium with context %}
5 | 
6 | # Keep the cilium helm repository registered while cilium is enabled;
7 | # unregister it once cilium is disabled.
8 | cilium-repos:
9 |   helm.repo_managed:
10 | {%- if cilium.enabled %}
11 |     - present:
12 |       - name: cilium
13 |         url: {{ cilium.url }}
14 | {%- else %}
15 |     - absent:
16 |       - cilium
17 | {%- endif %}
18 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/cilium/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import cilium with context %}
5 | {%- from "kubernetes/map.jinja" import common with context -%}
6 |
7 | MTU: 0
8 | cni:
9 | install: true
10 | chainingMode: portmap
11 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/flannel/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/flannel:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/flannel/flannel.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/flannel
12 | - source: salt://{{ tpldir }}/templates/flannel.yaml.j2
13 | - user: root
14 | - template: jinja
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/flannel/defaults.yaml:
--------------------------------------------------------------------------------
1 | flannel:
2 | flannel_image: quay.io/coreos/flannel:v0.11.0-amd64
3 | interface: eth0
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/flannel/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.cni.flannel.config
3 | - kubernetes.cni.flannel.install
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/flannel/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set flannel = salt['pillar.get']('kubernetes:common:cni:flannel', default=defaults['flannel'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/weave/README.md:
--------------------------------------------------------------------------------
1 | *
2 |
3 | * **Resolve Port mapping issue with 2.4.x**
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/weave/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/weave:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/weave/weave.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/weave
12 | - source: salt://{{ tpldir }}/templates/weave.yaml.j2
13 | - user: root
14 | - template: jinja
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/weave/defaults.yaml:
--------------------------------------------------------------------------------
1 | weave:
2 | interface: eth0
3 | weave_image: docker.io/weaveworks/weave-kube:2.6.5
4 | npc_image: docker.io/weaveworks/weave-npc:2.6.5
5 | # password: 0123456789abcdefghij
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/weave/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.cni.weave.config
3 | - kubernetes.cni.weave.install
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cni/weave/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set weave = salt['pillar.get']('kubernetes:common:cni:weave', default=defaults['weave'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/containerd/defaults.yaml:
--------------------------------------------------------------------------------
1 | containerd:
2 | enabled: true
3 | version: 1.4.11
4 | root: /var/lib/containerd
5 | state: /run/containerd
6 | grpc:
7 | address: /run/containerd/containerd.sock
8 | cni:
9 | bin_dir: /opt/cni/bin
10 | conf_dir: /etc/cni/net.d
11 | opt:
12 | path: /opt/containerd
13 |
14 | use_upstream_repo: true
15 | use_old_repo: false
16 | proxy: false
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/containerd/files/containerd.conf:
--------------------------------------------------------------------------------
1 | overlay
2 | br_netfilter
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/containerd/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .repo
3 | - .install
4 | - .config
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/containerd/install.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {%- from tpldir ~ "/map.jinja" import containerd with context %}
6 |
7 |
8 | {% set pkgState = 'absent' %}
9 | {% if containerd.enabled %}
10 | {% set pkgState = 'installed' %}
11 | {% endif %}
12 |
13 | containerd:
14 | pkg.{{ pkgState }}:
15 | - name: containerd.io
16 | - version: {{ containerd.version }}-1
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/containerd/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set containerd = salt['pillar.get']('kubernetes:common:cri:containerd', default=defaults['containerd'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crictl.yaml:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | runtime-endpoint: unix:///run/{{ common.cri.provider }}/{{ common.cri.provider }}.sock
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | *
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/defaults.yaml:
--------------------------------------------------------------------------------
1 | crio:
2 | version: 1.15
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/files/crio-shutdown.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Shutdown CRIO containers before shutting down the system
3 | Wants=crio.service
4 | After=crio.service
5 | Documentation=https://github.com/kubernetes-incubator/cri-o
6 |
7 | [Service]
8 | Type=oneshot
9 | ExecStart=/usr/bin/rm -f /var/lib/crio/crio.shutdown
10 | ExecStop=/usr/bin/bash -c "/usr/bin/mkdir /var/lib/crio; /usr/bin/touch /var/lib/crio/crio.shutdown"
11 | RemainAfterExit=yes
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/files/policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": [
3 | {
4 | "type": "insecureAcceptAnything"
5 | }
6 | ],
7 | "transports":
8 | {
9 | "docker-daemon":
10 | {
11 | "": [{"type":"insecureAcceptAnything"}]
12 | }
13 | }
14 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.cri.crio.repo
3 | - kubernetes.cri.crio.pkg
4 | - kubernetes.cri.crio.install
5 | - kubernetes.cri.crio.config
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 | 
6 | {#- Merge in salt pillar. defaults.yaml declares the `crio` key, so the
7 |     lookup must use defaults['crio']; the old defaults['containerd']
8 |     reference failed at render time because that key does not exist. #}
9 | {% set crio = salt['pillar.get']('kubernetes:common:cri:crio', default=defaults['crio'], merge=True) %}
10 | {#- Backward-compatible alias for states that still import `containerd`
11 |     from this map. #}
12 | {% set containerd = crio %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/crio/pkg.sls:
--------------------------------------------------------------------------------
1 | libdevmapper1.02.1:
2 | pkg.latest:
3 | - refresh: true
4 |
5 | libgpgme11:
6 | pkg.latest:
7 | - refresh: true
8 |
9 | libseccomp2:
10 | pkg.latest:
11 | - refresh: true
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/docker/config.sls:
--------------------------------------------------------------------------------
1 | docker-daemon-dir:
2 | file.directory:
3 | - name: /etc/docker
4 | - user: root
5 | - group: root
6 | - mode: "0755"
7 |
8 | /etc/docker/daemon.json:
9 | file.managed:
10 | - source: salt://kubernetes/cri/docker/files/daemon.json
11 | - user: root
12 | - group: root
13 | - mode: "0644"
14 |
15 | docker.service:
16 | service.running:
17 | - watch:
18 | - pkg: docker-ce
19 | - file: /etc/docker/daemon.json
20 | - enable: True
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/docker/files/daemon.json:
--------------------------------------------------------------------------------
1 | {
2 | "exec-opts": ["native.cgroupdriver=systemd"],
3 | "log-driver": "json-file",
4 | "log-opts": {
5 | "max-size": "100m"
6 | },
7 | "storage-driver": "overlay2",
8 | "iptables": false
9 | }
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/docker/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - kubernetes.cri.docker.repo
3 | - kubernetes.cri.docker.install
4 | - kubernetes.cri.docker.config
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/docker/install.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {%- from tpldir ~ "/map.jinja" import docker with context %}
6 |
7 |
8 | {% set pkgState = 'absent' %}
9 | {% if docker.enabled %}
10 | {% set pkgState = 'installed' %}
11 | {% endif %}
12 |
13 | docker-ce:
14 | pkg.{{ pkgState }}:
15 | - version: 5:{{ docker.version }}~3-0~ubuntu-{{ grains["oscodename"] }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/cri/docker/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set docker = salt['pillar.get']('kubernetes:common:cri:docker', default=defaults['docker'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/README.md:
--------------------------------------------------------------------------------
1 | # Longhorn
2 |
3 |
4 | ## References
5 |
6 | * https://longhorn.io/docs/1.0.0/advanced-resources/default-disk-and-node-config/
7 | * https://longhorn.io/docs/1.0.0/volumes-and-nodes/multidisk/
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/clean.sls:
--------------------------------------------------------------------------------
1 | longhorn-teardown:
2 | cmd.run:
3 | - runas: root
4 | - name: |
5 | kubectl delete -f /srv/kubernetes/manifests/longhorn/longhorn.yaml
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/longhorn:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/longhorn/longhorn.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/longhorn
12 | - source: salt://{{ tpldir }}/templates/longhorn.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/defaults.yaml:
--------------------------------------------------------------------------------
1 | longhorn:
2 | manager_image: docker.io/longhornio/longhorn-manager:v1.0.0
3 | engine_image: docker.io/longhornio/longhorn-engine:v1.0.0
4 | instance_manager_image: docker.io/longhornio/longhorn-instance-manager:v1_20200514
5 | ui_image: docker.io/longhornio/longhorn-ui:v1.0.0
6 | ingress_host: longhorn
7 | default_storageclass:
8 | enabled: false
9 | name: longhorn
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: v1
4 | kind: Namespace
5 | metadata:
6 | name: longhorn-system
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .namespace
4 | - .install
5 | - .ingress
6 | - .node-label
7 | - .storageclass
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/longhorn/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set longhorn = salt['pillar.get']('kubernetes:storage:longhorn', default=defaults['longhorn'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/minio/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import minio with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if minio.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/minio:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
18 | /srv/kubernetes/manifests/minio:
19 | file.directory:
20 | - user: root
21 | - group: root
22 | - dir_mode: "0750"
23 | - makedirs: True
24 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/minio/defaults.yaml:
--------------------------------------------------------------------------------
1 | minio:
2 | enabled: true
3 | version: 5.0.4
4 | chart_version: 5.0.4
5 | client_version: 2023-05-04T18-10-16Z
6 | api_version: minio.min.io/v2
7 | url: https://operator.min.io/
8 | ingress:
9 | enabled: true
10 | host: minio-operator
11 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/minio/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/minio/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set minio = salt['pillar.get']('kubernetes:csi:minio', default=defaults['minio'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/minio/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import minio with context %}
5 | 
6 | # Register (or remove) the MinIO Helm chart repository depending on the
7 | # pillar-controlled enabled flag.
8 | minio-repos:
9 |   helm.repo_managed:
10 |   {%- if minio.enabled %}
11 |     - present:
12 |       - name: minio
13 |         url: {{ minio.url }}
14 |     {%- else %}
15 |     - absent:
16 |       - minio
17 |     {%- endif %}
18 | 
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/README.md:
--------------------------------------------------------------------------------
1 | # OpenEBS
2 |
3 | ## References
4 |
5 | * https://github.com/openebs/csi-openebs
6 | * https://github.com/openebs/cstor-csi
7 | * https://github.com/openebs/jiva-csi
8 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/openebs:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /srv/kubernetes/manifests/openebs/openebs-operator.yaml:
9 | file.managed:
10 | - require:
11 | - file: /srv/kubernetes/manifests/openebs
12 | - source: salt://{{ tpldir }}/templates/openebs-operator.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/driver.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import openebs with context %}
6 |
7 | openebs-csi-driver:
8 | cmd.run:
9 | - onlyif: http --verify false https://localhost:6443/livez?verbose
10 | - name: |
11 | kubectl apply -f {{ openebs.csi_driver }}
12 | kubectl apply -f {{ openebs.csi_nodeinfo }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: openebs
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import storage with context -%}
2 |
3 | include:
4 | - .driver
5 | - .config
6 | - .namespace
7 | - .install
8 | # - .blockdevice
9 | - .cstor
10 | - .jiva
11 | - .cstor-storageclass
12 | # - .ingress
13 |
14 |
15 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/jiva.sls:
--------------------------------------------------------------------------------
1 | openebs-jiva:
2 | file.managed:
3 | - require:
4 | - file: /srv/kubernetes/manifests/openebs
5 | - name: /srv/kubernetes/manifests/openebs/jiva-csi.yaml
6 | - source: salt://{{ tpldir }}/templates/jiva-csi.yaml.j2
7 | - template: jinja
8 | - user: root
9 | - group: root
10 | - mode: "0644"
11 | - context:
12 | tpldir: {{ tpldir }}
13 | cmd.run:
14 | - watch:
15 | - file: /srv/kubernetes/manifests/openebs/jiva-csi.yaml
16 | - runas: root
17 | - name: |
18 | kubectl apply -f /srv/kubernetes/manifests/openebs/jiva-csi.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/openebs/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set openebs = salt['pillar.get']('kubernetes:storage:openebs', default=defaults['openebs'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/portworx/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/manifests/portworx:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/portworx/defaults.yaml:
--------------------------------------------------------------------------------
1 | portworx:
2 | manifest_url: https://install.portworx.com/...
3 | dedicated_replication: 3
4 | shared_replication: 3
5 | fs_type: ext4
6 | dataplane_interface: ens10
7 | management_interface: wg0
8 | default_storageclass:
9 | enabled: False
10 | name: px-dedicated-sc
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/portworx/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: portworx
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/portworx/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 | {%- from "kubernetes/map.jinja" import kubeadm with context -%}
3 |
4 | include:
5 | - .config
6 | - .namespace
7 | {%- if kubeadm.get('etcd', {'external': False}).external %}
8 | - .external-etcd-cert
9 | {%- endif %}
10 | - .install
11 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
12 | - .prometheus
13 | {%- endif %}
14 | - .storageclass
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/portworx/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set portworx = salt['pillar.get']('kubernetes:storage:portworx', default=defaults['portworx'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import rook_ceph with context %}
5 |
6 | {% set state = 'absent' %}
7 | {% if rook_ceph.enabled -%}
8 | {% set state = 'managed' -%}
9 | {% endif %}
10 |
11 | /srv/kubernetes/charts/rook-ceph:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
18 | /srv/kubernetes/manifests/rook-ceph:
19 | file.directory:
20 | - user: root
21 | - group: root
22 | - dir_mode: "0750"
23 | - makedirs: True
24 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/defaults.yaml:
--------------------------------------------------------------------------------
1 | rook_ceph:
2 | enabled: true
3 | version: 1.11.5
4 | chart_version: 1.11.5
5 | api_version: ceph.rook.io/v1
6 | url: https://charts.rook.io/release
7 | ceph:
8 | version: 17.2.6
9 | toolbox:
10 | enabled: true
11 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | # Namespace for the Rook-Ceph operator and cluster.
4 | # Was "nats-io" — a copy-paste from the NATS state; rook-ceph deploys into
5 | # the conventional "rook-ceph" namespace.
6 | apiVersion: v1
7 | kind: Namespace
8 | metadata:
9 |   name: rook-ceph
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set rook_ceph = salt['pillar.get']('kubernetes:csi:rook_ceph', default=defaults['rook_ceph'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import rook_ceph with context %}
5 |
6 | rook-repos:
7 | helm.repo_managed:
8 | {%- if rook_ceph.enabled %}
9 | - present:
10 | - name: rook-release
11 | url: {{ rook_ceph.url }}
12 | {%- else %}
13 | - absent:
14 | - rook-release
15 | {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/csi/rook-ceph/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import rook_ceph with context %}
5 | {%- from "kubernetes/map.jinja" import common with context -%}
6 |
7 | image:
8 | tag: v{{ rook_ceph.version }}
9 |
10 | monitoring:
11 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
12 | enabled: true
13 | {%- endif %}
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/helm/config.sls:
--------------------------------------------------------------------------------
1 | /srv/kubernetes/charts:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
--------------------------------------------------------------------------------
/srv/salt/kubernetes/helm/defaults.yaml:
--------------------------------------------------------------------------------
1 | helm:
2 | enabled: true
3 | version: 3.11.3
4 | source_hash: ca2d5d40d4cdfb9a3a6205dd803b5bc8def00bd2f13e5526c127e9b667974a89
5 | url: https://charts.helm.sh/stable
--------------------------------------------------------------------------------
/srv/salt/kubernetes/helm/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .install
4 | - .repo
--------------------------------------------------------------------------------
/srv/salt/kubernetes/helm/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set helm = salt['pillar.get']('kubernetes:common:helm', default=defaults['helm'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/helm/repo.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 | 
4 | {%- from tpldir ~ "/map.jinja" import helm with context %}
5 | 
6 | # Register (or remove) the legacy "stable" Helm repository.
7 | # NOTE(review): charts.helm.sh/stable has been archived (read-only) since
8 | # Nov 2020 — confirm it is still wanted as a default.
9 | helm-stable-repos:
10 |   helm.repo_managed:
11 |   {%- if helm.enabled %}
12 |     - present:
13 |       - name: stable
14 |         url: {{ helm.url }}
15 |     {%- else %}
16 |     - absent:
17 |       - stable
18 |     {%- endif %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/ambassador/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 | 
3 | # The original file contained a bare `kubectl apply ...` line at top level,
4 | # which is not valid SLS and would fail to render. Wrap it in a cmd.run
5 | # state like the other addon states in this formula.
6 | ambassador-install:
7 |   cmd.run:
8 |     - runas: root
9 |     - name: |
10 |         kubectl apply -f https://getambassador.io/yaml/ambassador/ambassador-rbac.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/defaults.yaml:
--------------------------------------------------------------------------------
1 | cert_manager:
2 | enabled: true
3 | version: 1.11.1
4 | chart_version: 1.11.1
5 | api_version: cert-manager.io/v1
6 | url: https://charts.jetstack.io
7 | acme_email: user@example.com
8 | dns:
9 | enabled: False
10 | provider: cloudflare
11 | cloudflare:
12 | email: admin@example.com
13 | secret: 012345abcde012345abcde012345abcde012345abcde
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/files/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: cert-manager
5 | namespace: cert-manager
6 | labels:
7 | app: cert-manager
8 | spec:
9 | endpoints:
10 | - targetPort: 9402
11 | path: /metrics
12 | interval: 60s
13 | scrapeTimeout: 30s
14 | selector:
15 | matchLabels:
16 | app: cert-manager
17 | namespaceSelector:
18 | matchNames:
19 | - cert-manager
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - .config
5 | - .repos
6 | - .install
7 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set cert_manager = salt['pillar.get']('kubernetes:common:ingress:cert_manager', default=defaults['cert_manager'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import cert_manager with context %}
5 |
6 | cert-manager-repos:
7 | helm.repo_managed:
8 | {%- if cert_manager.enabled %}
9 | - present:
10 | - name: jetstack
11 | url: {{ cert_manager.url }}
12 | {%- else %}
13 | - absent:
14 | - jetstack
15 | {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/templates/cloudflare.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import cert_manager with context %}
5 |
6 | apiVersion: v1
7 | kind: Secret
8 | metadata:
9 | name: cloudflare-api-token-secret
10 | namespace: cert-manager
11 | labels:
12 | app: cert-manager
13 | type: Opaque
14 | data:
15 | api-token: >-
16 | {{ cert_manager.dns.cloudflare.secret | base64_encode }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/cert-manager/templates/values.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import cert_manager with context %}
5 | {% from "kubernetes/map.jinja" import common with context -%}
6 |
7 | image:
8 | tag: v{{ cert_manager.version }}
9 |
10 |
11 | prometheus:
12 | enabled: true
13 | {%- if common.addons.get('kube_prometheus', {'enabled': False}).enabled %}
14 | servicemonitor:
15 | enabled: true
16 | namespace: cert-manager
17 | {%- endif %}
18 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/contour/defaults.yaml:
--------------------------------------------------------------------------------
1 | contour:
2 | enabled: true
3 | version: 1.24.4
4 | chart_version: 11.3.1
5 | api_version: projectcontour.io/v1
6 | url: https://charts.bitnami.com/bitnami
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/contour/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 | 
3 | # Contour ingress: base states always, certificate only when cert-manager
4 | # is enabled in the common addons pillar.
5 | include:
6 |   - .config
7 |   - .repos
8 |   - .install
9 | {%- if common.addons.get('cert_manager', {'enabled': False}).enabled %}
10 |   - .certificate
11 | {%- endif %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/contour/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set contour = salt['pillar.get']('kubernetes:common:ingress:contour', default=defaults['contour'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/contour/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import contour with context %}
5 |
6 | contour-repos:
7 | helm.repo_managed:
8 | {%- if contour.enabled %}
9 | - present:
10 | - name: bitnami
11 | url: {{ contour.url }}
12 | {%- else %}
13 | - absent:
14 | - bitnami
15 | {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/contour/templates/certificate.yaml.j2:
--------------------------------------------------------------------------------
1 | {%- set public_domain = pillar['public-domain'] -%}
2 | 
3 | apiVersion: cert-manager.io/v1
4 | kind: Certificate
5 | metadata:
6 |   name: contour-wildcard-certs
7 |   namespace: projectcontour
8 | spec:
9 |   commonName: '{{ public_domain }}'
10 |   dnsNames:
11 |   - "*.{{ public_domain }}"
12 |   - "{{ public_domain }}"
13 |   issuerRef:
14 |     kind: ClusterIssuer
15 |     name: letsencrypt-prod
16 |   secretName: nginx-ingress-certs  # NOTE(review): name looks copy-pasted from an nginx state; the istio equivalent reuses the Certificate name — confirm what contour's TLS config actually references before renaming
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import common with context -%}
2 |
3 | include:
4 | - kubernetes.ingress.metallb
5 | {%- if common.addons.get('cert_manager', {'enabled': False}).enabled %}
6 | - kubernetes.ingress.cert-manager
7 | {%- endif %}
8 | {%- if common.addons.get('istio', {'enabled': False}).enabled %}
9 | - kubernetes.ingress.istio
10 | {%- endif %}
11 | {%- if common.addons.get('contour', {'enabled': False}).enabled %}
12 | - kubernetes.ingress.contour
13 | {%- endif %}
14 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/defaults.yaml:
--------------------------------------------------------------------------------
1 | istio:
2 | version: 1.9.0
3 | tracing: jaeger
4 | prometheus_ingress_host: istio-prometheus
5 | tracing_ingress_host: istio-tracing
6 | telemetry_ingress_host: istio-telemetry
7 | grafana_ingress_host: istio-grafana
8 | kiali_ingress_host: kiali
9 | bookinfo_ingress_host: bookinfo
10 | source_hash: 51c9e23a7be9211c9fe0fc1083cae89975984e7a87646db6f42014765a986d19
11 | profile: demo
12 | cni: true
13 | telemetry: false
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/files/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | istio-injection: enabled
6 | name: bookinfo
7 | ---
8 | apiVersion: v1
9 | kind: Namespace
10 | metadata:
11 | name: istio-system
12 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .namespace
4 | - .install
5 | - .monitoring
6 | # - .certificate
7 | - .ingress
8 | - .demo
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set istio = salt['pillar.get']('kubernetes:common:addons:istio', default=defaults['istio'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/namespace.sls:
--------------------------------------------------------------------------------
1 | istio-namespace:
2 | file.managed:
3 | - require:
4 | - archive: /srv/kubernetes/manifests/istio
5 | - name: /srv/kubernetes/manifests/istio/namespace.yaml
6 | - source: salt://{{ tpldir }}/files/namespace.yaml
7 | - user: root
8 | - group: root
9 | - mode: "0644"
10 | - context:
11 | tpldir: {{ tpldir }}
12 | cmd.run:
13 | - watch:
14 | - file: /srv/kubernetes/manifests/istio/namespace.yaml
15 | - name: |
16 | kubectl apply -f /srv/kubernetes/manifests/istio/namespace.yaml
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/templates/certificate.yaml.j2:
--------------------------------------------------------------------------------
1 | {%- set public_domain = pillar['public-domain'] -%}
2 | {%- from "kubernetes/map.jinja" import common with context -%}
3 |
4 | apiVersion: cert-manager.io/v1
5 | kind: Certificate
6 | metadata:
7 | name: istio-ingressgateway-certs
8 | namespace: istio-system
9 | spec:
10 | commonName: '*.{{ public_domain }}'
11 | dnsNames:
12 | - '{{ public_domain }}'
13 | - '*.{{ public_domain }}'
14 | issuerRef:
15 | kind: ClusterIssuer
16 | name: letsencrypt-prod
17 | secretName: istio-ingressgateway-certs
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/istio/templates/istio-config.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Get the `tplroot` from `tpldir` #}
5 | {% from tpldir ~ "/map.jinja" import istio with context %}
6 |
7 | apiVersion: install.istio.io/v1alpha1
8 | kind: IstioOperator
9 | metadata:
10 | namespace: istio-system
11 | name: example-istiocontrolplane
12 | spec:
13 | profile: {{ istio.profile }}
14 | components:
15 | cni:
16 | enabled: {{ istio.cni }}
17 | values:
18 | cni:
19 | excludeNamespaces:
20 | - istio-system
21 | - kube-system
22 | logLevel: info
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/config.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | /srv/kubernetes/charts/metallb:
5 | file.directory:
6 | - user: root
7 | - group: root
8 | - dir_mode: "0750"
9 | - makedirs: True
10 |
11 | /srv/kubernetes/manifests/metallb:
12 | file.directory:
13 | - user: root
14 | - group: root
15 | - dir_mode: "0750"
16 | - makedirs: True
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/defaults.yaml:
--------------------------------------------------------------------------------
1 | metallb:
2 | enabled: true
3 | version: 0.13.9
4 | chart_version: 0.13.9
5 | api_version: metallb.io/v1beta2
6 | url: https://metallb.github.io/metallb
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .config
3 | - .repos
4 | - .install
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set metallb = salt['pillar.get']('kubernetes:charts:metallb', default=defaults['metallb'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/repos.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {%- from tpldir ~ "/map.jinja" import metallb with context %}
5 |
6 | metallb-repos:
7 | helm.repo_managed:
8 | {%- if metallb.enabled %}
9 | - present:
10 | - name: metallb
11 | url: {{ metallb.url }}
12 | {%- else %}
13 | - absent:
14 | - metallb
15 | {%- endif %}
16 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/ingress/metallb/templates/ipaddresspool.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: metallb.io/v1beta1
2 | kind: IPAddressPool
3 | metadata:
4 | name: default-pool
5 | namespace: metallb-system
6 | spec:
7 | addresses:
8 | - 172.17.4.200-172.17.4.249
--------------------------------------------------------------------------------
/srv/salt/kubernetes/map.jinja:
--------------------------------------------------------------------------------
1 | {% set common = salt['pillar.get']('kubernetes:common') %}
2 | {% set etcd = salt['pillar.get']('kubernetes:etcd') %}
3 | {% set kubeadm = salt['pillar.get']('kubeadm') %}
4 | {% set storage = salt['pillar.get']('kubernetes:storage') %}
5 | {% set charts = salt['pillar.get']('kubernetes:charts') %}
6 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/edge/files/acme.json:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fjudith/saltstack-kubernetes/95f3e1e44297d94146f57bf09591e33af5885395/srv/salt/kubernetes/role/edge/files/acme.json
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/edge/kubeadm/defaults.yaml:
--------------------------------------------------------------------------------
1 | kubeadm:
2 | enabled: true
3 | kubernetesVersion: 1.26.4
4 | apiVersion: v1beta3
5 | token: "abcdef.0123456789abcdef"
6 | nodeToken: "123456.abcdefghij123456"
7 | criSocket: "unix:///run/containerd/containerd.sock"
8 | cgroupDriver: "systemd"
9 | ignorePreflightErrors: []
10 | bindPort: 6443
11 | caCertPath: "/etc/kubernetes/pki/ca.crt"
12 | timeout: '5m0s'
13 | kubeletExtraArgs:
14 | cgroup-driver: systemd
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/edge/kubeadm/init.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | include:
5 | - .osprep
6 | - .repo
7 | - .install
8 | - .kubeadm-join
9 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/edge/kubeadm/install.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import kubeadm with context %}
5 |
6 | {% set repoState = 'absent' %}
7 | {% if kubeadm.enabled %}
8 | {% set repoState = 'installed' %}
9 | {% endif %}
10 |
11 | kubectl:
12 | pkg.{{ repoState }}:
13 | - version: {{ kubeadm.kubernetesVersion }}-00
14 |
15 | kubelet:
16 | pkg.{{ repoState }}:
17 | - version: {{ kubeadm.kubernetesVersion }}-00
18 |
19 | kubeadm:
20 | pkg.{{ repoState }}:
21 | - version: {{ kubeadm.kubernetesVersion }}-00
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/edge/kubeadm/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Start imports #}
5 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
6 |
7 | {#- Merge in salt pillar #}
8 | {% set kubeadm = salt['pillar.get']('kubeadm', default=defaults['kubeadm'], merge=True) %}
9 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/clean.sls:
--------------------------------------------------------------------------------
1 | /var/lib/etcd:
2 | file.absent
3 |
4 | /var/lib/etcd/member:
5 | file.absent
6 |
7 | /etc/etcd:
8 | file.absent
9 |
10 | stop-etcd:
11 | service.dead:
12 | - name: etcd.service
13 |
14 | disable-etcd:
15 | service.disabled:
16 | - require:
17 | - service: stop-etcd
18 | - name: etcd.service
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/config.sls:
--------------------------------------------------------------------------------
1 | /etc/etcd/pki:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
7 |
8 | /etc/etcd/etcd.env:
9 | file.managed:
10 | - require:
11 | - file: /etc/etcd/pki
12 | - source: salt://{{ tpldir }}/templates/etcd.env.j2
13 | - user: root
14 | - group: root
15 | - mode: "0755"
16 | - template: jinja
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/defaults.yaml:
--------------------------------------------------------------------------------
1 | etcd:
2 | version: 3.5.8
3 | initial_cluster_token: b1483289
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/etcdadm/config.sls:
--------------------------------------------------------------------------------
1 | /etc/etcd/pki:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: True
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/etcdadm/defaults.yaml:
--------------------------------------------------------------------------------
1 | etcdadm:
2 | version: 0.1.3
3 | source_hash: 1cc781d15cb5994eb9918c9f6947a00481c83494e7fe86eb9aac8ffe70bdfa96
4 | client_version: 3.5.1
5 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/etcdadm/init.sls:
--------------------------------------------------------------------------------
1 | {%- set etcds = [] -%}
2 | {%- for key, value in salt["mine.get"](tgt="role:etcd", fun="network.get_hostname", tgt_type="grain")|dictsort(false, 'value') -%}
3 | {%- do etcds.append(value) -%}
4 | {%- endfor -%}
5 |
6 | include:
7 | - .config
8 | - .install
9 | {%- if grains['id'] == etcds|first %}
10 | - .etcdadm-init
11 | {% else %}
12 | - .etcdadm-join
13 | {%- endif %}
14 | - .patch
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/etcdadm/map.jinja:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | # vim: ft=jinja
4 |
5 | {#- Start imports #}
6 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
7 |
8 | {#- Merge in salt pillar #}
9 | {% set etcdadm = salt['pillar.get']('etcdadm', default=defaults['etcdadm'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/files/etcd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=etcd
3 | Documentation=https://github.com/coreos/etcd
4 | Conflicts=etcd-member.service
5 | Conflicts=etcd2.service
6 |
7 | [Service]
8 | EnvironmentFile=/etc/etcd/etcd.env
9 | ExecStart=/usr/bin/etcd
10 |
11 | Type=notify
12 | TimeoutStartSec=0
13 | Restart=on-failure
14 | RestartSec=5s
15 |
16 | LimitNOFILE=65536
17 | Nice=-10
18 | IOSchedulingClass=best-effort
19 | IOSchedulingPriority=2
20 | MemoryLow=200M
21 |
22 | [Install]
23 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/init.sls:
--------------------------------------------------------------------------------
1 | {%- set etcds = [] -%}
2 | {%- for key, value in salt["mine.get"](tgt="role:etcd", fun="network.get_hostname", tgt_type="grain")|dictsort(false, 'value') -%}
3 | {%- do etcds.append(value) -%}
4 | {%- endfor -%}
5 |
6 | include:
7 | - .config
8 | - .certs
9 | - .install
10 | - .test
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/map.jinja:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | # vim: ft=jinja
4 |
5 | {#- Start imports #}
6 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
7 |
8 | {#- Merge in salt pillar #}
9 | {% set etcd = salt['pillar.get']('etcd', default=defaults['etcd'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/etcd/test.sls:
--------------------------------------------------------------------------------
1 | test-etcd-members:
2 | cmd.wait:
3 | - name: |
4 | systemctl restart etcd.service
5 |
6 | sleep {{ range(10, 30) | random }}
7 |
8 | alias ec="ETCDCTL_API=3 etcdctl --cacert /etc/etcd/pki/ca.crt --cert /etc/etcd/pki/server.crt --key /etc/etcd/pki/server.key"
9 | ec member list
10 | - retry:
11 | attempts: 60
12 | until: True
13 | interval: 5
14 | splay: 10
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/master/kubeadm/encryption.sls:
--------------------------------------------------------------------------------
1 | /etc/kubernetes/pki:
2 | file.directory:
3 | - user: root
4 | - group: root
5 | - dir_mode: "0750"
6 | - makedirs: true
7 |
8 | /etc/kubernetes/pki/encryption-config.yaml:
9 | file.managed:
10 | - require:
11 | - file: /etc/kubernetes/pki
12 | - source: salt://{{ tpldir }}/templates/encryption-config.yaml.j2
13 | - template: jinja
14 | - user: root
15 | - group: root
16 | - mode: "0644"
17 | - context:
18 | tpldir: {{ tpldir }}
19 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/master/kubeadm/install.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import kubeadm with context %}
5 |
6 | {% set repoState = 'absent' %}
7 | {% if kubeadm.enabled %}
8 | {% set repoState = 'installed' %}
9 | {% endif %}
10 |
11 | kubectl:
12 | pkg.{{ repoState }}:
13 | - version: {{ kubeadm.kubernetesVersion }}-00
14 |
15 | kubelet:
16 | pkg.{{ repoState }}:
17 | - version: {{ kubeadm.kubernetesVersion }}-00
18 |
19 | kubeadm:
20 | pkg.{{ repoState }}:
21 | - version: {{ kubeadm.kubernetesVersion }}-00
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/master/kubeadm/map.jinja:
--------------------------------------------------------------------------------
1 |
2 | # -*- coding: utf-8 -*-
3 | # vim: ft=jinja
4 |
5 | {#- Start imports #}
6 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
7 |
8 | {#- Merge in salt pillar #}
9 | {% set kubeadm = salt['pillar.get']('kubeadm', default=defaults['kubeadm'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/master/kubeadm/refresh-token.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import kubeadm with context %}
5 |
6 | kubeadm-node-token:
7 | cmd.run:
8 | - name: |
9 | /usr/bin/kubeadm --kubeconfig /etc/kubernetes/admin.conf token delete {{ kubeadm.token }}
10 | /usr/bin/kubeadm --kubeconfig /etc/kubernetes/admin.conf token create {{ kubeadm.token }}
11 | /usr/bin/kubeadm --config /root/kubeadm-config.yaml init phase upload-certs --upload-certs
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/master/kubeadm/templates/encryption-config.yaml.j2:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import kubeadm with context %}
5 |
6 | kind: EncryptionConfig
7 | apiVersion: v1
8 | resources:
9 | - resources:
10 | - secrets
11 | providers:
12 | - aescbc:
13 | keys:
14 | - name: key1
15 | secret: "{{ kubeadm.encryptionKey }}"
16 | - identity: {}
17 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/node/kubeadm/defaults.yaml:
--------------------------------------------------------------------------------
1 | kubeadm:
2 | enabled: true
3 | kubernetesVersion: 1.26.4
4 | apiVersion: v1beta3
5 | token: "abcdef.0123456789abcdef"
6 | nodeToken: "123456.abcdefghij123456"
7 | criSocket: "unix:///run/containerd/containerd.sock"
8 | cgroupDriver: "systemd"
9 | ignorePreflightErrors: []
10 | bindPort: 6443
11 | caCertPath: "/etc/kubernetes/pki/ca.crt"
12 | timeout: '5m0s'
13 | kubeletExtraArgs:
14 | cgroup-driver: systemd
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/node/kubeadm/init.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | include:
5 | - .osprep
6 | - .repo
7 | - .install
8 | - .kubeadm-join
9 |
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/node/kubeadm/install.sls:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% from tpldir ~ "/map.jinja" import kubeadm with context %}
5 |
6 | {% set repoState = 'absent' %}
7 | {% if kubeadm.enabled %}
8 | {% set repoState = 'installed' %}
9 | {% endif %}
10 |
11 | kubectl:
12 | pkg.{{ repoState }}:
13 | - version: {{ kubeadm.kubernetesVersion }}-00
14 |
15 | kubelet:
16 | pkg.{{ repoState }}:
17 | - version: {{ kubeadm.kubernetesVersion }}-00
18 |
19 | kubeadm:
20 | pkg.{{ repoState }}:
21 | - version: {{ kubeadm.kubernetesVersion }}-00
--------------------------------------------------------------------------------
/srv/salt/kubernetes/role/node/kubeadm/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {#- Start imports #}
5 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
6 |
7 | {#- Merge in salt pillar #}
8 | {% set kubeadm = salt['pillar.get']('kubeadm', default=defaults['kubeadm'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/loopback-iscsi/config.sls:
--------------------------------------------------------------------------------
1 | {%- from tpldir ~ "/map.jinja" import loopback_iscsi with context -%}
2 |
3 | {{ loopback_iscsi.path }}:
4 | file.directory:
5 | - user: root
6 | - group: root
7 | - dir_mode: "0750"
8 | - makedirs: True
9 |
10 | /etc/tgt/conf.d/loopback-iscsi.conf:
11 | file.managed:
12 | - source: salt://{{ tpldir }}/templates/loopback-iscsi.conf.j2
13 | - user: root
14 | - group: root
15 | - mode: "0755"
16 | - template: jinja
17 | - context:
18 | tpldir: {{ tpldir }}
--------------------------------------------------------------------------------
/srv/salt/loopback-iscsi/init.sls:
--------------------------------------------------------------------------------
1 | {%- from "kubernetes/map.jinja" import storage with context -%}
2 |
3 | include:
4 | - .osprep
5 | - .config
6 | - .install
7 | # - .label
8 | {%- if storage.get('longhorn', {'enabled': False}).enabled %}
9 | - .mount
10 | {%- endif %}
--------------------------------------------------------------------------------
/srv/salt/loopback-iscsi/label.sls:
--------------------------------------------------------------------------------
1 | {%- from tpldir ~ "/map.jinja" import loopback_iscsi with context -%}
2 |
3 |
4 | {%- for file in loopback_iscsi.files %}
5 | mklabel-{{ file.lun_name }}:
6 | module.run:
7 | - watch:
8 | - service: open-iscsi.service
9 | - partition.mklabel:
10 | - device: /dev/disk/by-path/ip-{{ loopback_iscsi.initiator_address }}:{{ loopback_iscsi.initiator_port }}-iscsi-iqn.0000-00.target.local:{{ file.lun_name }}-lun-1
11 | - label_type: gpt
12 | {%- endfor %}
--------------------------------------------------------------------------------
/srv/salt/loopback-iscsi/map.jinja:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # vim: ft=jinja
3 |
4 | {% import_yaml tpldir ~ "/defaults.yaml" or {} as defaults %}
5 |
6 | {#- Merge in salt pillar #}
7 | {% set loopback_iscsi = salt['pillar.get']('kubernetes:storage:loopback_iscsi', default=defaults['loopback_iscsi'], merge=True) %}
--------------------------------------------------------------------------------
/srv/salt/loopback-iscsi/osprep.sls:
--------------------------------------------------------------------------------
1 | iscsi:
2 | pkg.latest:
3 | - pkgs:
4 | - open-iscsi
5 | - tgt
6 | - targetcli-fb
7 |
--------------------------------------------------------------------------------
/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | terraform.tfvars
3 | terraform.tfstate
4 | terraform.tfstate.backup
5 | .terraform.tfstate.lock.info
6 | public_ips.txt
7 | private_ips.txt
8 | ssl
9 | *.backup
10 | backend.tf
11 | *.tfvars
--------------------------------------------------------------------------------
/terraform/backend.tf.exemple:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | bucket = "mybucket"
4 | key = "terraform.tfstate"
5 | region = "fr-par"
6 | endpoint = "https://s3.fr-par.scw.cloud"
 7 |     access_key = "__BACKEND_ACCESS_KEY__"
8 | secret_key = "__BACKEND_SECRET_KEY__"
9 | skip_credentials_validation = true
10 | skip_region_validation = true
11 | }
12 | }
--------------------------------------------------------------------------------
/terraform/dns/cloudflare/outputs.tf:
--------------------------------------------------------------------------------
1 | output "domains" {
2 | value = cloudflare_record.hosts.*.hostname
3 | }
--------------------------------------------------------------------------------
/terraform/dns/cloudflare/variables.tf:
--------------------------------------------------------------------------------
1 | variable "dns_count" {}
2 |
3 | variable "email" {}
4 |
5 | variable "token" {}
6 |
7 | variable "domain" {}
8 |
9 | variable "zone_id" {}
10 | variable "hostnames" {}
11 |
12 | variable "public_ips" {}
--------------------------------------------------------------------------------
/terraform/dns/cloudflare/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | cloudflare = {
4 | source = "cloudflare/cloudflare"
5 | version = "~> 2.0"
6 | }
7 | }
8 | required_version = ">= 0.13"
9 | }
10 |
--------------------------------------------------------------------------------
/terraform/dns/ovh/outputs.tf:
--------------------------------------------------------------------------------
1 | output "domains" {
2 | value = ovh_domain_zone_record.hosts.*.target
3 | }
--------------------------------------------------------------------------------
/terraform/dns/ovh/variables.tf:
--------------------------------------------------------------------------------
1 | variable "dns_count" {}
2 |
3 | variable "endpoint" {
4 | default = "ovh-eu"
5 | }
6 |
7 | variable "application_key" {}
8 |
9 | variable "application_secret" {}
10 |
11 | variable "consumer_key" {}
12 |
13 | variable "domain" {}
14 |
15 | variable "hostnames" {
16 | type = list
17 | }
18 |
19 | variable "public_ips" {
20 | type = list
21 | }
--------------------------------------------------------------------------------
/terraform/dns/ovh/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | ovh = {
4 | source = "ovh/ovh"
5 | version = "0.10.0"
6 | }
7 | }
8 | required_version = ">= 0.13"
9 | }
10 |
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/README.md:
--------------------------------------------------------------------------------
1 | # Cfssl module
2 |
3 | This module generates the various certificates to secure the Kubernetes cluster traffic.
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/admin-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "admin",
3 | "hosts": [],
4 | "key": {
5 | "algo": "ecdsa",
6 | "size": 256
7 | },
8 | "names": [
9 | {
10 | "O": "system:masters",
11 | "OU": "Kubernetes cluster"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "12000h"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "12000h"
15 | }
16 | }
17 | }
18 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "key": {
4 | "algo": "ecdsa",
5 | "size": 256
6 | },
7 | "names": [
8 | {
9 | "O": "Kubernetes",
10 | "OU": "Kubernetes cluster"
11 | }
12 | ]
13 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/dashboard-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:serviceaccount:kube-system:kubernetes-dashboard",
3 | "hosts": [],
4 | "key": {
5 | "algo": "ecdsa",
6 | "size": 256
7 | },
8 | "names": [
9 | {
10 | "O": "system:serviceaccounts:kube-system",
11 | "OU": "Kubernetes cluster"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/etcd-ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "12000h"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth",
12 | "client auth"
13 | ],
14 | "expiry": "12000h"
15 | }
16 | }
17 | }
18 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/etcd-ca-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "key": {
4 | "algo": "ecdsa",
5 | "size": 256
6 | },
7 | "names": [
8 | {
9 | "O": "Kubernetes",
10 | "OU": "etcd cluster"
11 | }
12 | ]
13 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/etcd-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "etcd",
3 | "hosts": [
4 | "${etcd_private_ips}",
5 | "127.0.0.1"
6 | ],
7 | "key": {
8 | "algo": "ecdsa",
9 | "size": 256
10 | },
11 | "names": [
12 | {
13 | "O": "k8s",
14 | "OU": "CoreOS Kubernetes"
15 | }
16 | ]
17 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/fanneld-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "flanneld",
3 | "hosts": [],
4 | "key": {
5 | "algo": "ecdsa",
6 | "size": 256
7 | },
8 | "names": [
9 | {
10 | "O": "Kubernetes",
11 | "OU": "Kubernetes cluster"
12 | }
13 | ]
14 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/kube-apiserver-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "kubernetes",
3 | "hosts": [
4 | "127.0.0.1",
5 | "${master_private_ips}",
6 | "localhost",
7 | "kubernetes",
8 | "kubernetes.default",
9 | "kubernetes.default.svc",
10 | "kubernetes.default.svc.cluster",
11 | "kubernetes.default.svc.cluster.local"
12 | ],
13 | "key": {
14 | "algo": "ecdsa",
15 | "size": 256
16 | },
17 | "names": [
18 | {
19 | "O": "Kubernetes",
20 | "OU": "Kubernetes cluster"
21 | }
22 | ]
23 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/kube-proxy-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:kube-proxy",
3 | "key": {
4 | "algo": "ecdsa",
5 | "size": 256
6 | },
7 | "names": [
8 | {
9 | "O": "system:node-proxier",
10 | "OU": "Kubernetes cluster"
11 | }
12 | ]
13 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/master-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:node:${master_private_ip}",
3 | "hosts": [
4 | "127.0.0.1",
5 | "${master_private_ips}",
6 | "localhost",
7 | "kubernetes",
8 | "kubernetes.default",
9 | "kubernetes.default.svc",
10 | "kubernetes.default.svc.cluster",
11 | "kubernetes.default.svc.cluster.local"
12 | ],
13 | "key": {
14 | "algo": "rsa",
15 | "size": 2048
16 | },
17 | "names": [
18 | {
19 | "O": "system:nodes",
20 | "OU": "CoreOS Kubernetes"
21 | }
22 | ]
23 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/templates/node-csr.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "system:node:${IP_ADDRESS}",
3 | "key": {
4 | "algo": "ecdsa",
5 | "size": 256
6 | },
7 | "names": [
8 | {
9 | "O": "system:nodes",
10 | "OU": "Kubernetes cluster"
11 | }
12 | ]
13 | }
--------------------------------------------------------------------------------
/terraform/encryption/cfssl/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | null = {
4 | source = "hashicorp/null"
5 | }
6 | }
7 | required_version = ">= 0.13"
8 | }
9 |
--------------------------------------------------------------------------------
/terraform/management/salt-master/README.md:
--------------------------------------------------------------------------------
1 | # Salt-Master module
2 |
 3 | This module manages the installation of the Salt server component.
--------------------------------------------------------------------------------
/terraform/management/salt-master/templates/master.conf:
--------------------------------------------------------------------------------
1 | master: localhost
2 | timeout: 30
3 | use_superseded:
4 | - module.run
--------------------------------------------------------------------------------
/terraform/management/salt-master/variables.tf:
--------------------------------------------------------------------------------
1 | variable "master_count" {}
2 |
3 | variable "bastion_host" {}
4 |
5 | variable "ssh_user" {
6 | default = "root"
7 | }
8 |
9 | variable "ssh_private_key" {
10 | default = "~/.ssh/id_rsa.insecure"
11 | }
12 |
13 | variable "connections" {
14 | type = list
15 | }
16 |
17 | variable "salt_master_host" {}
--------------------------------------------------------------------------------
/terraform/management/salt-master/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | null = {
4 | source = "hashicorp/null"
5 | }
6 | template = {
7 | source = "hashicorp/template"
8 | }
9 | }
10 | required_version = ">= 0.13"
11 | }
12 |
--------------------------------------------------------------------------------
/terraform/management/salt-minion/README.md:
--------------------------------------------------------------------------------
1 | # Salt-Minion module
2 |
3 | This module manages the installation of the Salt client component.
--------------------------------------------------------------------------------
/terraform/management/salt-minion/templates/master.conf:
--------------------------------------------------------------------------------
1 | master: ${salt_master_host}
2 | use_superseded:
3 | - module.run
--------------------------------------------------------------------------------
/terraform/management/salt-minion/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | null = {
4 | source = "hashicorp/null"
5 | }
6 | template = {
7 | source = "hashicorp/template"
8 | }
9 | }
10 | required_version = ">= 0.13"
11 | }
12 |
--------------------------------------------------------------------------------
/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "hostnames" {
2 | value = module.provider.hostnames
3 | }
4 |
5 | output "edge_hostnames" {
6 | value = module.provider.edge_hostnames
7 | }
8 |
9 | output "private_ips" {
10 | value = module.provider.private_ips
11 | }
12 |
13 | output "vpn_ips" {
14 | value = module.wireguard.vpn_ips
15 | }
16 |
17 | output "public_ips" {
18 | value = module.provider.public_ips
19 | }
--------------------------------------------------------------------------------
/terraform/provider/hcloud/README.md:
--------------------------------------------------------------------------------
 1 | # Hetzner Cloud Module
2 |
 3 | The following module creates virtual machine instances in Hetzner Cloud.
--------------------------------------------------------------------------------
/terraform/provider/hcloud/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | hcloud = {
4 | source = "hetznercloud/hcloud"
5 | version = "1.24.0"
6 | }
7 | }
8 | required_version = ">= 0.13"
9 | }
--------------------------------------------------------------------------------
/terraform/provider/scaleway/README.md:
--------------------------------------------------------------------------------
1 | # Scaleway Module
2 |
 3 | The following module creates virtual machine instances in Scaleway.
--------------------------------------------------------------------------------
/terraform/provider/scaleway/scaleway.tf:
--------------------------------------------------------------------------------
1 | provider "scaleway" {
2 | organization = var.organization
3 | token = var.token
4 | region = var.region
5 | version = "~> 1.4"
6 | }
7 |
8 | data "scaleway_image" "ubuntu" {
9 | architecture = var.architecture
10 | name = var.image
11 | }
12 |
13 | data "scaleway_bootscript" "bootscript" {
14 | architecture = var.architecture
15 | name_filter = "mainline 4.15.11 rev1"
16 | }
17 |
18 | resource "scaleway_ip" "public_ip" {
19 | count = var.etcd_count
20 | }
--------------------------------------------------------------------------------
/terraform/provider/scaleway/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | scaleway = {
4 | source = "scaleway/scaleway"
5 | }
6 | }
7 | required_version = ">= 0.13"
8 | }
9 |
--------------------------------------------------------------------------------
/terraform/routing/README.md:
--------------------------------------------------------------------------------
1 | # Routing module
2 |
 3 | This module configures the Wireguard interface to use the Edge servers as the default gateway.
--------------------------------------------------------------------------------
/terraform/routing/scripts/wireguard_config.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | sed -i -r "s|^(AllowedIps\s\=\s${gateway}\/32)|\1,0.0.0.0/1,128.0.0.0/1|g" /etc/wireguard/${vpn_interface}.conf
5 |
6 | systemctl restart wg-quick@${vpn_interface}
7 |
--------------------------------------------------------------------------------
/terraform/routing/variables.tf:
--------------------------------------------------------------------------------
1 | variable route_count {}
2 |
3 | variable gateway {}
4 |
5 | variable "vpn_interface" {
6 | default = "wg0"
7 | }
8 |
9 | variable "bastion_host" {}
10 |
11 | variable "ssh_user" {
12 | default = "root"
13 | }
14 |
15 | variable "ssh_private_key" {
16 | default = "~/.ssh/id_rsa.insecure"
17 | }
18 |
19 | variable "connections" {
20 | type = list
21 | }
--------------------------------------------------------------------------------
/terraform/routing/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | null = {
4 | source = "hashicorp/null"
5 | }
6 | template = {
7 | source = "hashicorp/template"
8 | }
9 | }
10 | required_version = ">= 0.13"
11 | }
12 |
--------------------------------------------------------------------------------
/terraform/security/proxy-exceptions/README.md:
--------------------------------------------------------------------------------
1 | # Proxy-exceptions Module
2 |
3 | This module sets the addresses allowed to bypass the edge server.
--------------------------------------------------------------------------------
/terraform/security/proxy-exceptions/variables.tf:
--------------------------------------------------------------------------------
1 | variable "host_count" {}
2 |
3 | variable "vpn_iprange" {}
4 |
5 | variable "overlay_cidr" {}
6 |
7 | variable "service_cidr" {}
8 |
9 | variable "bastion_host" {}
10 |
11 | variable "ssh_user" {
12 | default = "root"
13 | }
14 |
15 | variable "ssh_private_key" {
16 | default = "~/.ssh/id_rsa.insecure"
17 | }
18 |
19 | variable "connections" {
20 | type = list
21 | }
22 |
--------------------------------------------------------------------------------
/terraform/security/proxy-exceptions/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | null = {
4 | source = "hashicorp/null"
5 | }
6 | }
7 | required_version = ">= 0.13"
8 | }
9 |
--------------------------------------------------------------------------------
/terraform/security/ufw/edge/README.md:
--------------------------------------------------------------------------------
1 | # UFW Kubernetes edge module
2 |
3 | This module configures rules and activates the firewall on Kubernetes edge servers.
--------------------------------------------------------------------------------
/terraform/security/ufw/etcd/README.md:
--------------------------------------------------------------------------------
1 | # UFW Etcd module
2 |
3 | This module configures rules and activates the firewall on Etcd servers.
--------------------------------------------------------------------------------
/terraform/security/ufw/etcd/files/etcd.ufw:
--------------------------------------------------------------------------------
1 | # Install into /etc/ufw/applications.d/ and run 'ufw app update'
2 |
3 | # https://docs.saltstack.com/en/latest/topics/tutorials/firewall.html
4 | [salt]
5 | title=salt
6 | description=fast and powerful configuration management and remote execution
7 | ports=4505,4506/tcp
8 |
9 | # https://docs.projectcalico.org/getting-started/kubernetes/requirements
10 | [etcd]
11 | title=Etcd
12 | description=Etcd client request
13 | ports=2379/tcp
14 |
15 | [etcd-peer]
16 | title=Etcd cluster
17 | description=Etcd cluster peering
18 | ports=2380/tcp
--------------------------------------------------------------------------------
/terraform/security/ufw/etcd/variables.tf:
--------------------------------------------------------------------------------
1 | variable "host_count" {}
2 | 
3 | variable "bastion_host" {}
4 | variable "overlay_cidr" {}
5 | 
6 | variable "ssh_user" {
7 |   default = "root"
8 | }
9 | 
10 | variable "ssh_private_key" {
11 |   default = "~/.ssh/id_rsa.insecure"
12 | }
13 | 
14 | variable "connections" {
15 |   # Host addresses the provisioners connect to over SSH.
16 |   # Legacy unquoted "list" is deprecated since TF 0.12; be explicit about the element type.
17 |   type = list(string)
18 | }
19 | 
20 | variable "private_interface" {
21 |   type = string
22 | }
23 | 
24 | variable "docker_interface" {
25 |   type = string
26 | }
27 | 
28 | variable "vpn_interface" {
29 |   type = string
30 | }
31 | 
32 | variable "vpn_port" {
33 |   type = string
34 | }
--------------------------------------------------------------------------------
/terraform/security/ufw/etcd/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     null = {
4 |       source = "hashicorp/null"
5 |     }
6 |     template = {
7 |       source = "hashicorp/template"
8 |     }
9 |   }
10 |   required_version = ">= 0.13" # first release supporting required_providers "source" addresses
11 | }
12 | 
--------------------------------------------------------------------------------
/terraform/security/ufw/master/README.md:
--------------------------------------------------------------------------------
1 | # UFW Kubernetes Master module
2 |
3 | This module configures rules and activates the firewall on Kubernetes Master servers.
--------------------------------------------------------------------------------
/terraform/security/ufw/master/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     null = {
4 |       source = "hashicorp/null"
5 |     }
6 |     template = {
7 |       source = "hashicorp/template"
8 |     }
9 |   }
10 |   required_version = ">= 0.13" # first release supporting required_providers "source" addresses
11 | }
12 | 
--------------------------------------------------------------------------------
/terraform/security/ufw/node/README.md:
--------------------------------------------------------------------------------
1 | # UFW Kubernetes Node module
2 |
3 | This module configures rules and activates the firewall on Kubernetes Node servers.
--------------------------------------------------------------------------------
/terraform/security/ufw/node/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     null = {
4 |       source = "hashicorp/null"
5 |     }
6 |     template = {
7 |       source = "hashicorp/template"
8 |     }
9 |   }
10 |   required_version = ">= 0.13" # first release supporting required_providers "source" addresses
11 | }
12 | 
--------------------------------------------------------------------------------
/terraform/security/wireguard/README.md:
--------------------------------------------------------------------------------
1 | # Wireguard Module
2 |
3 | This module manages the installation of the Wireguard VPN mesh for the Kubernetes control plane.
--------------------------------------------------------------------------------
/terraform/security/wireguard/scripts/gen_keys.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 | 
4 | # Generate a WireGuard keypair and emit it as JSON for Terraform's external data source.
5 | private_key=$(wg genkey)
6 | # printf + quoting instead of unquoted echo: base64 keys must not be subject to
7 | # word splitting or echo's implementation-defined escape handling.
8 | public_key=$(printf '%s' "$private_key" | wg pubkey)
9 | 
10 | jq -n --arg private_key "$private_key" \
11 |       --arg public_key "$public_key" \
12 |       '{"private_key":$private_key,"public_key":$public_key}'
--------------------------------------------------------------------------------
/terraform/security/wireguard/templates/interface.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = ${address},${addressv6}
3 | PrivateKey = ${private_key}
4 | ListenPort = ${port}
5 |
6 | ${peers}
--------------------------------------------------------------------------------
/terraform/security/wireguard/templates/overlay-route.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Overlay network route for Wireguard
3 | After=wg-quick@wg0.service
4 |
5 | [Service]
6 | Type=oneshot
7 | User=root
8 | ExecStart=/sbin/ip route add ${overlay_cidr} dev wg0 src ${address}
9 |
10 | [Install]
11 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/terraform/security/wireguard/templates/peer.conf:
--------------------------------------------------------------------------------
1 | [Peer]
2 | PublicKey = ${public_key}
3 | # Canonical key spelling per wg(8) is "AllowedIPs" (wg parses keys case-insensitively,
4 | # but the documented form avoids confusing readers and stricter tooling).
5 | AllowedIPs = ${allowed_ips}
6 | Endpoint = ${endpoint}:${port}
--------------------------------------------------------------------------------
/terraform/security/wireguard/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     external = {
4 |       source = "hashicorp/external"
5 |     }
6 |     null = {
7 |       source = "hashicorp/null"
8 |     }
9 |     template = {
10 |       source = "hashicorp/template"
11 |     }
12 |   }
13 |   required_version = ">= 0.13" # first release supporting required_providers "source" addresses
14 | }
15 | 
--------------------------------------------------------------------------------