├── .gitattributes
├── .github
├── ISSUE_TEMPLATE
│ ├── bug.md
│ └── enhancement.md
├── PULL_REQUEST_TEMPLATE.md
├── release.yaml
├── renovate.json5
└── workflows
│ ├── e2e.yaml
│ ├── images.yaml
│ ├── release-notes.yaml
│ ├── renovate.yaml
│ └── verify.yaml
├── .gitignore
├── .golangci.yaml
├── .run
├── experiment (kind).run.xml
├── shard (kind).run.xml
├── sharder (kind).run.xml
└── webhosting-operator (kind).run.xml
├── LICENSE
├── Makefile
├── README.md
├── cmd
├── checksum-controller
│ ├── main.go
│ └── reconciler.go
└── sharder
│ ├── app
│ ├── app.go
│ └── options.go
│ └── main.go
├── config
├── README.md
├── certificate
│ ├── certificate.yaml
│ ├── issuer.yaml
│ └── kustomization.yaml
├── crds
│ ├── kustomization.yaml
│ ├── namespace.yaml
│ └── sharding.timebertt.dev_controllerrings.yaml
├── default
│ └── kustomization.yaml
├── monitoring
│ ├── kustomization.yaml
│ ├── sharder
│ │ ├── kustomization.yaml
│ │ ├── prometheus_rbac.yaml
│ │ └── sharder_servicemonitor.yaml
│ └── sharding-exporter
│ │ ├── clusterrole.yaml
│ │ ├── clusterrolebinding.yaml
│ │ ├── config.yaml
│ │ ├── deployment.yaml
│ │ ├── kustomization.yaml
│ │ ├── rbac-proxy_clusterrole.yaml
│ │ ├── rbac-proxy_clusterrolebinding.yaml
│ │ ├── service.yaml
│ │ ├── serviceaccount.yaml
│ │ └── servicemonitor.yaml
├── rbac
│ ├── kustomization.yaml
│ ├── leader_election.yaml
│ ├── metrics_auth.yaml
│ ├── pprof_reader.yaml
│ ├── role.yaml
│ ├── rolebinding.yaml
│ └── serviceaccount.yaml
└── sharder
│ ├── config.yaml
│ ├── deployment.yaml
│ ├── kustomization.yaml
│ ├── poddisruptionbudget.yaml
│ └── service.yaml
├── docs
├── README.md
├── assets
│ ├── architecture.svg
│ ├── comparison-cpu.svg
│ ├── comparison-load.svg
│ ├── comparison-memory.svg
│ ├── comparison-network.svg
│ ├── scale-out-capacity.svg
│ ├── scale-out-load.svg
│ └── scale-out-slis.svg
├── design.md
├── development.md
├── evaluation.md
├── getting-started.md
├── implement-sharding.md
├── installation.md
└── monitoring.md
├── go.mod
├── go.sum
├── go.work
├── go.work.sum
├── hack
├── boilerplate.go.txt
├── ci-common.sh
├── ci-e2e-kind.sh
├── config
│ ├── README.md
│ ├── cert-manager
│ │ ├── kustomization.yaml
│ │ ├── patch-mutatingwebhook.yaml
│ │ ├── patch-validatingwebhook.yaml
│ │ └── resources
│ │ │ ├── cluster-issuer.yaml
│ │ │ └── kustomization.yaml
│ ├── certificates
│ │ └── host
│ │ │ ├── config.json
│ │ │ ├── generate.sh
│ │ │ ├── kustomization.yaml
│ │ │ ├── webhook-ca-key.pem
│ │ │ ├── webhook-ca.json
│ │ │ ├── webhook-ca.pem
│ │ │ ├── webhook-server-key.pem
│ │ │ ├── webhook-server.json
│ │ │ └── webhook-server.pem
│ ├── checksum-controller
│ │ ├── controller
│ │ │ ├── deployment.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── rbac.yaml
│ │ │ └── serviceaccount.yaml
│ │ └── controllerring
│ │ │ ├── controllerring.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── sharder_rbac.yaml
│ ├── external-dns
│ │ ├── kustomization.yaml
│ │ ├── namespace.yaml
│ │ └── patch-deployment.yaml
│ ├── ingress-nginx
│ │ ├── default
│ │ │ ├── kustomization.yaml
│ │ │ ├── patch_controller_resources.yaml
│ │ │ └── patch_default_ingress_class.yaml
│ │ ├── kind
│ │ │ ├── kustomization.yaml
│ │ │ └── patch_service_nodeport.yaml
│ │ └── shoot
│ │ │ ├── certificate.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── patch_service.yaml
│ ├── kind-config.yaml
│ ├── kyverno
│ │ └── kustomization.yaml
│ ├── monitoring
│ │ ├── crds
│ │ │ ├── 0alertmanagerConfigCustomResourceDefinition.yaml
│ │ │ ├── 0alertmanagerCustomResourceDefinition.yaml
│ │ │ ├── 0podmonitorCustomResourceDefinition.yaml
│ │ │ ├── 0probeCustomResourceDefinition.yaml
│ │ │ ├── 0prometheusCustomResourceDefinition.yaml
│ │ │ ├── 0prometheusagentCustomResourceDefinition.yaml
│ │ │ ├── 0prometheusruleCustomResourceDefinition.yaml
│ │ │ ├── 0scrapeconfigCustomResourceDefinition.yaml
│ │ │ ├── 0servicemonitorCustomResourceDefinition.yaml
│ │ │ ├── 0thanosrulerCustomResourceDefinition.yaml
│ │ │ ├── README.md
│ │ │ └── kustomization.yaml
│ │ ├── default
│ │ │ ├── dashboards
│ │ │ │ ├── client-go.json
│ │ │ │ ├── controller-details.json
│ │ │ │ └── controller-runtime.json
│ │ │ ├── ensure-admin-password.sh
│ │ │ ├── grafana_ingress.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── namespace.yaml
│ │ │ ├── patch_grafana_admin.yaml
│ │ │ ├── patch_grafana_networkpolicy.yaml
│ │ │ ├── patch_kubelet_metrics.yaml
│ │ │ ├── patch_kubestatemetrics.yaml
│ │ │ ├── patch_kubestatemetrics_servicemonitor.yaml
│ │ │ ├── patch_prometheus.yaml
│ │ │ └── rbac-proxy_clusterrole.yaml
│ │ ├── grafana-sidecar
│ │ │ ├── dashboards-sidecar.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── patch_grafana_sidecar.yaml
│ │ │ ├── sidecar_clusterrole.yaml
│ │ │ └── sidecar_clusterrolebinding.yaml
│ │ ├── kube-prometheus
│ │ │ ├── README.md
│ │ │ ├── blackboxExporter-clusterRole.yaml
│ │ │ ├── blackboxExporter-clusterRoleBinding.yaml
│ │ │ ├── blackboxExporter-configuration.yaml
│ │ │ ├── blackboxExporter-deployment.yaml
│ │ │ ├── blackboxExporter-networkPolicy.yaml
│ │ │ ├── blackboxExporter-service.yaml
│ │ │ ├── blackboxExporter-serviceAccount.yaml
│ │ │ ├── blackboxExporter-serviceMonitor.yaml
│ │ │ ├── grafana-config.yaml
│ │ │ ├── grafana-dashboardDatasources.yaml
│ │ │ ├── grafana-dashboardDefinitions.yaml
│ │ │ ├── grafana-dashboardSources.yaml
│ │ │ ├── grafana-deployment.yaml
│ │ │ ├── grafana-networkPolicy.yaml
│ │ │ ├── grafana-prometheusRule.yaml
│ │ │ ├── grafana-service.yaml
│ │ │ ├── grafana-serviceAccount.yaml
│ │ │ ├── grafana-serviceMonitor.yaml
│ │ │ ├── kubePrometheus-prometheusRule.yaml
│ │ │ ├── kubeStateMetrics-clusterRole.yaml
│ │ │ ├── kubeStateMetrics-clusterRoleBinding.yaml
│ │ │ ├── kubeStateMetrics-deployment.yaml
│ │ │ ├── kubeStateMetrics-networkPolicy.yaml
│ │ │ ├── kubeStateMetrics-prometheusRule.yaml
│ │ │ ├── kubeStateMetrics-service.yaml
│ │ │ ├── kubeStateMetrics-serviceAccount.yaml
│ │ │ ├── kubeStateMetrics-serviceMonitor.yaml
│ │ │ ├── kubernetesControlPlane-prometheusRule.yaml
│ │ │ ├── kubernetesControlPlane-serviceMonitorApiserver.yaml
│ │ │ ├── kubernetesControlPlane-serviceMonitorCoreDNS.yaml
│ │ │ ├── kubernetesControlPlane-serviceMonitorKubeControllerManager.yaml
│ │ │ ├── kubernetesControlPlane-serviceMonitorKubeScheduler.yaml
│ │ │ ├── kubernetesControlPlane-serviceMonitorKubelet.yaml
│ │ │ ├── kustomization.yaml
│ │ │ ├── nodeExporter-clusterRole.yaml
│ │ │ ├── nodeExporter-clusterRoleBinding.yaml
│ │ │ ├── nodeExporter-daemonset.yaml
│ │ │ ├── nodeExporter-networkPolicy.yaml
│ │ │ ├── nodeExporter-prometheusRule.yaml
│ │ │ ├── nodeExporter-service.yaml
│ │ │ ├── nodeExporter-serviceAccount.yaml
│ │ │ ├── nodeExporter-serviceMonitor.yaml
│ │ │ ├── prometheus-clusterRole.yaml
│ │ │ ├── prometheus-clusterRoleBinding.yaml
│ │ │ ├── prometheus-networkPolicy.yaml
│ │ │ ├── prometheus-prometheus.yaml
│ │ │ ├── prometheus-prometheusRule.yaml
│ │ │ ├── prometheus-roleBindingConfig.yaml
│ │ │ ├── prometheus-roleBindingSpecificNamespaces.yaml
│ │ │ ├── prometheus-roleConfig.yaml
│ │ │ ├── prometheus-roleSpecificNamespaces.yaml
│ │ │ ├── prometheus-service.yaml
│ │ │ ├── prometheus-serviceAccount.yaml
│ │ │ ├── prometheus-serviceMonitor.yaml
│ │ │ ├── prometheusAdapter-clusterRole.yaml
│ │ │ ├── prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml
│ │ │ ├── prometheusAdapter-clusterRoleBinding.yaml
│ │ │ ├── prometheusAdapter-clusterRoleBindingDelegator.yaml
│ │ │ ├── prometheusAdapter-clusterRoleServerResources.yaml
│ │ │ ├── prometheusAdapter-configMap.yaml
│ │ │ ├── prometheusAdapter-deployment.yaml
│ │ │ ├── prometheusAdapter-networkPolicy.yaml
│ │ │ ├── prometheusAdapter-podDisruptionBudget.yaml
│ │ │ ├── prometheusAdapter-roleBindingAuthReader.yaml
│ │ │ ├── prometheusAdapter-service.yaml
│ │ │ ├── prometheusAdapter-serviceAccount.yaml
│ │ │ ├── prometheusAdapter-serviceMonitor.yaml
│ │ │ ├── prometheusOperator-clusterRole.yaml
│ │ │ ├── prometheusOperator-clusterRoleBinding.yaml
│ │ │ ├── prometheusOperator-deployment.yaml
│ │ │ ├── prometheusOperator-networkPolicy.yaml
│ │ │ ├── prometheusOperator-prometheusRule.yaml
│ │ │ ├── prometheusOperator-service.yaml
│ │ │ ├── prometheusOperator-serviceAccount.yaml
│ │ │ └── prometheusOperator-serviceMonitor.yaml
│ │ ├── shoot
│ │ │ ├── kustomization.yaml
│ │ │ ├── patch_prometheus.yaml
│ │ │ └── storageclass.yaml
│ │ └── update.sh
│ ├── policy
│ │ ├── ci
│ │ │ ├── kustomization.yaml
│ │ │ └── no-requests.yaml
│ │ ├── controlplane
│ │ │ ├── etcd-main.yaml
│ │ │ ├── kube-apiserver-scale.yaml
│ │ │ ├── kube-apiserver.yaml
│ │ │ ├── kube-controller-manager.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── tests
│ │ │ │ ├── kube-apiserver-scale-awake
│ │ │ │ ├── kyverno-test.yaml
│ │ │ │ ├── scale.yaml
│ │ │ │ ├── scale_expected.yaml
│ │ │ │ └── variables.yaml
│ │ │ │ ├── kube-apiserver-scale-hibernated
│ │ │ │ ├── kyverno-test.yaml
│ │ │ │ ├── scale.yaml
│ │ │ │ └── variables.yaml
│ │ │ │ └── kube-apiserver
│ │ │ │ ├── kube-apiserver-awake.yaml
│ │ │ │ ├── kube-apiserver-awake_expected.yaml
│ │ │ │ ├── kube-apiserver-hibernated.yaml
│ │ │ │ ├── kube-apiserver-null.yaml
│ │ │ │ ├── kube-apiserver-null_expected.yaml
│ │ │ │ └── kyverno-test.yaml
│ │ └── shoot
│ │ │ ├── kustomization.yaml
│ │ │ └── sharder-scheduling.yaml
│ ├── profiling
│ │ ├── ensure-admin-password.sh
│ │ ├── kustomization.yaml
│ │ ├── parca_config.yaml
│ │ ├── parca_ingress.yaml
│ │ ├── parca_pvc.yaml
│ │ ├── patch_deployment_pvc.yaml
│ │ └── rbac_sharder.yaml
│ ├── sharder
│ │ ├── devel
│ │ │ └── kustomization.yaml
│ │ └── host
│ │ │ └── config.yaml
│ ├── shoot.yaml
│ └── skaffold.yaml
├── prepare-image-metadata.sh
├── test-e2e.env
├── test-e2e.sh
├── test-integration.env
├── test-integration.sh
├── test.sh
├── tools.go
├── tools.mk
├── tools
│ └── bin
│ │ └── .gitkeep
├── update-codegen.sh
└── vgopath-setup.sh
├── pkg
├── apis
│ ├── config
│ │ ├── doc.go
│ │ └── v1alpha1
│ │ │ ├── defaults.go
│ │ │ ├── defaults_test.go
│ │ │ ├── doc.go
│ │ │ ├── register.go
│ │ │ ├── types.go
│ │ │ ├── v1alpha1_suite_test.go
│ │ │ ├── zz_generated.deepcopy.go
│ │ │ └── zz_generated.defaults.go
│ └── sharding
│ │ ├── doc.go
│ │ └── v1alpha1
│ │ ├── constants.go
│ │ ├── constants_test.go
│ │ ├── doc.go
│ │ ├── register.go
│ │ ├── types_controllerring.go
│ │ ├── types_controllerring_test.go
│ │ ├── v1alpha1_suite_test.go
│ │ └── zz_generated.deepcopy.go
├── controller
│ ├── add.go
│ ├── controllerring
│ │ ├── add.go
│ │ ├── add_test.go
│ │ ├── controllerring_suite_test.go
│ │ ├── reconciler.go
│ │ └── reconciler_test.go
│ ├── sharder
│ │ ├── add.go
│ │ ├── reconciler.go
│ │ ├── reconciler_test.go
│ │ └── sharder_suite_test.go
│ └── shardlease
│ │ ├── add.go
│ │ ├── add_test.go
│ │ ├── reconciler.go
│ │ └── shardlease_suite_test.go
├── shard
│ ├── controller
│ │ ├── builder.go
│ │ ├── builder_test.go
│ │ ├── controller_suite_test.go
│ │ ├── predicate.go
│ │ ├── predicate_test.go
│ │ ├── reconciler.go
│ │ └── reconciler_test.go
│ └── lease
│ │ ├── lease.go
│ │ ├── lease_suite_test.go
│ │ └── lease_test.go
├── sharding
│ ├── consistenthash
│ │ ├── benchmark_test.go
│ │ ├── consistenthash_suite_test.go
│ │ ├── ring.go
│ │ └── ring_test.go
│ ├── handler
│ │ ├── controllerring.go
│ │ ├── controllerring_test.go
│ │ ├── handler_suite_test.go
│ │ ├── lease.go
│ │ └── lease_test.go
│ ├── key
│ │ ├── key.go
│ │ ├── key_suite_test.go
│ │ └── key_test.go
│ ├── leases
│ │ ├── leases_suite_test.go
│ │ ├── shards.go
│ │ ├── shards_test.go
│ │ ├── state.go
│ │ ├── state_test.go
│ │ ├── times.go
│ │ └── times_test.go
│ ├── metrics
│ │ └── metrics.go
│ ├── predicate
│ │ ├── controllerring.go
│ │ ├── controllerring_test.go
│ │ ├── lease.go
│ │ ├── lease_test.go
│ │ └── predicate_suite_test.go
│ └── ring
│ │ ├── ring.go
│ │ ├── ring_suite_test.go
│ │ └── ring_test.go
├── utils
│ ├── client
│ │ ├── client_suite_test.go
│ │ ├── options.go
│ │ ├── options_test.go
│ │ └── scheme.go
│ ├── errors
│ │ ├── errors_suite_test.go
│ │ ├── multi.go
│ │ └── multi_test.go
│ ├── healthz
│ │ ├── cache.go
│ │ ├── cache_test.go
│ │ └── healthz_suite_test.go
│ ├── pager
│ │ ├── pager.go
│ │ ├── pager_suite_test.go
│ │ └── pager_test.go
│ ├── routes
│ │ └── profiling.go
│ ├── strings.go
│ ├── strings_test.go
│ ├── test
│ │ ├── envtest.go
│ │ ├── matchers
│ │ │ ├── condition.go
│ │ │ ├── errors.go
│ │ │ ├── matchers.go
│ │ │ └── object.go
│ │ ├── object.go
│ │ └── paths.go
│ └── utils_suite_test.go
└── webhook
│ ├── add.go
│ └── sharder
│ ├── add.go
│ ├── add_test.go
│ ├── handler.go
│ ├── handler_test.go
│ ├── metrics.go
│ └── sharder_suite_test.go
├── test
├── e2e
│ ├── checksum_controller_test.go
│ └── e2e_suite_test.go
└── integration
│ ├── shard
│ ├── controller
│ │ ├── controller_suite_test.go
│ │ ├── controller_test.go
│ │ └── reconciler.go
│ └── lease
│ │ ├── lease_suite_test.go
│ │ └── lease_test.go
│ └── sharder
│ ├── controller
│ ├── controllerring
│ │ ├── controllerring_suite_test.go
│ │ └── controllerring_test.go
│ ├── sharder
│ │ ├── sharder_suite_test.go
│ │ └── sharder_test.go
│ └── shardlease
│ │ ├── shardlease_suite_test.go
│ │ └── shardlease_test.go
│ └── webhook
│ └── sharder
│ ├── sharder_suite_test.go
│ └── sharder_test.go
└── webhosting-operator
├── PROJECT
├── README.md
├── cmd
├── experiment
│ └── main.go
├── measure
│ ├── main.go
│ └── test.yaml
├── samples-generator
│ └── main.go
└── webhosting-operator
│ └── main.go
├── config
├── experiment
│ ├── base
│ │ ├── job.yaml
│ │ ├── kustomization.yaml
│ │ ├── namespace.yaml
│ │ ├── prometheus_rbac.yaml
│ │ ├── rbac.yaml
│ │ ├── service.yaml
│ │ └── servicemonitor.yaml
│ ├── basic
│ │ └── kustomization.yaml
│ └── scale-out
│ │ └── kustomization.yaml
├── manager
│ ├── base
│ │ ├── kustomization.yaml
│ │ ├── manager.yaml
│ │ ├── metrics_auth.yaml
│ │ ├── namespace.yaml
│ │ └── service.yaml
│ ├── controllerring
│ │ ├── controllerring.yaml
│ │ ├── kustomization.yaml
│ │ ├── manager_patch.yaml
│ │ └── sharder_rbac.yaml
│ ├── crds
│ │ ├── kustomization.yaml
│ │ ├── kustomizeconfig.yaml
│ │ ├── webhosting.timebertt.dev_themes.yaml
│ │ └── webhosting.timebertt.dev_websites.yaml
│ ├── devel
│ │ └── kustomization.yaml
│ ├── overlays
│ │ ├── debug
│ │ │ ├── kustomization.yaml
│ │ │ └── manager_debug_patch.yaml
│ │ ├── default
│ │ │ └── kustomization.yaml
│ │ ├── devel
│ │ │ └── kustomization.yaml
│ │ ├── non-sharded-devel
│ │ │ └── kustomization.yaml
│ │ ├── non-sharded
│ │ │ ├── kustomization.yaml
│ │ │ └── manager_patch.yaml
│ │ └── shoot
│ │ │ ├── default
│ │ │ └── kustomization.yaml
│ │ │ ├── devel
│ │ │ └── kustomization.yaml
│ │ │ ├── non-sharded-devel
│ │ │ └── kustomization.yaml
│ │ │ └── non-sharded
│ │ │ └── kustomization.yaml
│ ├── rbac
│ │ ├── kustomization.yaml
│ │ ├── leader_election_role.yaml
│ │ ├── leader_election_role_binding.yaml
│ │ ├── parca_rbac.yaml
│ │ ├── role.yaml
│ │ ├── role_binding.yaml
│ │ ├── service_account.yaml
│ │ ├── theme_editor_role.yaml
│ │ ├── theme_viewer_role.yaml
│ │ ├── website_editor_role.yaml
│ │ └── website_viewer_role.yaml
│ └── with-dns
│ │ ├── config.yaml
│ │ ├── kustomization.yaml
│ │ └── manager_patch.yaml
├── monitoring
│ ├── default
│ │ ├── dashboards
│ │ │ ├── experiments.json
│ │ │ ├── sharding.json
│ │ │ └── webhosting.json
│ │ └── kustomization.yaml
│ └── webhosting-operator
│ │ ├── kustomization.yaml
│ │ ├── prometheus_rbac.yaml
│ │ ├── prometheusrule.yaml
│ │ └── servicemonitor.yaml
├── policy
│ ├── experiment-scheduling.yaml
│ ├── guaranteed-resources.yaml
│ ├── kustomization.yaml
│ ├── scale-up-worker-experiment.yaml
│ └── webhosting-operator-scheduling.yaml
└── samples
│ ├── kustomization.yaml
│ ├── project_namespace.yaml
│ ├── theme_exciting.yaml
│ ├── theme_lame.yaml
│ ├── website_kubecon.yaml
│ ├── website_library.yaml
│ └── website_museum.yaml
├── go.mod
├── go.sum
├── pkg
├── apis
│ ├── config
│ │ ├── doc.go
│ │ └── v1alpha1
│ │ │ ├── defaults.go
│ │ │ ├── doc.go
│ │ │ ├── register.go
│ │ │ ├── types.go
│ │ │ ├── zz_generated.deepcopy.go
│ │ │ └── zz_generated.defaults.go
│ └── webhosting
│ │ ├── doc.go
│ │ └── v1alpha1
│ │ ├── constants.go
│ │ ├── doc.go
│ │ ├── register.go
│ │ ├── types_theme.go
│ │ ├── types_website.go
│ │ └── zz_generated.deepcopy.go
├── controllers
│ └── webhosting
│ │ ├── common.go
│ │ ├── suite_test.go
│ │ ├── templates
│ │ ├── index.go
│ │ ├── index.tmpl
│ │ ├── index_test.go
│ │ ├── internal
│ │ │ └── examples.go
│ │ ├── nginx.conf.tmpl
│ │ ├── nginx.go
│ │ ├── nginx_test.go
│ │ ├── templates_suite_test.go
│ │ └── testserver
│ │ │ └── server.go
│ │ └── website_controller.go
├── experiment
│ ├── generator
│ │ ├── options.go
│ │ ├── project.go
│ │ ├── reconciler.go
│ │ ├── theme.go
│ │ ├── utils.go
│ │ └── website.go
│ ├── scenario.go
│ ├── scenario
│ │ ├── all
│ │ │ └── all.go
│ │ ├── base
│ │ │ └── base.go
│ │ ├── basic
│ │ │ └── basic.go
│ │ └── scale-out
│ │ │ └── scale_out.go
│ └── tracker
│ │ ├── tracker.go
│ │ └── website.go
├── metrics
│ ├── add.go
│ ├── theme.go
│ └── website.go
└── utils
│ ├── kubernetes.go
│ └── utils.go
└── test
└── e2e
├── e2e_suite_test.go
└── webhosting_operator_test.go
/.gitattributes:
--------------------------------------------------------------------------------
1 | docs/assets/*.jpg filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Report a bug encountered while using this project
4 | labels: bug
5 | ---
6 |
7 | **What happened**:
8 |
9 | **What you expected to happen**:
10 |
11 | **How to reproduce it (as minimally and precisely as possible)**:
12 |
13 | **Anything else we need to know?**:
14 |
15 | **Environment**:
16 |
17 | - kubernetes-controller-sharding version:
18 | - Kubernetes version:
19 | - Others:
20 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Enhancement Request
3 | about: Suggest an enhancement to this project
4 | labels: enhancement
5 | ---
6 |
7 | **What would you like to be added**:
8 |
9 | **Why is this needed**:
10 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | **What this PR does / why we need it**:
2 |
3 | **Which issue(s) this PR fixes**:
4 | Fixes #
5 |
6 | **Special notes for your reviewer**:
7 |
--------------------------------------------------------------------------------
/.github/release.yaml:
--------------------------------------------------------------------------------
1 | changelog:
2 | exclude:
3 | labels:
4 | - no-release-note
5 | categories:
6 | - title: ⚠️ Breaking Changes
7 | labels:
8 | - breaking
9 | - title: ✨ Features
10 | labels:
11 | - enhancement
12 | - title: 🐛 Bug Fixes
13 | labels:
14 | - bug
15 | - title: 📖 Documentation
16 | labels:
17 | - documentation
18 | - title: 🧹 Cleanups
19 | labels:
20 | - cleanup
21 | - title: 🤖 Dependencies
22 | labels:
23 | - dependencies
24 | - title: ℹ️ Other Changes
25 | labels:
26 | - "*"
27 |
--------------------------------------------------------------------------------
/.github/workflows/e2e.yaml:
--------------------------------------------------------------------------------
1 | name: e2e
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | tags:
8 | - v*
9 | paths-ignore:
10 | - "**.md"
11 | pull_request:
12 |
13 | jobs:
14 | e2e-kind:
15 | runs-on: ubuntu-latest
16 | env:
17 | ARTIFACTS: artifacts
18 |
19 | steps:
20 | - uses: actions/checkout@v4
21 | - uses: actions/setup-go@v5
22 | with:
23 | go-version-file: go.mod
24 | - run: make ci-e2e-kind
25 | - uses: actions/upload-artifact@v4
26 | if: always()
27 | with:
28 | name: e2e-artifacts
29 | path: artifacts
30 | if-no-files-found: error
31 |
--------------------------------------------------------------------------------
/.github/workflows/release-notes.yaml:
--------------------------------------------------------------------------------
1 | name: release-notes
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | workflow_dispatch: {}
8 |
9 | jobs:
10 | release-notes:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 | - name: Draft release notes
15 | run: |
16 | set -o errexit
17 | set -o nounset
18 | set -o pipefail
19 | set -x
20 |
21 | latest_tag="$(gh release view --json tagName --jq .tagName)"
22 |
23 | major="$(echo "$latest_tag" | cut -d. -f1)"
24 | minor="$(echo "$latest_tag" | cut -d. -f2)"
25 | new_tag="$major.$((minor+1)).0"
26 |
27 | if [ "$(gh release view "$new_tag" --json isDraft --jq .isDraft)" = true ] ; then
28 | # clean up previous draft release
29 | gh release delete -y "$new_tag"
30 | fi
31 |
32 | gh release create "$new_tag" --draft --generate-notes --notes-start-tag="${latest_tag%.*}.0"
33 | env:
34 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
35 |
--------------------------------------------------------------------------------
/.github/workflows/verify.yaml:
--------------------------------------------------------------------------------
1 | name: verify
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | tags:
8 | - v*
9 | pull_request:
10 |
11 | jobs:
12 | verify:
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v4
17 | - uses: actions/setup-go@v5
18 | with:
19 | go-version-file: go.mod
20 | - run: make verify
21 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.secret*
2 | .envrc
3 | hack/kind_kubeconfig.yaml
4 | .gitguardian.yaml
5 | .ko.yaml
6 |
7 | # Binaries for programs and plugins
8 | *.exe
9 | *.exe~
10 | *.dll
11 | *.so
12 | *.dylib
13 | bin
14 | testbin/*
15 |
16 | # Test binary, build with `go test -c`
17 | *.test
18 |
19 | # Output of the go coverage tool, specifically when used with LiteIDE
20 | *.out
21 |
22 | # editor and IDE settings
23 | .idea
24 | .vscode
25 | *.swp
26 | *.swo
27 | *~
28 |
--------------------------------------------------------------------------------
/.run/experiment (kind).run.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/.run/shard (kind).run.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/.run/sharder (kind).run.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
--------------------------------------------------------------------------------
/.run/webhosting-operator (kind).run.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/cmd/sharder/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
19 | import (
20 | "fmt"
21 | "os"
22 |
23 | "sigs.k8s.io/controller-runtime/pkg/manager/signals"
24 |
25 | "github.com/timebertt/kubernetes-controller-sharding/cmd/sharder/app"
26 | )
27 |
28 | func main() {
29 | if err := app.NewCommand().ExecuteContext(signals.SetupSignalHandler()); err != nil {
30 | fmt.Println(err)
31 | os.Exit(1)
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/config/README.md:
--------------------------------------------------------------------------------
1 | # config
2 |
3 | This directory hosts manifests for deploying the sharding components.
4 | Manifests of components for the development setup should be hosted in [`hack/config`](../hack/config) instead.
5 | In other words, this directory should only contain manifests that are useful for others wanting to reuse the sharding components in their setup.
6 |
--------------------------------------------------------------------------------
/config/certificate/certificate.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: Certificate
3 | metadata:
4 | name: webhook-server
5 | spec:
6 | issuerRef:
7 | name: selfsigned
8 | commonName: sharding:sharder:webhook
9 | dnsNames:
10 | - sharder.sharding-system
11 | - sharder.sharding-system.svc
12 | - sharder.sharding-system.svc.cluster.local
13 | secretName: webhook-server
14 |
--------------------------------------------------------------------------------
/config/certificate/issuer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: Issuer
3 | metadata:
4 | name: selfsigned
5 | spec:
6 | selfSigned: {}
7 |
--------------------------------------------------------------------------------
/config/certificate/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1alpha1
2 | kind: Component
3 |
4 | namespace: sharding-system
5 |
6 | labels:
7 | - includeSelectors: true
8 | pairs:
9 | app.kubernetes.io/name: controller-sharding
10 |
11 | resources:
12 | - certificate.yaml
13 | - issuer.yaml
14 |
15 | patches:
16 | - patch: |
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 | name: sharder
21 | namespace: sharding-system
22 | spec:
23 | template:
24 | spec:
25 | containers:
26 | - name: sharder
27 | volumeMounts:
28 | - name: cert
29 | mountPath: /tmp/k8s-webhook-server/serving-certs
30 | volumes:
31 | - name: cert
32 | secret:
33 | secretName: webhook-server
34 |
--------------------------------------------------------------------------------
/config/crds/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | labels:
5 | - includeSelectors: true
6 | pairs:
7 | app.kubernetes.io/name: controller-sharding
8 |
9 | resources:
10 | - namespace.yaml
11 | - sharding.timebertt.dev_controllerrings.yaml
12 |
--------------------------------------------------------------------------------
/config/crds/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: sharding-system
5 |
--------------------------------------------------------------------------------
/config/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../crds
6 | - ../sharder
7 |
8 | components:
9 | - ../certificate
10 |
--------------------------------------------------------------------------------
/config/monitoring/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - sharder
6 | - sharding-exporter
7 |
--------------------------------------------------------------------------------
/config/monitoring/sharder/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: sharding-system
5 |
6 | resources:
7 | - sharder_servicemonitor.yaml
8 | # Grant the prometheus instance running in the "monitoring" namespace the permissions
9 | # required for service discovery in the "sharding-system" namespace.
10 | - prometheus_rbac.yaml
11 |
--------------------------------------------------------------------------------
/config/monitoring/sharder/prometheus_rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: prometheus
7 | app.kubernetes.io/instance: k8s
8 | app.kubernetes.io/name: prometheus
9 | name: prometheus-k8s-service-discovery
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - services
15 | - endpoints
16 | - pods
17 | verbs:
18 | - get
19 | - list
20 | - watch
21 | ---
22 | apiVersion: rbac.authorization.k8s.io/v1
23 | kind: RoleBinding
24 | metadata:
25 | labels:
26 | app.kubernetes.io/component: prometheus
27 | app.kubernetes.io/instance: k8s
28 | app.kubernetes.io/name: prometheus
29 | name: prometheus-k8s-service-discovery
30 | roleRef:
31 | apiGroup: rbac.authorization.k8s.io
32 | kind: Role
33 | name: prometheus-k8s-service-discovery
34 | subjects:
35 | - kind: ServiceAccount
36 | name: prometheus-k8s
37 | namespace: monitoring
38 |
--------------------------------------------------------------------------------
/config/monitoring/sharder/sharder_servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: sharder
5 | labels:
6 | app.kubernetes.io/name: controller-sharding
7 | app.kubernetes.io/component: sharder
8 | spec:
9 | jobLabel: app.kubernetes.io/component
10 | endpoints:
11 | - path: /metrics
12 | port: metrics
13 | scheme: https
14 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
15 | honorLabels: true
16 | interval: 10s
17 | scrapeTimeout: 10s
18 | tlsConfig:
19 | insecureSkipVerify: true
20 | relabelings:
21 | - action: labelmap
22 | regex: "__meta_kubernetes_pod_label_label_prometheus_io_(.*)"
23 | replacement: "${1}"
24 | selector:
25 | matchLabels:
26 | app.kubernetes.io/name: controller-sharding
27 | app.kubernetes.io/component: sharder
28 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: sharding:exporter
5 | rules:
6 | - apiGroups:
7 | - apiextensions.k8s.io
8 | resources:
9 | - customresourcedefinitions
10 | verbs:
11 | - get
12 | - list
13 | - watch
14 | - apiGroups:
15 | - coordination.k8s.io
16 | resources:
17 | - leases
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - sharding.timebertt.dev
24 | resources:
25 | - controllerrings
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: sharding:exporter
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: sharding:exporter
9 | subjects:
10 | - kind: ServiceAccount
11 | name: sharding-exporter
12 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: monitoring
5 |
6 | generatorOptions:
7 | disableNameSuffixHash: true
8 |
9 | labels:
10 | - includeSelectors: true
11 | pairs:
12 | app.kubernetes.io/name: controller-sharding
13 | app.kubernetes.io/component: sharding-exporter
14 |
15 | resources:
16 | - clusterrole.yaml
17 | - clusterrolebinding.yaml
18 | - rbac-proxy_clusterrole.yaml
19 | - rbac-proxy_clusterrolebinding.yaml
20 | - serviceaccount.yaml
21 | - service.yaml
22 | - deployment.yaml
23 | - servicemonitor.yaml
24 |
25 | configMapGenerator:
26 | - name: sharding-exporter-config
27 | files:
28 | - config.yaml
29 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/rbac-proxy_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: sharding:exporter:rbac-proxy
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/rbac-proxy_clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: sharding:exporter:rbac-proxy
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: sharding:exporter:rbac-proxy
9 | subjects:
10 | - kind: ServiceAccount
11 | name: sharding-exporter
12 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: sharding-exporter
5 | spec:
6 | clusterIP: None
7 | ports:
8 | - name: https-main
9 | port: 8443
10 | targetPort: https-main
11 | - name: https-self
12 | port: 9443
13 | targetPort: https-self
14 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sharding-exporter
5 | automountServiceAccountToken: false
6 |
--------------------------------------------------------------------------------
/config/monitoring/sharding-exporter/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: sharding-exporter
5 | spec:
6 | jobLabel: app.kubernetes.io/component
7 | endpoints:
8 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
9 | honorLabels: true
10 | interval: 10s
11 | port: https-main
12 | relabelings:
13 | - action: labeldrop
14 | regex: (pod|service|endpoint|namespace)
15 | scheme: https
16 | scrapeTimeout: 10s
17 | tlsConfig:
18 | insecureSkipVerify: true
19 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
20 | interval: 30s
21 | port: https-self
22 | scheme: https
23 | tlsConfig:
24 | insecureSkipVerify: true
25 | selector:
26 | matchLabels:
27 | app.kubernetes.io/name: controller-sharding
28 | app.kubernetes.io/component: sharding-exporter
29 |
--------------------------------------------------------------------------------
/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - serviceaccount.yaml
6 | - leader_election.yaml
7 | - metrics_auth.yaml
8 | - role.yaml
9 | - rolebinding.yaml
10 | - pprof_reader.yaml
11 |
12 | patches:
13 | # This is a workaround for controller-gen not being able to handle colons in the role name option.
14 | - target:
15 | kind: ClusterRole
16 | name: sharder
17 | patch: |
18 | - op: replace
19 | path: /metadata/name
20 | value: sharding:sharder
21 |
--------------------------------------------------------------------------------
/config/rbac/leader_election.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: sharding:sharder:leader-election
6 | rules:
7 | - apiGroups:
8 | - coordination.k8s.io
9 | resources:
10 | - leases
11 | verbs:
12 | - get
13 | - create
14 | - update
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - events
19 | verbs:
20 | - create
21 | - patch
22 | ---
23 | apiVersion: rbac.authorization.k8s.io/v1
24 | kind: RoleBinding
25 | metadata:
26 | name: sharding:sharder:leader-election
27 | roleRef:
28 | apiGroup: rbac.authorization.k8s.io
29 | kind: Role
30 | name: sharding:sharder:leader-election
31 | subjects:
32 | - kind: ServiceAccount
33 | name: sharder
34 | namespace: sharding-system
35 |
--------------------------------------------------------------------------------
/config/rbac/metrics_auth.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: sharding:metrics-auth
6 | rules:
7 | - apiGroups:
8 | - authentication.k8s.io
9 | resources:
10 | - tokenreviews
11 | verbs:
12 | - create
13 | - apiGroups:
14 | - authorization.k8s.io
15 | resources:
16 | - subjectaccessreviews
17 | verbs:
18 | - create
19 | ---
20 | apiVersion: rbac.authorization.k8s.io/v1
21 | kind: ClusterRoleBinding
22 | metadata:
23 | name: sharding:metrics-auth
24 | roleRef:
25 | apiGroup: rbac.authorization.k8s.io
26 | kind: ClusterRole
27 | name: sharding:metrics-auth
28 | subjects:
29 | - kind: ServiceAccount
30 | name: sharder
31 | namespace: sharding-system
32 |
--------------------------------------------------------------------------------
/config/rbac/pprof_reader.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: sharding:sharder:pprof-reader
6 | rules:
7 | - nonResourceURLs:
8 | - "/debug/pprof/allocs"
9 | - "/debug/pprof/block"
10 | - "/debug/pprof/goroutine"
11 | - "/debug/pprof/heap"
12 | - "/debug/pprof/mutex"
13 | - "/debug/pprof/profile"
14 | - "/debug/pprof/symbol"
15 | - "/debug/pprof/threadcreate"
16 | - "/debug/pprof/trace"
17 | verbs:
18 | - get
19 |
--------------------------------------------------------------------------------
/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: sharder
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - events
11 | verbs:
12 | - create
13 | - patch
14 | - apiGroups:
15 | - ""
16 | resources:
17 | - namespaces
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - admissionregistration.k8s.io
24 | resources:
25 | - mutatingwebhookconfigurations
26 | verbs:
27 | - create
28 | - patch
29 | - apiGroups:
30 | - coordination.k8s.io
31 | resources:
32 | - leases
33 | verbs:
34 | - delete
35 | - get
36 | - list
37 | - patch
38 | - update
39 | - watch
40 | - apiGroups:
41 | - sharding.timebertt.dev
42 | resources:
43 | - controllerrings
44 | verbs:
45 | - get
46 | - list
47 | - watch
48 | - apiGroups:
49 | - sharding.timebertt.dev
50 | resources:
51 | - controllerrings/status
52 | verbs:
53 | - patch
54 | - update
55 |
--------------------------------------------------------------------------------
/config/rbac/rolebinding.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: sharding:sharder
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: sharding:sharder
10 | subjects:
11 | - kind: ServiceAccount
12 | name: sharder
13 | namespace: sharding-system
14 |
--------------------------------------------------------------------------------
/config/rbac/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: sharder
5 | automountServiceAccountToken: false
6 |
--------------------------------------------------------------------------------
/config/sharder/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: config.sharding.timebertt.dev/v1alpha1
2 | kind: SharderConfig
3 | webhook:
4 | config:
5 | annotations:
6 | # Technically, this belongs to the certificate component. It doesn't hurt to add this by default though.
7 | # Kustomize doesn't allow merging config files in ConfigMaps. Hence, keep the full default config here.
8 | cert-manager.io/inject-ca-from: sharding-system/webhook-server
9 |
--------------------------------------------------------------------------------
/config/sharder/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: sharding-system
5 |
6 | generatorOptions:
7 | disableNameSuffixHash: true
8 |
9 | labels:
10 | - includeSelectors: true
11 | pairs:
12 | app.kubernetes.io/name: controller-sharding
13 |
14 | images:
15 | - name: sharder
16 | newName: ghcr.io/timebertt/kubernetes-controller-sharding/sharder
17 | newTag: latest
18 |
19 | resources:
20 | - deployment.yaml
21 | - poddisruptionbudget.yaml
22 | - service.yaml
23 | - ../rbac
24 |
25 | configMapGenerator:
26 | - name: sharder-config
27 | options:
28 | labels:
29 | app.kubernetes.io/component: sharder
30 | files:
31 | - config=../sharder/config.yaml
32 |
--------------------------------------------------------------------------------
/config/sharder/poddisruptionbudget.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: sharder
6 | name: sharder
7 | spec:
8 | maxUnavailable: 1
9 | selector:
10 | matchLabels:
11 | app.kubernetes.io/component: sharder
12 |
--------------------------------------------------------------------------------
/config/sharder/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: sharder
5 | namespace: sharding-system
6 | labels:
7 | app.kubernetes.io/component: sharder
8 | spec:
9 | type: ClusterIP
10 | selector:
11 | app.kubernetes.io/component: sharder
12 | ports:
13 | - port: 443
14 | name: webhook
15 | protocol: TCP
16 | targetPort: webhook
17 | - port: 8080
18 | name: metrics
19 | protocol: TCP
20 | targetPort: metrics
21 |
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Documentation Index
2 |
3 | - [Getting Started With Controller Sharding](getting-started.md) ⬅️ start here, if you're new to the project
4 | - [Install the Sharding Components](installation.md)
5 | - [Implement Sharding in Your Controller](implement-sharding.md)
6 | - [Monitoring the Sharding Components](monitoring.md)
7 | - [Design](design.md)
8 | - [Evaluating the Sharding Mechanism](evaluation.md)
9 | - [Development and Testing Setup](development.md)
10 |
--------------------------------------------------------------------------------
/go.work:
--------------------------------------------------------------------------------
1 | go 1.23.0
2 |
3 | toolchain go1.23.5
4 |
5 | use (
6 | .
7 | ./webhosting-operator
8 | )
9 |
--------------------------------------------------------------------------------
/hack/boilerplate.go.txt:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
--------------------------------------------------------------------------------
/hack/ci-common.sh:
--------------------------------------------------------------------------------
export_artifacts() {
  # Nothing to do when the CI system did not request artifact collection.
  if [ -z "${ARTIFACTS:-}" ]; then
    return 0
  fi

  mkdir -p "$ARTIFACTS"
  cluster_name=sharding

  # Best-effort export: never fail the CI job on artifact collection.
  echo "> Exporting logs of kind cluster '$cluster_name'"
  kind export logs "$ARTIFACTS" --name "$cluster_name" || true

  echo "> Exporting events of kind cluster '$cluster_name'"
  export_events
}
12 |
export_events() {
  # Dump the events of every namespace into one log file per namespace,
  # sorted chronologically, so test failures can be correlated with events.
  local out_dir="$ARTIFACTS/events"
  mkdir -p "$out_dir"

  # Namespace names cannot contain whitespace, so word splitting is safe here.
  for namespace in $(kubectl get ns -oname | cut -d/ -f2); do
    kubectl -n "$namespace" get event --sort-by=lastTimestamp >"$out_dir/$namespace.log" 2>&1 || true
  done
}
21 |
--------------------------------------------------------------------------------
/hack/ci-e2e-kind.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# CI entry point for the e2e tests: brings up a kind cluster, deploys the
# components, runs the e2e suite, and always tears the cluster down again.

set -o nounset
set -o pipefail
set -o errexit

# Provides export_artifacts (and export_events) used in the EXIT trap below.
source "$(dirname "$0")/ci-common.sh"

# test setup
make kind-up
export KUBECONFIG=$PWD/hack/kind_kubeconfig.yaml

# export all container logs and events after test execution
# (the trap runs on both success and failure, so artifacts are always collected
# before the cluster is deleted)
trap '{
  export_artifacts
  make kind-down
}' EXIT

# deploy and test
make up SKAFFOLD_TAIL=false
make test-e2e GINKGO_FLAGS="--github-output"
22 |
--------------------------------------------------------------------------------
/hack/config/README.md:
--------------------------------------------------------------------------------
1 | # dev
2 |
3 | This directory hosts manifests of components for the development setup.
4 | Manifests of the sharding components are contained in [`config`](../../config).
5 | That is, this directory should only host "internal" manifests that are not meant to be reused outside this repository.
6 |
--------------------------------------------------------------------------------
/hack/config/cert-manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - https://github.com/cert-manager/cert-manager/releases/download/v1.17.2/cert-manager.yaml
6 |
7 | patches:
8 | # lower the webhook timeouts to make the webhooks compliant with gardener's requirements
9 | - path: patch-validatingwebhook.yaml
10 | - path: patch-mutatingwebhook.yaml
11 |
--------------------------------------------------------------------------------
/hack/config/cert-manager/patch-mutatingwebhook.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: admissionregistration.k8s.io/v1
2 | kind: MutatingWebhookConfiguration
3 | metadata:
4 | name: cert-manager-webhook
5 | webhooks:
6 | - name: webhook.cert-manager.io
7 | timeoutSeconds: 15
8 |
--------------------------------------------------------------------------------
/hack/config/cert-manager/patch-validatingwebhook.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: admissionregistration.k8s.io/v1
2 | kind: ValidatingWebhookConfiguration
3 | metadata:
4 | name: cert-manager-webhook
5 | webhooks:
6 | - name: webhook.cert-manager.io
7 | timeoutSeconds: 15
8 |
--------------------------------------------------------------------------------
/hack/config/cert-manager/resources/cluster-issuer.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: ClusterIssuer
3 | metadata:
4 | name: letsencrypt-http01
5 | spec:
6 | acme:
7 | email: null@timebertt.dev
8 | server: https://acme-v02.api.letsencrypt.org/directory
9 | privateKeySecretRef:
10 | name: http01-timebertt-dev
11 | solvers:
12 | - http01:
13 | ingress:
14 | ingressClassName: nginx
15 |
--------------------------------------------------------------------------------
/hack/config/cert-manager/resources/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - cluster-issuer.yaml
6 |
--------------------------------------------------------------------------------
/hack/config/certificates/host/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "43800h"
5 | },
6 | "profiles": {
7 | "server": {
8 | "usages": [
9 | "signing",
10 | "key encipherment",
11 | "server auth"
12 | ],
13 | "expiry": "43800h"
14 | }
15 | }
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/hack/config/certificates/host/generate.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Generate a self-signed webhook CA and a server certificate for running the
# sharder's webhook server on the host machine (development setup).

# Fail fast on errors, unset variables, and failing pipelines — consistent with
# the other scripts in this repository. Without errexit/pipefail, a failing
# `cfssl gencert` pipeline would go unnoticed and leave a broken cert chain.
set -o nounset
set -o pipefail
set -o errexit

# Both tools are required: cfssl generates the certs, cfssljson writes them out.
for tool in cfssl cfssljson; do
  if ! command -v "$tool" &>/dev/null ; then
    echo "$tool not found, install it from https://github.com/cloudflare/cfssl"
    exit 1
  fi
done

# All certificate config files live next to this script.
cd "$(dirname "$0")"

# Remove previously generated key material before regenerating.
rm -f ./*.pem

# Generate the self-signed CA certificate and key.
cfssl gencert -config config.json -initca webhook-ca.json | cfssljson -bare webhook-ca

# Issue the webhook server certificate, signed by the CA generated above.
cfssl gencert -config config.json -ca=webhook-ca.pem -ca-key=webhook-ca-key.pem -profile=server webhook-server.json | cfssljson -bare webhook-server

# The certificate signing requests are only an intermediate artifact.
rm -f ./*.csr
17 |
--------------------------------------------------------------------------------
/hack/config/certificates/host/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: sharding-system
5 |
6 | generatorOptions:
7 | disableNameSuffixHash: true
8 |
9 | secretGenerator:
10 | - name: webhook-ca
11 | options:
12 | annotations:
13 | cert-manager.io/allow-direct-injection: "true"
14 | files:
15 | - ca.crt=webhook-ca.pem
16 |
--------------------------------------------------------------------------------
/hack/config/certificates/host/webhook-ca.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "sharding:sharder",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 4096
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/hack/config/certificates/host/webhook-server.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "sharding:sharder:webhook",
3 | "key": {
4 | "algo": "rsa",
5 | "size": 4096
6 | },
7 | "hosts": [
8 | "localhost",
9 | "host.docker.internal",
10 | "sharder.sharding-system",
11 | "sharder.sharding-system.svc",
12 | "sharder.sharding-system.svc.cluster.local"
13 | ]
14 | }
15 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controller/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: checksum-controller
5 | spec:
6 | replicas: 3
7 | template:
8 | spec:
9 | automountServiceAccountToken: true
10 | securityContext:
11 | runAsNonRoot: true
12 | containers:
13 | - name: checksum-controller
14 | image: checksum-controller:latest
15 | args:
16 | - --zap-devel
17 | env:
18 | - name: DISABLE_HTTP2
19 | value: "true"
20 | securityContext:
21 | allowPrivilegeEscalation: false
22 | resources:
23 | requests:
24 | cpu: 25m
25 | memory: 50Mi
26 | serviceAccountName: checksum-controller
27 | terminationGracePeriodSeconds: 10
28 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controller/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: default
5 |
6 | labels:
7 | - includeSelectors: true
8 | pairs:
9 | app.kubernetes.io/name: controller-sharding
10 | app.kubernetes.io/component: checksum-controller
11 |
12 | images:
13 | - name: checksum-controller
14 | newName: ghcr.io/timebertt/kubernetes-controller-sharding/checksum-controller
15 | newTag: latest
16 |
17 | resources:
18 | - ../controllerring
19 | - deployment.yaml
20 | - rbac.yaml
21 | - serviceaccount.yaml
22 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controller/rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: sharding:checksum-controller
6 | rules:
7 | - apiGroups:
8 | - coordination.k8s.io
9 | resources:
10 | - leases
11 | verbs:
12 | - get
13 | - create
14 | - update
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - events
19 | verbs:
20 | - create
21 | - patch
22 | - apiGroups:
23 | - ""
24 | resources:
25 | - secrets
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - patch
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - configmaps
35 | verbs:
36 | - get
37 | - list
38 | - watch
39 | - create
40 | ---
41 | apiVersion: rbac.authorization.k8s.io/v1
42 | kind: RoleBinding
43 | metadata:
44 | name: sharding:checksum-controller
45 | roleRef:
46 | apiGroup: rbac.authorization.k8s.io
47 | kind: Role
48 | name: sharding:checksum-controller
49 | subjects:
50 | - kind: ServiceAccount
51 | name: checksum-controller
52 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controller/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: checksum-controller
5 | automountServiceAccountToken: false
6 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controllerring/controllerring.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sharding.timebertt.dev/v1alpha1
2 | kind: ControllerRing
3 | metadata:
4 | name: checksum-controller
5 | spec:
6 | resources:
7 | - group: ""
8 | resource: secrets
9 | controlledResources:
10 | - group: ""
11 | resource: configmaps
12 | namespaceSelector:
13 | matchLabels:
14 | kubernetes.io/metadata.name: default
15 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controllerring/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - controllerring.yaml
6 | - sharder_rbac.yaml
7 |
--------------------------------------------------------------------------------
/hack/config/checksum-controller/controllerring/sharder_rbac.yaml:
--------------------------------------------------------------------------------
1 | # These manifests grant the sharder controller permissions to act on resources that we listed in the ControllerRing.
2 | # We need to grant these permissions explicitly depending on what we configured. Otherwise, the sharder would require
3 | # cluster-admin access.
4 | ---
5 | apiVersion: rbac.authorization.k8s.io/v1
6 | kind: ClusterRole
7 | metadata:
8 | name: sharding:controllerring:checksum-controller
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources:
13 | - configmaps
14 | - secrets
15 | verbs:
16 | - list
17 | - patch
18 | ---
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: ClusterRoleBinding
21 | metadata:
22 | name: sharding:controllerring:checksum-controller
23 | roleRef:
24 | apiGroup: rbac.authorization.k8s.io
25 | kind: ClusterRole
26 | name: sharding:controllerring:checksum-controller
27 | subjects:
28 | - kind: ServiceAccount
29 | name: sharder
30 | namespace: sharding-system
31 |
--------------------------------------------------------------------------------
/hack/config/external-dns/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: external-dns
5 |
6 | images:
7 | - name: registry.k8s.io/external-dns/external-dns
8 | newTag: v0.17.0
9 |
10 | resources:
11 | - namespace.yaml
12 | - https://github.com/kubernetes-sigs/external-dns//kustomize?ref=v0.17.0
13 |
14 | patches:
15 | - path: patch-deployment.yaml
16 |
--------------------------------------------------------------------------------
/hack/config/external-dns/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: external-dns
5 |
--------------------------------------------------------------------------------
/hack/config/external-dns/patch-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: external-dns
5 | namespace: default
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: external-dns
11 | args:
12 | - --source=ingress
13 | - --source=service
14 | - --domain-filter=timebertt.dev
15 | - --provider=google
16 | - --log-format=json
17 | - --google-project=$(GOOGLE_PROJECT)
18 | - --google-zone-visibility=public
19 | - --policy=sync
20 | - --registry=txt
21 | - --txt-owner-id=shoot--ixywdlfvei--sharding-2025a5e1-9ac9-471c-9ef0-0a2e70527e5f-ske
22 | - --interval=1m
23 | # ensure the records are not owned by short-lived acme solvers managed by cert-manager or website ingresses
24 | - --label-filter=acme.cert-manager.io/http01-solver!=true,app!=website
25 | env:
26 | - name: GOOGLE_APPLICATION_CREDENTIALS
27 | value: /etc/secrets/service-account/service-account.json
28 | - name: GOOGLE_PROJECT
29 | valueFrom:
30 | secretKeyRef:
31 | name: google-clouddns-timebertt-dev
32 | key: project
33 | volumeMounts:
34 | - name: clouddns-credentials
35 | mountPath: /etc/secrets/service-account
36 | readOnly: true
37 | volumes:
38 | - name: clouddns-credentials
39 | secret:
40 | secretName: google-clouddns-timebertt-dev
41 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: ingress-nginx
5 |
6 | resources:
7 | - https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.12.2/deploy/static/provider/cloud/deploy.yaml
8 |
9 | patches:
10 | - path: patch_default_ingress_class.yaml
11 | - path: patch_controller_resources.yaml
12 | # Delete validation webhook for Ingresses.
13 | # We don't need or want validation for Ingress objects in the development and load testing setup. It results in high
14 | # latency for API requests and CPU waste during load tests.
15 | - patch: |
16 | apiVersion: admissionregistration.k8s.io/v1
17 | kind: ValidatingWebhookConfiguration
18 | metadata:
19 | name: ingress-nginx-admission
20 | $patch: delete
21 | # This job fails if the ValidatingWebhookConfiguration does not exist. Drop it as well.
22 | # Note: we can't drop the ingress-nginx-admission-create job, which creates the webhook certificate. Without this,
23 | # the ingress-nginx-controller won't start.
24 | - patch: |
25 | apiVersion: batch/v1
26 | kind: Job
27 | metadata:
28 | name: ingress-nginx-admission-patch
29 | namespace: ingress-nginx
30 | $patch: delete
31 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/default/patch_controller_resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: ingress-nginx-controller
5 | namespace: ingress-nginx
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: controller
11 | resources:
12 | requests:
13 | cpu: 10m
14 | memory: 256Mi
15 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/default/patch_default_ingress_class.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: IngressClass
3 | metadata:
4 | name: nginx
5 | namespace: ingress-nginx
6 | annotations:
7 | ingressclass.kubernetes.io/is-default-class: "true"
8 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/kind/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../default
6 |
7 | patches:
8 | - path: patch_service_nodeport.yaml
9 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/kind/patch_service_nodeport.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ingress-nginx-controller
5 | namespace: ingress-nginx
6 | spec:
7 | ports:
8 | - appProtocol: http
9 | name: http
10 | nodePort: 30888
11 | port: 80
12 | protocol: TCP
13 | targetPort: http
14 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/shoot/certificate.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cert-manager.io/v1
2 | kind: Certificate
3 | metadata:
4 | name: webhosting-tls
5 | spec:
6 | dnsNames:
7 | - webhosting.timebertt.dev
8 | issuerRef:
9 | group: cert-manager.io
10 | kind: ClusterIssuer
11 | name: letsencrypt-http01
12 | secretName: webhosting-tls
13 | usages:
14 | - digital signature
15 | - key encipherment
16 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/shoot/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: ingress-nginx
5 |
6 | resources:
7 | - ../default
8 | - certificate.yaml
9 |
10 | patches:
11 | - path: patch_service.yaml
12 | - target:
13 | group: apps
14 | version: v1
15 | kind: Deployment
16 | name: ingress-nginx-controller
17 | namespace: ingress-nginx
18 | patch: |
19 | - op: add
20 | path: /spec/template/spec/containers/0/args/-
21 | value: "--default-ssl-certificate=ingress-nginx/webhosting-tls"
22 |
--------------------------------------------------------------------------------
/hack/config/ingress-nginx/shoot/patch_service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ingress-nginx-controller
5 | namespace: ingress-nginx
6 | annotations:
7 | external-dns.alpha.kubernetes.io/hostname: webhosting.timebertt.dev
8 |
--------------------------------------------------------------------------------
/hack/config/kind-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kind.x-k8s.io/v1alpha4
2 | kind: Cluster
3 | nodes:
4 | - role: control-plane
5 | extraPortMappings:
6 | # ingress-nginx
7 | - containerPort: 30888
8 | hostPort: 8088
9 | kubeadmConfigPatches:
10 | - |
11 | apiVersion: kubelet.config.k8s.io/v1beta1
12 | kind: KubeletConfiguration
13 | maxPods: 250
14 | - |
15 | apiVersion: kubeadm.k8s.io/v1beta3
16 | kind: ClusterConfiguration
17 | controllerManager:
18 | extraArgs:
19 | kube-api-qps: "800"
20 | kube-api-burst: "1000"
21 |
--------------------------------------------------------------------------------
/hack/config/kyverno/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - https://github.com/kyverno/kyverno/releases/download/v1.14.1/install.yaml
6 |
7 | configMapGenerator:
8 | - name: kyverno
9 | namespace: kyverno
10 | behavior: merge
11 | options:
12 | disableNameSuffixHash: true
13 | literals:
14 | # overwrite default namespaceSelector for webhook configs to exclude kube-system
15 | # the second part keeps Gardener's care controller / webhook remediation happy
16 | - >-
17 | webhooks={
18 | "namespaceSelector": {
19 | "matchExpressions": [{
20 | "key": "kubernetes.io/metadata.name",
21 | "operator": "NotIn",
22 | "values": ["kyverno", "kube-system"]
23 | }, {
24 | "key": "gardener.cloud/purpose",
25 | "operator": "NotIn",
26 | "values": ["kube-system"]
27 | }]
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/hack/config/monitoring/crds/README.md:
--------------------------------------------------------------------------------
1 | The CRDs in this directory were downloaded from
2 | https://github.com/prometheus-operator/kube-prometheus/tree/v0.14.0/manifests/setup.
3 |
4 | Bump the version in [`update.sh`](../update.sh) and run the script to update the CRDs.
5 |
--------------------------------------------------------------------------------
/hack/config/monitoring/crds/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # Code generated by update.sh, DO NOT EDIT.
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 |
5 | labels:
6 | - includeSelectors: true
7 | pairs:
8 | app.kubernetes.io/name: prometheus-operator
9 | app.kubernetes.io/part-of: kube-prometheus
10 | app.kubernetes.io/version: 0.76.2
11 |
12 | resources:
13 | - 0alertmanagerConfigCustomResourceDefinition.yaml
14 | - 0alertmanagerCustomResourceDefinition.yaml
15 | - 0podmonitorCustomResourceDefinition.yaml
16 | - 0probeCustomResourceDefinition.yaml
17 | - 0prometheusCustomResourceDefinition.yaml
18 | - 0prometheusagentCustomResourceDefinition.yaml
19 | - 0prometheusruleCustomResourceDefinition.yaml
20 | - 0scrapeconfigCustomResourceDefinition.yaml
21 | - 0servicemonitorCustomResourceDefinition.yaml
22 | - 0thanosrulerCustomResourceDefinition.yaml
23 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/ensure-admin-password.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Ensures a Grafana admin password file exists next to this script.
# The password is generated once and reused on subsequent runs so that
# re-deploying the monitoring stack keeps the same credentials.

dir="$(dirname "$0")"
file="$dir/grafana_admin_password.secret.txt"

# Keep an existing password stable across runs.
[ -f "$file" ] && exit 0

# Generate 32 random alphanumeric characters.
# Read /dev/urandom via redirection instead of piping it through cat.
tr -dc "a-zA-Z0-9" < /dev/urandom | head -c 32 > "$file"
8 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/grafana_ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | annotations:
5 | cert-manager.io/cluster-issuer: letsencrypt-http01
6 | labels:
7 | app.kubernetes.io/component: grafana
8 | app.kubernetes.io/name: grafana
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | rules:
13 | - host: grafana.webhosting.timebertt.dev
14 | http:
15 | paths:
16 | - backend:
17 | service:
18 | name: grafana
19 | port:
20 | name: http
21 | path: /
22 | pathType: Prefix
23 | tls:
24 | - hosts:
25 | - grafana.webhosting.timebertt.dev
26 | secretName: grafana-tls
27 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
5 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_grafana_admin.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: grafana
11 | env:
12 | - name: GF_SECURITY_ADMIN_PASSWORD
13 | valueFrom:
14 | secretKeyRef:
15 | name: grafana-admin
16 | key: password
17 | - name: GF_AUTH_ANONYMOUS_ENABLED
18 | value: "true"
19 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE
20 | value: "Viewer"
21 | - name: GF_USERS_VIEWERS_CAN_EDIT
22 | value: "false"
23 | - name: GF_ALERTING_ENABLED
24 | value: "false"
25 | - name: GF_UNIFIED_ALERTING_ENABLED
26 | value: "false"
27 | - name: GF_USERS_DEFAULT_THEME
28 | value: "light"
29 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_grafana_networkpolicy.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /spec/ingress/-
3 | value:
4 | from:
5 | - podSelector:
6 | matchLabels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/component: controller
9 | namespaceSelector:
10 | matchLabels:
11 | app.kubernetes.io/name: ingress-nginx
12 | ports:
13 | - port: 3000
14 | protocol: TCP
15 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_kubelet_metrics.yaml:
--------------------------------------------------------------------------------
1 | # drop storage operation duration metrics (high cardinality)
2 | - op: add
3 | path: /spec/endpoints/0/metricRelabelings/-
4 | value:
5 | sourceLabels: [__name__]
6 | regex: storage_operation_duration_seconds_.+
7 | action: drop
8 | # drop runtime operation duration metrics (high cardinality)
9 | - op: add
10 | path: /spec/endpoints/0/metricRelabelings/-
11 | value:
12 | sourceLabels: [__name__]
13 | regex: kubelet_runtime_operations_duration_seconds_.+
14 | action: drop
15 | # drop metrics for project namespaces
16 | - op: add
17 | path: /spec/endpoints/0/metricRelabelings/-
18 | value:
19 | sourceLabels: [namespace]
20 | regex: project-.+
21 | action: drop
22 | # drop cadvisor metrics for project namespaces
23 | - op: add
24 | path: /spec/endpoints/1/metricRelabelings/-
25 | value:
26 | sourceLabels: [namespace]
27 | regex: project-.+
28 | action: drop
29 | # increase cadvisor scrape interval
30 | - op: replace
31 | path: /spec/endpoints/1/interval
32 | value: 10s
33 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_kubestatemetrics.yaml:
--------------------------------------------------------------------------------
1 | # restrict kube-state-metrics to the listed system namespaces, so no metrics are exported for project namespaces
2 | - op: add
3 | path: /spec/template/spec/containers/0/args/-
4 | value:
5 | --namespaces=cert-manager,default,experiment,external-dns,ingress-nginx,kube-node-lease,kube-public,kube-system,kyverno,monitoring,parca,sharding-system,webhosting-system
6 | # add run_id label to kube_pod_labels to select metrics by experiment run ID
7 | # the --metric-labels-allowlist flag doesn't support wildcard patterns, so the label key must be listed explicitly
8 | - op: add
9 | path: /spec/template/spec/containers/0/args/-
10 | value:
11 | --metric-labels-allowlist=pods=[label.prometheus.io/run_id]
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_kubestatemetrics_servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | # label map for label.prometheus.io/* labels
2 | - op: add
3 | path: /spec/endpoints/0/metricRelabelings/-
4 | value:
5 | action: labelmap
6 | regex: "label_label_prometheus_io_(.*)"
7 | replacement: "${1}"
8 | - op: add
9 | path: /spec/endpoints/0/metricRelabelings/-
10 | value:
11 | action: labeldrop
12 | regex: "label_label_prometheus_io_(.*)"
13 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/patch_prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | name: k8s
5 | namespace: monitoring
6 | spec:
7 | replicas: 1 # don't need HA for our purposes
8 | evaluationInterval: 15s
9 | alerting: null
10 | resources:
11 | requests:
12 | cpu: 3000m
13 | memory: 12Gi
14 | limits:
15 | cpu: 4000m # replaying WAL takes some CPU
16 | memory: 12Gi
17 |
--------------------------------------------------------------------------------
/hack/config/monitoring/default/rbac-proxy_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: rbac-proxy
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/grafana-sidecar/dashboards-sidecar.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | providers:
3 | - folder: Default
4 | folderUid: ""
5 | name: "1"
6 | options:
7 | path: /grafana-dashboard-definitions-sidecar/0
8 | orgId: 1
9 | type: file
10 |
--------------------------------------------------------------------------------
/hack/config/monitoring/grafana-sidecar/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1alpha1
2 | kind: Component
3 |
4 | generatorOptions:
5 | disableNameSuffixHash: true
6 |
7 | configMapGenerator:
8 | - name: grafana-dashboards
9 | namespace: monitoring
10 | behavior: merge
11 | files:
12 | - dashboards-sidecar.yaml
13 |
14 | patches:
15 | - path: patch_grafana_sidecar.yaml
16 |
17 | resources:
18 | - sidecar_clusterrole.yaml
19 | - sidecar_clusterrolebinding.yaml
20 |
--------------------------------------------------------------------------------
/hack/config/monitoring/grafana-sidecar/patch_grafana_sidecar.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | template:
8 | spec:
9 | automountServiceAccountToken: true
10 | containers:
11 | - name: grafana-sc-dashboard
12 | image: quay.io/kiwigrid/k8s-sidecar:1.30.3
13 | imagePullPolicy: IfNotPresent
14 | env:
15 | - name: METHOD
16 | value: WATCH
17 | - name: LABEL
18 | value: grafana_dashboard
19 | - name: FOLDER
20 | value: /grafana-dashboard-definitions-sidecar/0
21 | - name: RESOURCE
22 | value: configmap
23 | volumeMounts:
24 | - name: sc-dashboard-volume
25 | mountPath: /grafana-dashboard-definitions-sidecar/0
26 | - name: grafana
27 | volumeMounts:
28 | - name: sc-dashboard-volume
29 | mountPath: /grafana-dashboard-definitions-sidecar/0
30 | volumes:
31 | - name: sc-dashboard-volume
32 | emptyDir: {}
33 |
--------------------------------------------------------------------------------
/hack/config/monitoring/grafana-sidecar/sidecar_clusterrole.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: grafana
6 | name: grafana-sidecar
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - configmaps
12 | verbs:
13 | - get
14 | - list
15 | - watch
16 |
--------------------------------------------------------------------------------
/hack/config/monitoring/grafana-sidecar/sidecar_clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: grafana
6 | name: grafana-sidecar
7 | roleRef:
8 | apiGroup: rbac.authorization.k8s.io
9 | kind: ClusterRole
10 | name: grafana-sidecar
11 | subjects:
12 | - kind: ServiceAccount
13 | name: grafana
14 | namespace: monitoring
15 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/README.md:
--------------------------------------------------------------------------------
1 | The manifests in this directory were downloaded from
2 | https://github.com/prometheus-operator/kube-prometheus/tree/v0.14.0/manifests.
3 |
4 | Bump the version in [`update.sh`](../update.sh) and run the script to update the manifests.
5 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: blackbox-exporter
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.25.0
9 | name: blackbox-exporter
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: blackbox-exporter
14 | subjects:
15 | - kind: ServiceAccount
16 | name: blackbox-exporter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.25.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 9115
21 | protocol: TCP
22 | - port: 19115
23 | protocol: TCP
24 | podSelector:
25 | matchLabels:
26 | app.kubernetes.io/component: exporter
27 | app.kubernetes.io/name: blackbox-exporter
28 | app.kubernetes.io/part-of: kube-prometheus
29 | policyTypes:
30 | - Egress
31 | - Ingress
32 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.25.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: https
14 | port: 9115
15 | targetPort: https
16 | - name: probe
17 | port: 19115
18 | targetPort: http
19 | selector:
20 | app.kubernetes.io/component: exporter
21 | app.kubernetes.io/name: blackbox-exporter
22 | app.kubernetes.io/part-of: kube-prometheus
23 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: blackbox-exporter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.25.0
10 | name: blackbox-exporter
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/blackboxExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: blackbox-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.25.0
9 | name: blackbox-exporter
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | interval: 30s
15 | path: /metrics
16 | port: https
17 | scheme: https
18 | tlsConfig:
19 | insecureSkipVerify: true
20 | selector:
21 | matchLabels:
22 | app.kubernetes.io/component: exporter
23 | app.kubernetes.io/name: blackbox-exporter
24 | app.kubernetes.io/part-of: kube-prometheus
25 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.2.0
9 | name: grafana-config
10 | namespace: monitoring
11 | stringData:
12 | grafana.ini: |
13 | [date_formats]
14 | default_timezone = UTC
15 | type: Opaque
16 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-dashboardDatasources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.2.0
9 | name: grafana-datasources
10 | namespace: monitoring
11 | stringData:
12 | datasources.yaml: |-
13 | {
14 | "apiVersion": 1,
15 | "datasources": [
16 | {
17 | "access": "proxy",
18 | "editable": false,
19 | "name": "prometheus",
20 | "orgId": 1,
21 | "type": "prometheus",
22 | "url": "http://prometheus-k8s.monitoring.svc:9090",
23 | "version": 1
24 | }
25 | ]
26 | }
27 | type: Opaque
28 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-dashboardSources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | dashboards.yaml: |-
4 | {
5 | "apiVersion": 1,
6 | "providers": [
7 | {
8 | "folder": "Default",
9 | "folderUid": "",
10 | "name": "0",
11 | "options": {
12 | "path": "/grafana-dashboard-definitions/0"
13 | },
14 | "orgId": 1,
15 | "type": "file"
16 | }
17 | ]
18 | }
19 | kind: ConfigMap
20 | metadata:
21 | labels:
22 | app.kubernetes.io/component: grafana
23 | app.kubernetes.io/name: grafana
24 | app.kubernetes.io/part-of: kube-prometheus
25 | app.kubernetes.io/version: 11.2.0
26 | name: grafana-dashboards
27 | namespace: monitoring
28 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.2.0
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 3000
21 | protocol: TCP
22 | podSelector:
23 | matchLabels:
24 | app.kubernetes.io/component: grafana
25 | app.kubernetes.io/name: grafana
26 | app.kubernetes.io/part-of: kube-prometheus
27 | policyTypes:
28 | - Egress
29 | - Ingress
30 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.2.0
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: http
14 | port: 3000
15 | targetPort: http
16 | selector:
17 | app.kubernetes.io/component: grafana
18 | app.kubernetes.io/name: grafana
19 | app.kubernetes.io/part-of: kube-prometheus
20 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: grafana
7 | app.kubernetes.io/name: grafana
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 11.2.0
10 | name: grafana
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/grafana-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: grafana
6 | app.kubernetes.io/name: grafana
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 11.2.0
9 | name: grafana
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - interval: 15s
14 | port: http
15 | selector:
16 | matchLabels:
17 | app.kubernetes.io/name: grafana
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubeStateMetrics-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.13.0
9 | name: kube-state-metrics
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: kube-state-metrics
14 | subjects:
15 | - kind: ServiceAccount
16 | name: kube-state-metrics
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubeStateMetrics-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.13.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 8443
21 | protocol: TCP
22 | - port: 9443
23 | protocol: TCP
24 | podSelector:
25 | matchLabels:
26 | app.kubernetes.io/component: exporter
27 | app.kubernetes.io/name: kube-state-metrics
28 | app.kubernetes.io/part-of: kube-prometheus
29 | policyTypes:
30 | - Egress
31 | - Ingress
32 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubeStateMetrics-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.13.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: https-main
15 | port: 8443
16 | targetPort: https-main
17 | - name: https-self
18 | port: 9443
19 | targetPort: https-self
20 | selector:
21 | app.kubernetes.io/component: exporter
22 | app.kubernetes.io/name: kube-state-metrics
23 | app.kubernetes.io/part-of: kube-prometheus
24 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubeStateMetrics-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: kube-state-metrics
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.13.0
10 | name: kube-state-metrics
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubeStateMetrics-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 2.13.0
9 | name: kube-state-metrics
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | honorLabels: true
15 | interval: 30s
16 | metricRelabelings:
17 | - action: drop
18 | regex: kube_endpoint_address_not_ready|kube_endpoint_address_available
19 | sourceLabels:
20 | - __name__
21 | port: https-main
22 | relabelings:
23 | - action: labeldrop
24 | regex: (pod|service|endpoint|namespace)
25 | scheme: https
26 | scrapeTimeout: 30s
27 | tlsConfig:
28 | insecureSkipVerify: true
29 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
30 | interval: 30s
31 | port: https-self
32 | scheme: https
33 | tlsConfig:
34 | insecureSkipVerify: true
35 | jobLabel: app.kubernetes.io/name
36 | selector:
37 | matchLabels:
38 | app.kubernetes.io/component: exporter
39 | app.kubernetes.io/name: kube-state-metrics
40 | app.kubernetes.io/part-of: kube-prometheus
41 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorCoreDNS.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: coredns
6 | app.kubernetes.io/part-of: kube-prometheus
7 | name: coredns
8 | namespace: monitoring
9 | spec:
10 | endpoints:
11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 | interval: 15s
13 | metricRelabelings:
14 | - action: drop
15 | regex: coredns_cache_misses_total
16 | sourceLabels:
17 | - __name__
18 | port: metrics
19 | jobLabel: app.kubernetes.io/name
20 | namespaceSelector:
21 | matchNames:
22 | - kube-system
23 | selector:
24 | matchLabels:
25 | k8s-app: kube-dns
26 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/kubernetesControlPlane-serviceMonitorKubeScheduler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-scheduler
6 | app.kubernetes.io/part-of: kube-prometheus
7 | name: kube-scheduler
8 | namespace: monitoring
9 | spec:
10 | endpoints:
11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
12 | interval: 30s
13 | port: https-metrics
14 | scheme: https
15 | tlsConfig:
16 | insecureSkipVerify: true
17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
18 | interval: 5s
19 | metricRelabelings:
20 | - action: drop
21 | regex: process_start_time_seconds
22 | sourceLabels:
23 | - __name__
24 | path: /metrics/slis
25 | port: https-metrics
26 | scheme: https
27 | tlsConfig:
28 | insecureSkipVerify: true
29 | jobLabel: app.kubernetes.io/name
30 | namespaceSelector:
31 | matchNames:
32 | - kube-system
33 | selector:
34 | matchLabels:
35 | app.kubernetes.io/name: kube-scheduler
36 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 1.8.2
9 | name: node-exporter
10 | rules:
11 | - apiGroups:
12 | - authentication.k8s.io
13 | resources:
14 | - tokenreviews
15 | verbs:
16 | - create
17 | - apiGroups:
18 | - authorization.k8s.io
19 | resources:
20 | - subjectaccessreviews
21 | verbs:
22 | - create
23 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 1.8.2
9 | name: node-exporter
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: node-exporter
14 | subjects:
15 | - kind: ServiceAccount
16 | name: node-exporter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 1.8.2
9 | name: node-exporter
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 9100
21 | protocol: TCP
22 | podSelector:
23 | matchLabels:
24 | app.kubernetes.io/component: exporter
25 | app.kubernetes.io/name: node-exporter
26 | app.kubernetes.io/part-of: kube-prometheus
27 | policyTypes:
28 | - Egress
29 | - Ingress
30 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 1.8.2
9 | name: node-exporter
10 | namespace: monitoring
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: https
15 | port: 9100
16 | targetPort: https
17 | selector:
18 | app.kubernetes.io/component: exporter
19 | app.kubernetes.io/name: node-exporter
20 | app.kubernetes.io/part-of: kube-prometheus
21 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: exporter
7 | app.kubernetes.io/name: node-exporter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 1.8.2
10 | name: node-exporter
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/nodeExporter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: exporter
6 | app.kubernetes.io/name: node-exporter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 1.8.2
9 | name: node-exporter
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | interval: 15s
15 | port: https
16 | relabelings:
17 | - action: replace
18 | regex: (.*)
19 | replacement: $1
20 | sourceLabels:
21 | - __meta_kubernetes_pod_node_name
22 | targetLabel: instance
23 | scheme: https
24 | tlsConfig:
25 | insecureSkipVerify: true
26 | jobLabel: app.kubernetes.io/name
27 | selector:
28 | matchLabels:
29 | app.kubernetes.io/component: exporter
30 | app.kubernetes.io/name: node-exporter
31 | app.kubernetes.io/part-of: kube-prometheus
32 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s
11 | rules:
12 | - apiGroups:
13 | - ""
14 | resources:
15 | - nodes/metrics
16 | verbs:
17 | - get
18 | - nonResourceURLs:
19 | - /metrics
20 | - /metrics/slis
21 | verbs:
22 | - get
23 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: ClusterRole
14 | name: prometheus-k8s
15 | subjects:
16 | - kind: ServiceAccount
17 | name: prometheus-k8s
18 | namespace: monitoring
19 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s
11 | namespace: monitoring
12 | spec:
13 | egress:
14 | - {}
15 | ingress:
16 | - from:
17 | - podSelector:
18 | matchLabels:
19 | app.kubernetes.io/name: prometheus
20 | ports:
21 | - port: 9090
22 | protocol: TCP
23 | - port: 8080
24 | protocol: TCP
25 | - from:
26 | - podSelector:
27 | matchLabels:
28 | app.kubernetes.io/name: prometheus-adapter
29 | ports:
30 | - port: 9090
31 | protocol: TCP
32 | - from:
33 | - podSelector:
34 | matchLabels:
35 | app.kubernetes.io/name: grafana
36 | ports:
37 | - port: 9090
38 | protocol: TCP
39 | podSelector:
40 | matchLabels:
41 | app.kubernetes.io/component: prometheus
42 | app.kubernetes.io/instance: k8s
43 | app.kubernetes.io/name: prometheus
44 | app.kubernetes.io/part-of: kube-prometheus
45 | policyTypes:
46 | - Egress
47 | - Ingress
48 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-roleBindingConfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s-config
11 | namespace: monitoring
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: Role
15 | name: prometheus-k8s-config
16 | subjects:
17 | - kind: ServiceAccount
18 | name: prometheus-k8s
19 | namespace: monitoring
20 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-roleConfig.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s-config
11 | namespace: monitoring
12 | rules:
13 | - apiGroups:
14 | - ""
15 | resources:
16 | - configmaps
17 | verbs:
18 | - get
19 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s
11 | namespace: monitoring
12 | spec:
13 | ports:
14 | - name: web
15 | port: 9090
16 | targetPort: web
17 | - name: reloader-web
18 | port: 8080
19 | targetPort: reloader-web
20 | selector:
21 | app.kubernetes.io/component: prometheus
22 | app.kubernetes.io/instance: k8s
23 | app.kubernetes.io/name: prometheus
24 | app.kubernetes.io/part-of: kube-prometheus
25 | sessionAffinity: ClientIP
26 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: true
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: prometheus
7 | app.kubernetes.io/instance: k8s
8 | app.kubernetes.io/name: prometheus
9 | app.kubernetes.io/part-of: kube-prometheus
10 | app.kubernetes.io/version: 2.54.1
11 | name: prometheus-k8s
12 | namespace: monitoring
13 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheus-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.54.1
10 | name: prometheus-k8s
11 | namespace: monitoring
12 | spec:
13 | endpoints:
14 | - interval: 30s
15 | port: web
16 | - interval: 30s
17 | port: reloader-web
18 | selector:
19 | matchLabels:
20 | app.kubernetes.io/component: prometheus
21 | app.kubernetes.io/instance: k8s
22 | app.kubernetes.io/name: prometheus
23 | app.kubernetes.io/part-of: kube-prometheus
24 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - nodes
15 | - namespaces
16 | - pods
17 | - services
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleAggregatedMetricsReader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
10 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
11 | rbac.authorization.k8s.io/aggregate-to-view: "true"
12 | name: system:aggregated-metrics-reader
13 | rules:
14 | - apiGroups:
15 | - metrics.k8s.io
16 | resources:
17 | - pods
18 | - nodes
19 | verbs:
20 | - get
21 | - list
22 | - watch
23 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: prometheus-adapter
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-adapter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleBindingDelegator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: resource-metrics:system:auth-delegator
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: system:auth-delegator
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-adapter
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-clusterRoleServerResources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: resource-metrics-server-resources
10 | rules:
11 | - apiGroups:
12 | - metrics.k8s.io
13 | resources:
14 | - '*'
15 | verbs:
16 | - '*'
17 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - {}
16 | podSelector:
17 | matchLabels:
18 | app.kubernetes.io/component: metrics-adapter
19 | app.kubernetes.io/name: prometheus-adapter
20 | app.kubernetes.io/part-of: kube-prometheus
21 | policyTypes:
22 | - Egress
23 | - Ingress
24 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-podDisruptionBudget.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | minAvailable: 1
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/component: metrics-adapter
16 | app.kubernetes.io/name: prometheus-adapter
17 | app.kubernetes.io/part-of: kube-prometheus
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-roleBindingAuthReader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: resource-metrics-auth-reader
10 | namespace: kube-system
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: Role
14 | name: extension-apiserver-authentication-reader
15 | subjects:
16 | - kind: ServiceAccount
17 | name: prometheus-adapter
18 | namespace: monitoring
19 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | ports:
13 | - name: https
14 | port: 443
15 | targetPort: 6443
16 | selector:
17 | app.kubernetes.io/component: metrics-adapter
18 | app.kubernetes.io/name: prometheus-adapter
19 | app.kubernetes.io/part-of: kube-prometheus
20 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: metrics-adapter
7 | app.kubernetes.io/name: prometheus-adapter
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.12.0
10 | name: prometheus-adapter
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusAdapter-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: metrics-adapter
6 | app.kubernetes.io/name: prometheus-adapter
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.12.0
9 | name: prometheus-adapter
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | interval: 30s
15 | metricRelabelings:
16 | - action: drop
17 | regex: (apiserver_client_certificate_.*|apiserver_envelope_.*|apiserver_flowcontrol_.*|apiserver_storage_.*|apiserver_webhooks_.*|workqueue_.*)
18 | sourceLabels:
19 | - __name__
20 | port: https
21 | scheme: https
22 | tlsConfig:
23 | insecureSkipVerify: true
24 | selector:
25 | matchLabels:
26 | app.kubernetes.io/component: metrics-adapter
27 | app.kubernetes.io/name: prometheus-adapter
28 | app.kubernetes.io/part-of: kube-prometheus
29 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusOperator-clusterRoleBinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.76.2
9 | name: prometheus-operator
10 | roleRef:
11 | apiGroup: rbac.authorization.k8s.io
12 | kind: ClusterRole
13 | name: prometheus-operator
14 | subjects:
15 | - kind: ServiceAccount
16 | name: prometheus-operator
17 | namespace: monitoring
18 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusOperator-networkPolicy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.76.2
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | egress:
13 | - {}
14 | ingress:
15 | - from:
16 | - podSelector:
17 | matchLabels:
18 | app.kubernetes.io/name: prometheus
19 | ports:
20 | - port: 8443
21 | protocol: TCP
22 | podSelector:
23 | matchLabels:
24 | app.kubernetes.io/component: controller
25 | app.kubernetes.io/name: prometheus-operator
26 | app.kubernetes.io/part-of: kube-prometheus
27 | policyTypes:
28 | - Egress
29 | - Ingress
30 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusOperator-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.76.2
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | clusterIP: None
13 | ports:
14 | - name: https
15 | port: 8443
16 | targetPort: https
17 | selector:
18 | app.kubernetes.io/component: controller
19 | app.kubernetes.io/name: prometheus-operator
20 | app.kubernetes.io/part-of: kube-prometheus
21 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusOperator-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | automountServiceAccountToken: false
3 | kind: ServiceAccount
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: controller
7 | app.kubernetes.io/name: prometheus-operator
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 0.76.2
10 | name: prometheus-operator
11 | namespace: monitoring
12 |
--------------------------------------------------------------------------------
/hack/config/monitoring/kube-prometheus/prometheusOperator-serviceMonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/part-of: kube-prometheus
8 | app.kubernetes.io/version: 0.76.2
9 | name: prometheus-operator
10 | namespace: monitoring
11 | spec:
12 | endpoints:
13 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
14 | honorLabels: true
15 | port: https
16 | scheme: https
17 | tlsConfig:
18 | insecureSkipVerify: true
19 | selector:
20 | matchLabels:
21 | app.kubernetes.io/component: controller
22 | app.kubernetes.io/name: prometheus-operator
23 | app.kubernetes.io/part-of: kube-prometheus
24 | app.kubernetes.io/version: 0.76.2
25 |
--------------------------------------------------------------------------------
/hack/config/monitoring/shoot/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../default
6 | - storageclass.yaml
7 |
8 | patches:
9 | - path: patch_prometheus.yaml
10 | # drop ServiceMonitors for control plane components (not reachable in Shoot cluster)
11 | - patch: |
12 | apiVersion: monitoring.coreos.com/v1
13 | metadata:
14 | name: kube-apiserver
15 | namespace: monitoring
16 | kind: ServiceMonitor
17 | $patch: delete
18 | - patch: |
19 | apiVersion: monitoring.coreos.com/v1
20 | metadata:
21 | name: kube-controller-manager
22 | namespace: monitoring
23 | kind: ServiceMonitor
24 | $patch: delete
25 | - patch: |
26 | apiVersion: monitoring.coreos.com/v1
27 | metadata:
28 | name: kube-scheduler
29 | namespace: monitoring
30 | kind: ServiceMonitor
31 | $patch: delete
32 |
--------------------------------------------------------------------------------
/hack/config/monitoring/shoot/patch_prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | name: k8s
5 | namespace: monitoring
6 | spec:
7 | retention: 30d
8 | retentionSize: 90GiB
9 | storage:
10 | volumeClaimTemplate:
11 | metadata:
12 | labels:
13 | app.kubernetes.io/component: prometheus
14 | app.kubernetes.io/instance: k8s
15 | app.kubernetes.io/name: prometheus
16 | app.kubernetes.io/part-of: kube-prometheus
17 | name: prometheus
18 | spec:
19 | accessModes:
20 | - ReadWriteOnce
21 | resources:
22 | requests:
23 | storage: 100Gi
24 | storageClassName: premium-perf1-stackit
25 |
--------------------------------------------------------------------------------
/hack/config/monitoring/shoot/storageclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: gce-ssd
5 | parameters:
6 | type: pd-ssd
7 | allowVolumeExpansion: true
8 | provisioner: pd.csi.storage.gke.io
9 | reclaimPolicy: Delete
10 | volumeBindingMode: WaitForFirstConsumer
11 |
--------------------------------------------------------------------------------
/hack/config/policy/ci/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - no-requests.yaml
6 |
--------------------------------------------------------------------------------
/hack/config/policy/ci/no-requests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: no-requests-limits
5 | spec:
6 | failurePolicy: Fail
7 | rules:
8 | # drop resource requests to allow scheduling all controller instances on a resource-restricted kind cluster (e.g., in CI)
9 | - name: no-requests-limits
10 | match:
11 | any:
12 | - resources:
13 | kinds:
14 | - Pod
15 | selector:
16 | matchExpressions:
17 | - key: app.kubernetes.io/name
18 | operator: In
19 | values:
20 | - controller-sharding
21 | - key: app.kubernetes.io/component
22 | operator: In
23 | values:
24 | - sharder
25 | - checksum-controller
26 | operations:
27 | - CREATE
28 | - resources:
29 | kinds:
30 | - Pod
31 | selector:
32 | matchExpressions:
33 | - key: app.kubernetes.io/name
34 | operator: In
35 | values:
36 | - webhosting-operator
37 | operations:
38 | - CREATE
39 | mutate:
40 | foreach:
41 | - list: request.object.spec.containers
42 | patchStrategicMerge:
43 | spec:
44 | containers:
45 | - (name): "{{element.name}}"
46 | resources: null
47 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/kube-apiserver-scale.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: Policy
3 | metadata:
4 | name: kube-apiserver-scale
5 | namespace: shoot--timebertt--sharding
6 | spec:
7 | failurePolicy: Ignore
 8 |   # schema validation doesn't seem to work in combination with the /scale subresource; disable it for now
9 | schemaValidation: false
10 | rules:
11 | # set static replicas on kube-apiserver to ensure similar evaluation environment between load test runs
12 | - name: replicas-scale
13 | match:
14 | any:
15 | - resources:
16 |         # mutate Scale subresource requests issued by the HPA
17 | kinds:
18 | - Deployment/scale
19 | # the Scale subresource doesn't have the original resource's labels -> we have to match by name
20 | names:
21 | - kube-apiserver
22 | preconditions:
23 | all:
24 | # Only patch spec.replicas if the control plane is not hibernated, i.e., if spec.replicas>=1.
25 | - key: "{{ request.object.spec.replicas || `1` }}"
26 | operator: GreaterThan
27 | value: 0
28 | mutate:
29 | patchStrategicMerge:
30 | spec:
31 | replicas: 4
32 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | # This kustomization contains policies for manipulating shoot control plane components.
5 | # For this to work, kyverno needs to be installed on the seed cluster.
6 |
7 | resources:
8 | - etcd-main.yaml
9 | - kube-apiserver.yaml
10 | - kube-apiserver-scale.yaml
11 | - kube-controller-manager.yaml
12 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-awake/kyverno-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cli.kyverno.io/v1alpha1
2 | kind: Test
3 | metadata:
4 | name: kube-apiserver-scale-awake
5 | policies:
6 | - ../../kube-apiserver-scale.yaml
7 | resources:
8 | # spec.replicas=2 -> expect spec.replicas=4
9 | - scale.yaml
10 | variables: variables.yaml
11 | results:
12 | - policy: shoot--timebertt--sharding/kube-apiserver-scale
13 | rule: replicas-scale
14 | resources:
15 | - shoot--timebertt--sharding/kube-apiserver
16 | kind: Scale
17 | result: pass
18 | patchedResource: scale_expected.yaml
19 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-awake/scale.yaml:
--------------------------------------------------------------------------------
1 | kind: Scale
2 | apiVersion: autoscaling/v1
3 | metadata:
4 | name: kube-apiserver
5 | namespace: shoot--timebertt--sharding
6 | spec:
7 | replicas: 2
8 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-awake/scale_expected.yaml:
--------------------------------------------------------------------------------
1 | kind: Scale
2 | apiVersion: autoscaling/v1
3 | metadata:
4 | name: kube-apiserver
5 | namespace: shoot--timebertt--sharding
6 | spec:
7 | replicas: 4
8 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-awake/variables.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cli.kyverno.io/v1alpha1
2 | kind: Values
3 | metadata:
4 | name: values
5 | subresources:
6 | - subresource:
7 | name: "deployments/scale"
8 | kind: "Scale"
9 | group: "autoscaling"
10 | version: "v1"
11 | parentResource:
12 | name: "deployments"
13 | kind: "Deployment"
14 | group: "apps"
15 | version: "v1"
16 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-hibernated/kyverno-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cli.kyverno.io/v1alpha1
2 | kind: Test
3 | metadata:
4 | name: kube-apiserver-scale-hibernated
5 | policies:
6 | - ../../kube-apiserver-scale.yaml
7 | resources:
8 | # spec.replicas=0 -> expect skip
9 | - scale.yaml
10 | variables: variables.yaml
11 | results:
12 | - policy: shoot--timebertt--sharding/kube-apiserver-scale
13 | rule: replicas-scale
14 | resources:
15 | - shoot--timebertt--sharding/kube-apiserver
16 | kind: Scale
17 | result: skip
18 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-hibernated/scale.yaml:
--------------------------------------------------------------------------------
1 | kind: Scale
2 | apiVersion: autoscaling/v1
3 | metadata:
4 | name: kube-apiserver
5 | namespace: shoot--timebertt--sharding
6 | spec:
7 | replicas: 0
8 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver-scale-hibernated/variables.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cli.kyverno.io/v1alpha1
2 | kind: Values
3 | metadata:
4 | name: values
5 | subresources:
6 | - subresource:
7 | name: "deployments/scale"
8 | kind: "Scale"
9 | group: "autoscaling"
10 | version: "v1"
11 | parentResource:
12 | name: "deployments"
13 | kind: "Deployment"
14 | group: "apps"
15 | version: "v1"
16 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kube-apiserver-awake.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kubernetes
6 | role: apiserver
7 | name: kube-apiserver-awake
8 | namespace: shoot--timebertt--sharding
9 | spec:
10 | replicas: 2
11 | template:
12 | spec:
13 | containers:
14 | - name: kube-apiserver
15 | resources:
16 | requests:
17 | cpu: 800m
18 | memory: 800Mi
19 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kube-apiserver-awake_expected.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kubernetes
6 | role: apiserver
7 | name: kube-apiserver-awake
8 | namespace: shoot--timebertt--sharding
9 | spec:
10 | replicas: 4
11 | template:
12 | spec:
13 | containers:
14 | - name: kube-apiserver
15 | resources:
16 | requests:
17 | cpu: 800m
18 | memory: 800Mi
19 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kube-apiserver-hibernated.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kubernetes
6 | role: apiserver
7 | name: kube-apiserver-hibernated
8 | namespace: shoot--timebertt--sharding
9 | spec:
10 | replicas: 0
11 | template:
12 | spec:
13 | containers:
14 | - name: kube-apiserver
15 | resources:
16 | requests:
17 | cpu: 800m
18 | memory: 800Mi
19 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kube-apiserver-null.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kubernetes
6 | role: apiserver
7 | name: kube-apiserver-null
8 | namespace: shoot--timebertt--sharding
9 | spec:
10 | template:
11 | spec:
12 | containers:
13 | - name: kube-apiserver
14 | resources:
15 | requests:
16 | cpu: 800m
17 | memory: 800Mi
18 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kube-apiserver-null_expected.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kubernetes
6 | role: apiserver
7 | name: kube-apiserver-null
8 | namespace: shoot--timebertt--sharding
9 | spec:
10 | replicas: 4
11 | template:
12 | spec:
13 | containers:
14 | - name: kube-apiserver
15 | resources:
16 | requests:
17 | cpu: 800m
18 | memory: 800Mi
19 |
--------------------------------------------------------------------------------
/hack/config/policy/controlplane/tests/kube-apiserver/kyverno-test.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: cli.kyverno.io/v1alpha1
2 | kind: Test
3 | metadata:
4 | name: kube-apiserver
5 | policies:
6 | - ../../kube-apiserver.yaml
7 | resources:
8 | # spec.replicas=2 -> expect spec.replicas=4
9 | - kube-apiserver-awake.yaml
10 | # spec.replicas=null -> expect spec.replicas=4
11 | - kube-apiserver-null.yaml
12 | # spec.replicas=0 -> expect skip
13 | - kube-apiserver-hibernated.yaml
14 | results:
15 | - policy: shoot--timebertt--sharding/kube-apiserver
16 | rule: replicas
17 | resources:
18 | - shoot--timebertt--sharding/kube-apiserver-awake
19 | kind: Deployment
20 | result: pass
21 | patchedResource: kube-apiserver-awake_expected.yaml
22 | - policy: shoot--timebertt--sharding/kube-apiserver
23 | rule: replicas
24 | resources:
25 | - shoot--timebertt--sharding/kube-apiserver-null
26 | kind: Deployment
27 | result: pass
28 | patchedResource: kube-apiserver-null_expected.yaml
29 | - policy: shoot--timebertt--sharding/kube-apiserver
30 | rule: replicas
31 | resources:
32 | - shoot--timebertt--sharding/kube-apiserver-hibernated
33 | kind: Deployment
34 | result: skip
35 |
--------------------------------------------------------------------------------
/hack/config/policy/shoot/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - sharder-scheduling.yaml
6 |
--------------------------------------------------------------------------------
/hack/config/policy/shoot/sharder-scheduling.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: sharder-scheduling
5 | spec:
6 | failurePolicy: Fail
7 | rules:
8 | # schedule sharder on dedicated worker pool for better isolation in load tests
9 | - name: add-scheduling-constraints
10 | match:
11 | any:
12 | - resources:
13 | kinds:
14 | - Pod
15 | namespaces:
16 | - sharding-system
17 | selector:
18 | matchLabels:
19 | app.kubernetes.io/name: controller-sharding
20 | app.kubernetes.io/component: sharder
21 | operations:
22 | - CREATE
23 | mutate:
24 | patchesJson6902: |-
25 | - op: add
26 | path: "/spec/tolerations/-"
27 | value: {"key":"dedicated-for","operator":"Equal","value":"sharding","effect":"NoSchedule"}
28 | - op: add
29 | path: "/spec/affinity/nodeAffinity/requiredDuringSchedulingIgnoredDuringExecution/nodeSelectorTerms/-"
30 | value: {"matchExpressions": [{"key":"dedicated-for","operator":"In","values":["sharding"]}]}
31 |
--------------------------------------------------------------------------------
/hack/config/profiling/ensure-admin-password.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Ensures a random parca admin password and a matching htpasswd file exist
# next to this script. If both secret files are already present, the script
# is a no-op so that repeated invocations keep the existing credentials.

dir="$(dirname "$0")"
password_file="$dir/parca_password.secret.txt"
auth_file="$dir/parca_auth.secret.txt"

# Nothing to do if both secret files already exist.
[ -f "$password_file" ] && [ -f "$auth_file" ] && exit 0

# Generate a 32-character alphanumeric password (redirect instead of `cat |`).
tr -dc "a-zA-Z0-9" < /dev/urandom | head -c 32 > "$password_file"
# Create the basic-auth file for user "parca"; -i reads the password from stdin.
htpasswd -i -c "$auth_file" parca < "$password_file"
10 |
--------------------------------------------------------------------------------
/hack/config/profiling/parca_ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | annotations:
5 | cert-manager.io/cluster-issuer: letsencrypt-http01
6 | nginx.ingress.kubernetes.io/auth-type: basic
7 | nginx.ingress.kubernetes.io/auth-secret: parca-basic-auth
8 | nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required'
9 | labels:
10 | app.kubernetes.io/component: observability
11 | app.kubernetes.io/instance: parca
12 | app.kubernetes.io/name: parca
13 | name: parca
14 | namespace: parca
15 | spec:
16 | ingressClassName: nginx
17 | rules:
18 | - host: parca.webhosting.timebertt.dev
19 | http:
20 | paths:
21 | - backend:
22 | service:
23 | name: parca
24 | port:
25 | name: http
26 | path: /
27 | pathType: Prefix
28 | tls:
29 | - hosts:
30 | - parca.webhosting.timebertt.dev
31 | secretName: parca-tls
32 |
--------------------------------------------------------------------------------
/hack/config/profiling/parca_pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: observability
6 | app.kubernetes.io/instance: parca
7 | app.kubernetes.io/name: parca
8 | name: parca
9 | namespace: parca
10 | spec:
11 | accessModes:
12 | - ReadWriteOnce
13 | resources:
14 | requests:
15 | storage: 150Gi
16 |
--------------------------------------------------------------------------------
/hack/config/profiling/patch_deployment_pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: parca
5 | namespace: parca
6 | spec:
7 | # set replicas and strategy to play nicely with PVC
8 | replicas: 1
9 | strategy:
10 | type: Recreate
11 | rollingUpdate: null
12 | template:
13 | spec:
14 | volumes:
15 | - name: data
16 | emptyDir: null
17 | persistentVolumeClaim:
18 | claimName: parca
19 |
--------------------------------------------------------------------------------
/hack/config/profiling/rbac_sharder.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: parca-service-discovery
6 | namespace: sharding-system
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - services
12 | - endpoints
13 | - pods
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | ---
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: RoleBinding
21 | metadata:
22 | name: parca-service-discovery
23 | namespace: sharding-system
24 | roleRef:
25 | apiGroup: rbac.authorization.k8s.io
26 | kind: Role
27 | name: parca-service-discovery
28 | subjects:
29 | - kind: ServiceAccount
30 | name: parca
31 | namespace: parca
32 | ---
33 | apiVersion: rbac.authorization.k8s.io/v1
34 | kind: ClusterRoleBinding
35 | metadata:
36 | labels:
37 | app.kubernetes.io/component: observability
38 | app.kubernetes.io/instance: parca
39 | app.kubernetes.io/name: parca
40 | name: parca-sharder-pprof-reader
41 | roleRef:
42 | apiGroup: rbac.authorization.k8s.io
43 | kind: ClusterRole
44 | name: sharding:sharder:pprof-reader
45 | subjects:
46 | - kind: ServiceAccount
47 | name: parca
48 | namespace: parca
49 |
--------------------------------------------------------------------------------
/hack/config/sharder/devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../../../config/default
6 |
7 | patches:
8 | - target:
9 | group: apps
10 | kind: Deployment
11 | name: sharder
12 | namespace: sharding-system
13 | patch: |
14 | - op: add
15 | path: /spec/template/spec/containers/0/args/-
16 | value: --zap-devel
17 |
--------------------------------------------------------------------------------
/hack/config/sharder/host/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: config.sharding.timebertt.dev/v1alpha1
2 | kind: SharderConfig
3 | webhook:
4 | server:
5 | certDir: hack/config/certificates/host
6 | certName: webhook-server.pem
7 | keyName: webhook-server-key.pem
8 | config:
9 | annotations:
10 | cert-manager.io/inject-ca-from-secret: sharding-system/webhook-ca
11 | clientConfig:
12 | url: https://host.docker.internal:9443/
13 | controller:
14 | sharder:
15 | syncPeriod: 30s
16 |
--------------------------------------------------------------------------------
/hack/test-e2e.env:
--------------------------------------------------------------------------------
1 | export GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=5m
2 | export GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL=500ms
3 | export GOMEGA_DEFAULT_CONSISTENTLY_DURATION=10s
4 | export GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL=500ms
5 |
--------------------------------------------------------------------------------
/hack/test-e2e.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -o nounset
set -o pipefail
set -o errexit

# Load the shared Gomega timeout settings for the e2e suite.
source "$(dirname "$0")/test-e2e.env"

# Default ginkgo flags for the e2e run; extra arguments are appended verbatim.
ginkgo_flags=(
  --timeout=1h
  --poll-progress-after=60s
  --poll-progress-interval=30s
  --randomize-all
  --randomize-suites
  --keep-going
  -v
  --show-node-events
)

ginkgo run "${ginkgo_flags[@]}" "$@"
10 |
--------------------------------------------------------------------------------
/hack/test-integration.env:
--------------------------------------------------------------------------------
1 | export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=2m
2 | export GOMEGA_DEFAULT_EVENTUALLY_TIMEOUT=5s
3 | export GOMEGA_DEFAULT_EVENTUALLY_POLLING_INTERVAL=200ms
4 | export GOMEGA_DEFAULT_CONSISTENTLY_DURATION=5s
5 | export GOMEGA_DEFAULT_CONSISTENTLY_POLLING_INTERVAL=200ms
6 |
--------------------------------------------------------------------------------
/hack/test-integration.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Runs the integration tests against envtest binaries managed by setup-envtest.
# ENVTEST_K8S_VERSION selects the Kubernetes version of the envtest tools.

set -o nounset
set -o pipefail
set -o errexit

ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION:-"1.31"}

# shellcheck disable=SC1090
# --use-env allows overwriting the envtest tools path via the KUBEBUILDER_ASSETS env var
source <(setup-envtest use --use-env -p env "${ENVTEST_K8S_VERSION}")
echo "Using envtest binaries installed at '$KUBEBUILDER_ASSETS'"

# Gomega/kubebuilder timeout settings shared by the integration suites.
source "$(dirname "$0")/test-integration.env"

test_flags=
if [ -n "${CI:-}" ] ; then
  # Use Ginkgo timeout in CI to print everything that is buffered in GinkgoWriter.
  test_flags+=" --ginkgo.timeout=5m"
else
  # We don't want Ginkgo's timeout flag locally because it causes skipping the test cache.
  timeout_flag=-timeout=5m
fi

# shellcheck disable=SC2086
# $test_flags and ${timeout_flag:-} are expanded unquoted on purpose so that an
# empty value contributes no argument to `go test`.
go test ${timeout_flag:-} "$@" $test_flags
27 |
--------------------------------------------------------------------------------
/hack/test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Runs the unit tests with the Go race detector enabled.

set -o nounset
set -o pipefail
set -o errexit

test_flags=
if [ -n "${CI:-}" ] ; then
  # Use Ginkgo timeout in CI to print everything that is buffered in GinkgoWriter.
  test_flags+=" --ginkgo.timeout=2m"
else
  # We don't want Ginkgo's timeout flag locally because it causes skipping the test cache.
  timeout_flag=-timeout=2m
fi

# shellcheck disable=SC2086
# $test_flags and ${timeout_flag:-} are expanded unquoted on purpose so that an
# empty value contributes no argument to `go test`.
go test -race ${timeout_flag:-} "$@" $test_flags
18 |
--------------------------------------------------------------------------------
/hack/tools.go:
--------------------------------------------------------------------------------
1 | //go:build tools
2 |
3 | /*
4 | Copyright 2023 Tim Ebert.
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | you may not use this file except in compliance with the License.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | */
18 |
// Package hack declares build-time tool dependencies of this repository.
// The blank imports make `go mod` track the tools' versions; the `tools`
// build constraint above keeps this file out of regular builds.
package hack

import (
	// ginkgo CLI, used to run the test suites.
	_ "github.com/onsi/ginkgo/v2/ginkgo"
	// k8s.io code generators.
	_ "k8s.io/code-generator"
)
25 |
--------------------------------------------------------------------------------
/hack/tools/bin/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/timebertt/kubernetes-controller-sharding/dc38c68a18bbab009823d7bbefbf9ecc00cd9baf/hack/tools/bin/.gitkeep
--------------------------------------------------------------------------------
/hack/vgopath-setup.sh:
--------------------------------------------------------------------------------
# Ensure that if GOPATH is set, the GOPATH/{bin,pkg} directories exist. This might not be the case in CI.
# As we will create a symlink against the bin folder we need to make sure that the bin directory is
# present in the GOPATH. (`mkdir -p` is a no-op for directories that already exist.)
if [ -n "${GOPATH:-}" ]; then
  mkdir -p "$GOPATH/bin" "$GOPATH/pkg"
fi

# Create a temporary virtual GOPATH that is cleaned up when the caller exits.
VIRTUAL_GOPATH="$(mktemp -d)"
trap 'rm -rf "$VIRTUAL_GOPATH"' EXIT

# Populate the virtual GOPATH from the module's dependencies.
go mod download && vgopath -o "$VIRTUAL_GOPATH"

export GOPATH="$VIRTUAL_GOPATH"
14 |
--------------------------------------------------------------------------------
/pkg/apis/config/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +groupName=config.sharding.timebertt.dev

// Package config contains the sharder configuration API
// (group config.sharding.timebertt.dev).
package config // import "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config"
20 |
--------------------------------------------------------------------------------
/pkg/apis/config/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha1 contains the v1alpha1 version of the sharder configuration API.
package v1alpha1 // import "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config/v1alpha1"
20 |
--------------------------------------------------------------------------------
/pkg/apis/config/v1alpha1/v1alpha1_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestV1alpha1 is the go test entry point: it registers Gomega's Fail handler
// with Ginkgo and runs this package's spec suite.
func TestV1alpha1(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharder Config API V1alpha1 Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/apis/sharding/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +groupName=sharding.timebertt.dev

// Package sharding contains the sharding API
// (group sharding.timebertt.dev).
package sharding // import "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding"
20 |
--------------------------------------------------------------------------------
/pkg/apis/sharding/v1alpha1/constants_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1_test
18 |
19 | import (
20 | . "github.com/onsi/ginkgo/v2"
21 | . "github.com/onsi/gomega"
22 |
23 | . "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
24 | )
25 |
// Specs for the per-ring label key helpers: both are expected to append the
// given ControllerRing name to a fixed label prefix.

var _ = Describe("#LabelShard", func() {
	It("should append the ControllerRing name", func() {
		Expect(LabelShard("foo")).To(Equal("shard.alpha.sharding.timebertt.dev/foo"))
	})
})

var _ = Describe("#LabelDrain", func() {
	It("should append the ControllerRing name", func() {
		Expect(LabelDrain("foo")).To(Equal("drain.alpha.sharding.timebertt.dev/foo"))
	})
})
37 |
--------------------------------------------------------------------------------
/pkg/apis/sharding/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1 // import "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
18 |
--------------------------------------------------------------------------------
/pkg/apis/sharding/v1alpha1/v1alpha1_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestV1alpha1 is the go test entry point: it registers Gomega's Fail handler
// with Ginkgo and runs this package's spec suite.
func TestV1alpha1(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharding API V1alpha1 Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/controller/controllerring/controllerring_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controllerring_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestControllerRing is the go test entry point: it registers Gomega's Fail
// handler with Ginkgo and runs this package's spec suite.
func TestControllerRing(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "ControllerRing Controller Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/controller/sharder/sharder_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package sharder_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestSharder is the go test entry point: it registers Gomega's Fail handler
// with Ginkgo and runs this package's spec suite.
func TestSharder(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharder Controller Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/controller/shardlease/shardlease_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package shardlease_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestShardLease is the go test entry point: it registers Gomega's Fail
// handler with Ginkgo and runs this package's spec suite.
func TestShardLease(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Shard Lease Controller Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/shard/controller/controller_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestController is the go test entry point: it registers Gomega's Fail
// handler with Ginkgo and runs this package's spec suite.
func TestController(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Shard Controller Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/shard/lease/lease_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package lease_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestLease is the go test entry point: it registers Gomega's Fail handler
// with Ginkgo and runs this package's spec suite.
func TestLease(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Shard Library Lease Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/consistenthash/consistenthash_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package consistenthash_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestConsistentHash is the go test entry point: it registers Gomega's Fail
// handler with Ginkgo and runs this package's spec suite.
func TestConsistentHash(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Consistent Hash Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/handler/handler_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package handler_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestHandler is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Sharding Handlers Suite".
func TestHandler(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharding Handlers Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/handler/lease.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package handler
18 |
19 | import (
20 | "context"
21 |
22 | "sigs.k8s.io/controller-runtime/pkg/client"
23 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
24 |
25 | shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
26 | )
27 |
28 | // MapLeaseToControllerRing maps a shard lease to its ControllerRing.
29 | func MapLeaseToControllerRing(_ context.Context, obj client.Object) []reconcile.Request {
30 | ring := obj.GetLabels()[shardingv1alpha1.LabelControllerRing]
31 | if ring == "" {
32 | return nil
33 | }
34 |
35 | return []reconcile.Request{{NamespacedName: client.ObjectKey{Name: ring}}}
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/sharding/key/key_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package key_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestKey is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Sharding Key Suite".
func TestKey(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharding Key Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/leases/leases_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package leases_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestLeases is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Leases Suite".
func TestLeases(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Leases Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/predicate/controllerring.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package predicate
18 |
19 | import (
20 | "sigs.k8s.io/controller-runtime/pkg/event"
21 | "sigs.k8s.io/controller-runtime/pkg/predicate"
22 | )
23 |
24 | // ControllerRingCreatedOrUpdated reacts on create and update events with generation changes but ignores delete
25 | // events. On deletion, there is nothing to do for the sharding controllers.
26 | func ControllerRingCreatedOrUpdated() predicate.Predicate {
27 | return predicate.And(
28 | predicate.GenerationChangedPredicate{},
29 | // ignore deletion of ControllerRings
30 | predicate.Funcs{
31 | CreateFunc: func(_ event.CreateEvent) bool { return true },
32 | UpdateFunc: func(_ event.UpdateEvent) bool { return true },
33 | DeleteFunc: func(_ event.DeleteEvent) bool { return false },
34 | },
35 | )
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/sharding/predicate/predicate_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package predicate_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestPredicate is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Sharding Predicates Suite".
func TestPredicate(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharding Predicates Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/sharding/ring/ring_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package ring_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestRing is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Sharding Ring Suite".
func TestRing(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharding Ring Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/utils/client/client_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package client_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestClient is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Client Utils Suite".
func TestClient(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Client Utils Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/utils/client/options_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package client_test
18 |
19 | import (
20 | . "github.com/onsi/ginkgo/v2"
21 | . "github.com/onsi/gomega"
22 | "sigs.k8s.io/controller-runtime/pkg/client"
23 |
24 | . "github.com/timebertt/kubernetes-controller-sharding/pkg/utils/client"
25 | )
26 |
// These specs verify that ResourceVersion applies as both a client.GetOption
// and a client.ListOption, i.e. that it sets Raw.ResourceVersion in each case.
var _ = Describe("ResourceVersion", func() {
	It("should set the resourceVersion on GetOptions", func() {
		opts := &client.GetOptions{}
		opts.ApplyOptions([]client.GetOption{ResourceVersion("1")})

		Expect(opts.Raw.ResourceVersion).To(Equal("1"))
	})

	It("should set the resourceVersion on ListOptions", func() {
		opts := &client.ListOptions{}
		opts.ApplyOptions([]client.ListOption{ResourceVersion("1")})

		Expect(opts.Raw.ResourceVersion).To(Equal("1"))
	})
})
42 |
--------------------------------------------------------------------------------
/pkg/utils/client/scheme.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package client
18 |
19 | import (
20 | "k8s.io/apimachinery/pkg/runtime"
21 | utilruntime "k8s.io/apimachinery/pkg/util/runtime"
22 | clientgoscheme "k8s.io/client-go/kubernetes/scheme"
23 |
24 | configv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config/v1alpha1"
25 | shardingv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/sharding/v1alpha1"
26 | )
27 |
28 | // SharderScheme is the scheme that the sharder uses.
29 | var SharderScheme = runtime.NewScheme()
30 |
31 | func init() {
32 | schemeBuilder := runtime.NewSchemeBuilder(
33 | clientgoscheme.AddToScheme,
34 | shardingv1alpha1.AddToScheme,
35 | configv1alpha1.AddToScheme,
36 | )
37 | utilruntime.Must(schemeBuilder.AddToScheme(SharderScheme))
38 | }
39 |
--------------------------------------------------------------------------------
/pkg/utils/errors/errors_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package errors_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestErrors is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Errors Utils Suite".
func TestErrors(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Errors Utils Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/utils/errors/multi.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package errors
18 |
19 | import (
20 | "fmt"
21 | "strings"
22 | )
23 |
24 | // FormatErrors is like multierror.ListFormatFunc without the noisy newlines and tabs.
25 | // It also simplifies the format for a single error.
26 | func FormatErrors(es []error) string {
27 | if len(es) == 1 {
28 | return es[0].Error()
29 | }
30 |
31 | errs := make([]string, len(es))
32 | for i, err := range es {
33 | errs[i] = err.Error()
34 | }
35 |
36 | return fmt.Sprintf("%d errors occurred: %s", len(es), strings.Join(errs, ", "))
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/utils/errors/multi_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package errors_test
18 |
19 | import (
20 | "fmt"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 |
25 | . "github.com/timebertt/kubernetes-controller-sharding/pkg/utils/errors"
26 | )
27 |
// These specs pin FormatErrors' two output shapes: the bare message for a
// single error and the "<count> errors occurred: <comma list>" form otherwise.
var _ = Describe("FormatErrors", func() {
	It("should return the single error", func() {
		Expect(FormatErrors([]error{fmt.Errorf("foo")})).To(Equal("foo"))
	})

	It("should return the error count and comma separated error list", func() {
		Expect(
			FormatErrors([]error{fmt.Errorf("foo"), fmt.Errorf("bar")}),
		).To(
			Equal("2 errors occurred: foo, bar"),
		)
	})
})
41 |
--------------------------------------------------------------------------------
/pkg/utils/healthz/cache.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package healthz
18 |
19 | import (
20 | "context"
21 | "fmt"
22 | "net/http"
23 | "time"
24 |
25 | "sigs.k8s.io/controller-runtime/pkg/healthz"
26 | )
27 |
// cacheSyncWaiter abstracts the WaitForCacheSync method of cache.Cache so that
// CacheSync can be tested with a lightweight fake.
type cacheSyncWaiter interface {
	// WaitForCacheSync reports whether all informers synced before ctx ended.
	WaitForCacheSync(ctx context.Context) bool
}
31 |
32 | // CacheSync returns a new healthz.Checker that will pass if all informers in the given cacheSyncWaiter have synced.
33 | func CacheSync(cacheSyncWaiter cacheSyncWaiter) healthz.Checker {
34 | return func(_ *http.Request) error {
35 | // cache.Cache.WaitForCacheSync is racy for a closed context, so use context with 5ms timeout instead.
36 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
37 | defer cancel()
38 |
39 | if !cacheSyncWaiter.WaitForCacheSync(ctx) {
40 | return fmt.Errorf("informers have not synced yet")
41 | }
42 | return nil
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/pkg/utils/healthz/cache_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package healthz_test
18 |
19 | import (
20 | "context"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 |
25 | . "github.com/timebertt/kubernetes-controller-sharding/pkg/utils/healthz"
26 | )
27 |
// These specs drive CacheSync through both outcomes of WaitForCacheSync using
// the fakeSyncWaiter double defined below.
var _ = Describe("CacheSync", func() {
	It("should succeed if all informers sync", func() {
		checker := CacheSync(fakeSyncWaiter(true))
		Expect(checker(nil)).NotTo(HaveOccurred())
	})
	It("should fail if informers don't sync", func() {
		checker := CacheSync(fakeSyncWaiter(false))
		Expect(checker(nil)).To(MatchError(ContainSubstring("not synced")))
	})
})
38 |
// fakeSyncWaiter is a test double for cacheSyncWaiter: its bool value is the
// fixed result of WaitForCacheSync.
type fakeSyncWaiter bool

// WaitForCacheSync ignores the context and returns the fake's configured result.
func (f fakeSyncWaiter) WaitForCacheSync(_ context.Context) bool {
	return bool(f)
}
44 |
--------------------------------------------------------------------------------
/pkg/utils/healthz/healthz_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package healthz_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestHealthz is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Healthz Suite".
func TestHealthz(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Healthz Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/utils/pager/pager_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package pager_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestPager is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Pager Suite".
func TestPager(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Pager Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/utils/routes/profiling.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package routes
18 |
19 | import (
20 | "net/http"
21 | "net/http/pprof"
22 | )
23 |
// ProfilingHandlers is the set of profiling endpoints.
// This can be added to controller-runtime's metrics server via manager.Options.Metrics.ExtraHandlers.
var ProfilingHandlers = map[string]http.Handler{
	// redirect the bare path to the canonical one with trailing slash so the
	// relative links on pprof's index page resolve correctly
	"/debug/pprof": http.RedirectHandler("/debug/pprof/", http.StatusFound),
	// NOTE(review): pprof.Index also serves the named profiles (heap, goroutine, ...)
	// for paths below /debug/pprof/ — presumably the surrounding mux routes the
	// subtree to this handler; verify against how ExtraHandlers registers paths.
	"/debug/pprof/": http.HandlerFunc(pprof.Index),
	"/debug/pprof/profile": http.HandlerFunc(pprof.Profile),
	"/debug/pprof/symbol": http.HandlerFunc(pprof.Symbol),
	"/debug/pprof/trace": http.HandlerFunc(pprof.Trace),
}
33 |
--------------------------------------------------------------------------------
/pkg/utils/strings.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package utils
18 |
19 | import (
20 | "unicode"
21 | )
22 |
// CapitalizeFirst capitalizes the first letter in the given string.
// The empty string is returned unchanged; the original implementation
// panicked with an index-out-of-range error in that case.
func CapitalizeFirst(in string) string {
	if in == "" {
		return in
	}

	// operate on runes so multi-byte first letters are upper-cased correctly
	r := []rune(in)
	r[0] = unicode.ToUpper(r[0])
	return string(r)
}
29 |
--------------------------------------------------------------------------------
/pkg/utils/strings_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package utils_test
18 |
19 | import (
20 | . "github.com/onsi/ginkgo/v2"
21 | . "github.com/onsi/gomega"
22 |
23 | . "github.com/timebertt/kubernetes-controller-sharding/pkg/utils"
24 | )
25 |
// These specs cover mixed-case inputs only.
// NOTE(review): CapitalizeFirst indexes r[0] unconditionally, so the empty
// string panics — consider adding coverage for that edge case.
var _ = Describe("CapitalizeFirst", func() {
	It("should capitalize the first letter", func() {
		Expect(CapitalizeFirst("foo bar Baz")).To(Equal("Foo bar Baz"))
		Expect(CapitalizeFirst("Foo BAR Baz")).To(Equal("Foo BAR Baz"))
		Expect(CapitalizeFirst("FOO bar Baz")).To(Equal("FOO bar Baz"))
	})
})
33 |
--------------------------------------------------------------------------------
/pkg/utils/test/envtest.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package test
18 |
19 | import (
20 | "os"
21 | "strings"
22 | )
23 |
24 | // UseExistingCluster reads the `USE_EXISTING_CLUSTER` env var similar to what envtest does, though exported.
25 | func UseExistingCluster() bool {
26 | return strings.ToLower(os.Getenv("USE_EXISTING_CLUSTER")) == "true"
27 | }
28 |
--------------------------------------------------------------------------------
/pkg/utils/test/object.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package test
18 |
19 | import (
20 | "crypto/sha256"
21 | "encoding/hex"
22 |
23 | "github.com/google/uuid"
24 | )
25 |
26 | // RandomSuffix generates a random string that is safe to use as a name suffix in tests.
27 | func RandomSuffix() string {
28 | unique := uuid.New()
29 | hash := sha256.Sum256(unique[:])
30 | return hex.EncodeToString(hash[:])[:8]
31 | }
32 |
--------------------------------------------------------------------------------
/pkg/utils/utils_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package utils_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestUtils is the `go test` entry point for this package's Ginkgo specs:
// it wires Gomega failures into Ginkgo and runs all registered specs as the
// "Utils Suite".
func TestUtils(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Utils Suite")
}
30 |
--------------------------------------------------------------------------------
/pkg/webhook/add.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package webhook
18 |
19 | import (
20 | "context"
21 | "fmt"
22 |
23 | "sigs.k8s.io/controller-runtime/pkg/manager"
24 |
25 | configv1alpha1 "github.com/timebertt/kubernetes-controller-sharding/pkg/apis/config/v1alpha1"
26 | "github.com/timebertt/kubernetes-controller-sharding/pkg/webhook/sharder"
27 | )
28 |
29 | // AddToManager adds all webhooks to the manager.
30 | func AddToManager(ctx context.Context, mgr manager.Manager, config *configv1alpha1.SharderConfig) error {
31 | if err := (&sharder.Handler{}).AddToManager(mgr); err != nil {
32 | return fmt.Errorf("failed adding sharder webhook: %w", err)
33 | }
34 |
35 | return nil
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/webhook/sharder/metrics.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package sharder
18 |
19 | import (
20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
21 |
22 | shardingmetrics "github.com/timebertt/kubernetes-controller-sharding/pkg/sharding/metrics"
23 | )
24 |
// Metrics allows observing object assignments performed by the sharder webhook.
type Metrics interface {
	// ObserveAssignment records one assignment for the given ControllerRing and GroupResource.
	ObserveAssignment(controllerRingName string, gr metav1.GroupResource)
}

// realMetrics implements Metrics by incrementing the package-global sharding counters.
type realMetrics struct{}

// ObserveAssignment increments AssignmentsTotal labeled with the ring name, group, and resource.
func (realMetrics) ObserveAssignment(controllerRingName string, gr metav1.GroupResource) {
	shardingmetrics.AssignmentsTotal.WithLabelValues(controllerRingName, gr.Group, gr.Resource).Inc()
}
34 |
--------------------------------------------------------------------------------
/pkg/webhook/sharder/sharder_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2025 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package sharder_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestSharder bootstraps the ginkgo test suite: it wires gomega failures into
// ginkgo's Fail and runs all specs of this package as a single go test.
func TestSharder(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Sharder Webhook Suite")
}
30 |
--------------------------------------------------------------------------------
/webhosting-operator/PROJECT:
--------------------------------------------------------------------------------
1 | componentConfig: true
2 | domain: timebertt.dev
3 | layout:
4 | - go.kubebuilder.io/v3
5 | multigroup: true
6 | projectName: webhosting-operator
7 | repo: github.com/timebertt/kubernetes-controller-sharding/webhosting-operator
8 | resources:
9 | - api:
10 | crdVersion: v1
11 | namespaced: true
12 | controller: true
13 | domain: timebertt.dev
14 | group: webhosting
15 | kind: Website
16 | path: github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/apis/webhosting/v1alpha1
17 | version: v1alpha1
18 | - api:
19 | crdVersion: v1
20 | controller: true
21 | domain: timebertt.dev
22 | group: webhosting
23 | kind: Theme
24 | path: github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/apis/webhosting/v1alpha1
25 | version: v1alpha1
26 | - api:
27 | crdVersion: v1
28 | namespaced: true
29 | domain: webhosting.timebertt.dev
30 | group: config
31 | kind: WebhostingOperatorConfig
32 | path: github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/apis/config/v1alpha1
33 | version: v1alpha1
34 | version: "3"
35 |
--------------------------------------------------------------------------------
/webhosting-operator/cmd/measure/test.yaml:
--------------------------------------------------------------------------------
1 | queries:
2 | - name: queue-rate-by-pod
3 | type: range # returns a matrix
4 | slo: 200
5 | query: sum(rate(workqueue_adds_total{job="webhosting-operator"}[1m])) by (pod)
6 | - name: queue-latency-by-controller
7 | type: instant # returns a vector
8 | slo: 0.1
9 | query: |
10 | histogram_quantile(0.99,
11 | sum by (name, le) (rate(
12 | workqueue_queue_duration_seconds_bucket{
13 | job="webhosting-operator",
14 | }[$__range]
15 | ))
16 | )
17 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: experiment
5 |
6 | labels:
7 | - includeSelectors: true
8 | pairs:
9 | app.kubernetes.io/name: experiment
10 |
11 | images:
12 | - name: experiment
13 | newName: ghcr.io/timebertt/kubernetes-controller-sharding/experiment
14 | newTag: latest
15 |
16 | resources:
17 | - namespace.yaml
18 | - job.yaml
19 | - rbac.yaml
20 | - service.yaml
21 | # provide prometheus running in namespace "monitoring" with the permissions required for service discovery in namespace
22 | # "experiment"
23 | - prometheus_rbac.yaml
24 | - servicemonitor.yaml
25 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: experiment
5 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/prometheus_rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: prometheus
7 | app.kubernetes.io/instance: k8s
8 | app.kubernetes.io/name: prometheus
9 | name: prometheus-k8s
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - services
15 | - endpoints
16 | - pods
17 | verbs:
18 | - get
19 | - list
20 | - watch
21 | ---
22 | apiVersion: rbac.authorization.k8s.io/v1
23 | kind: RoleBinding
24 | metadata:
25 | labels:
26 | app.kubernetes.io/component: prometheus
27 | app.kubernetes.io/instance: k8s
28 | app.kubernetes.io/name: prometheus
29 | name: prometheus-k8s
30 | roleRef:
31 | apiGroup: rbac.authorization.k8s.io
32 | kind: Role
33 | name: prometheus-k8s
34 | subjects:
35 | - kind: ServiceAccount
36 | name: prometheus-k8s
37 | namespace: monitoring
38 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: experiment
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRole
9 | metadata:
10 | name: experiment
11 | rules:
12 | - apiGroups:
13 | - webhosting.timebertt.dev
14 | resources:
15 | - themes
16 | - websites
17 | verbs:
18 | - get
19 | - list
20 | - watch
21 | - create
22 | - patch
23 | - update
24 | - delete
25 | - deletecollection
26 | - apiGroups:
27 | - ""
28 | resources:
29 | - namespaces
30 | verbs:
31 | - get
32 | - list
33 | - watch
34 | - create
35 | - patch
36 | - update
37 | - delete
38 | - deletecollection
39 | - apiGroups:
40 | - rbac.authorization.k8s.io
41 | resources:
42 | - clusterroles
43 | verbs:
44 | - create
45 | - delete
46 | - apiGroups:
47 | - apps
48 | resources:
49 | - deployments
50 | verbs:
51 | - get
52 | - list
53 | - watch
54 | - patch
55 | - apiGroups:
56 | - coordination.k8s.io
57 | resources:
58 | - leases
59 | verbs:
60 | - get
61 | - list
62 | - watch
63 | - deletecollection
64 | ---
65 | apiVersion: rbac.authorization.k8s.io/v1
66 | kind: ClusterRoleBinding
67 | metadata:
68 | name: experiment
69 | roleRef:
70 | apiGroup: rbac.authorization.k8s.io
71 | kind: ClusterRole
72 | name: experiment
73 | subjects:
74 | - kind: ServiceAccount
75 | name: experiment
76 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: experiment
5 | namespace: system
6 | spec:
7 | ports:
8 | - name: metrics
9 | port: 8080
10 | protocol: TCP
11 | targetPort: metrics
12 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/base/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: experiment
5 | spec:
6 | endpoints:
7 | - path: /metrics
8 | port: metrics
9 | scheme: http
10 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 10s
12 | scrapeTimeout: 10s
13 | relabelings:
14 | - targetLabel: job
15 | replacement: experiment
16 | selector:
17 | matchLabels:
18 | app.kubernetes.io/name: experiment
19 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/basic/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../base
6 |
7 | patches:
8 | - target:
9 | kind: Job
10 | name: experiment
11 | patch: |
12 | - op: add
13 | path: /spec/template/spec/containers/0/args/-
14 | value: basic
15 |
--------------------------------------------------------------------------------
/webhosting-operator/config/experiment/scale-out/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../base
6 |
7 | patches:
8 | - target:
9 | kind: Job
10 | name: experiment
11 | patch: |
12 | - op: add
13 | path: /spec/template/spec/containers/0/args/-
14 | value: scale-out
15 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | # Adds namespace to all resources.
5 | namespace: webhosting-system
6 |
7 | # Value of this field is prepended to the
8 | # names of all resources, e.g. a deployment named
9 | # "wordpress" becomes "alices-wordpress".
# Note that it should also match the prefix (text before '-') of the namespace
11 | # field above.
12 | namePrefix: webhosting-
13 |
14 | # Labels to add to all resources and selectors.
15 | labels:
16 | - includeSelectors: true
17 | pairs:
18 | app.kubernetes.io/name: webhosting-operator
19 |
20 | images:
21 | - name: controller
22 | newName: ghcr.io/timebertt/kubernetes-controller-sharding/webhosting-operator
23 | newTag: latest
24 |
25 | resources:
26 | - namespace.yaml
27 | - manager.yaml
28 | - service.yaml
29 | - ../crds
30 | - ../rbac
31 | - metrics_auth.yaml
32 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/base/manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: operator
5 | namespace: system
6 | spec:
7 | replicas: 3
8 | template:
9 | metadata:
10 | annotations:
11 | kubectl.kubernetes.io/default-container: manager
12 | spec:
13 | automountServiceAccountToken: true
14 | securityContext:
15 | runAsNonRoot: true
16 | containers:
17 | - name: manager
18 | image: controller:latest
19 | args: []
20 | env:
21 | - name: DISABLE_HTTP2
22 | value: "true"
23 | - name: WEBSITE_CONCURRENT_SYNCS
24 | value: "15"
25 | ports:
26 | - name: metrics
27 | containerPort: 8080
28 | protocol: TCP
29 | securityContext:
30 | allowPrivilegeEscalation: false
31 | livenessProbe:
32 | httpGet:
33 | path: /healthz
34 | port: 8081
35 | initialDelaySeconds: 15
36 | periodSeconds: 20
37 | readinessProbe:
38 | httpGet:
39 | path: /readyz
40 | port: 8081
41 | initialDelaySeconds: 5
42 | periodSeconds: 10
43 | resources:
44 | limits:
45 | cpu: "2"
46 | memory: 1Gi
47 | requests:
48 | cpu: "1"
49 | memory: 512Mi
50 | serviceAccountName: operator
51 | terminationGracePeriodSeconds: 30
52 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/base/metrics_auth.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: operator-metrics-auth
6 | rules:
7 | - apiGroups:
8 | - authentication.k8s.io
9 | resources:
10 | - tokenreviews
11 | verbs:
12 | - create
13 | - apiGroups:
14 | - authorization.k8s.io
15 | resources:
16 | - subjectaccessreviews
17 | verbs:
18 | - create
19 | ---
20 | apiVersion: rbac.authorization.k8s.io/v1
21 | kind: ClusterRoleBinding
22 | metadata:
23 | name: operator-metrics-auth
24 | roleRef:
25 | apiGroup: rbac.authorization.k8s.io
26 | kind: ClusterRole
27 | name: operator-metrics-auth
28 | subjects:
29 | - kind: ServiceAccount
30 | name: operator
31 | namespace: system
32 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/base/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: system
5 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/base/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: operator
5 | spec:
6 | type: ClusterIP
7 | ports:
8 | - port: 8080
9 | name: metrics
10 | protocol: TCP
11 | targetPort: metrics
12 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/controllerring/controllerring.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: sharding.timebertt.dev/v1alpha1
2 | kind: ControllerRing
3 | metadata:
4 | name: webhosting-operator
5 | spec:
6 | resources:
7 | - group: webhosting.timebertt.dev
8 | resource: websites
9 | controlledResources:
10 | - group: apps
11 | resource: deployments
12 | - group: ""
13 | resource: configmaps
14 | - group: ""
15 | resource: services
16 | - group: networking.k8s.io
17 | resource: ingresses
18 | namespaceSelector:
19 | matchLabels:
20 | webhosting.timebertt.dev/project: "true"
21 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/controllerring/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1alpha1
2 | kind: Component
3 |
4 | resources:
5 | - controllerring.yaml
6 | - sharder_rbac.yaml
7 |
8 | patches:
9 | - path: manager_patch.yaml
10 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/controllerring/manager_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: operator
5 | namespace: system
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: manager
11 | env:
12 | - name: ENABLE_SHARDING
13 | value: "true"
14 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/controllerring/sharder_rbac.yaml:
--------------------------------------------------------------------------------
1 | # These manifests grant the sharder controller permissions to act on resources that we listed in the ControllerRing.
2 | # We need to grant these permissions explicitly depending on what we configured. Otherwise, the sharder would require
3 | # cluster-admin access.
4 | ---
5 | apiVersion: rbac.authorization.k8s.io/v1
6 | kind: ClusterRole
7 | metadata:
8 | name: sharding:controllerring:webhosting-operator
9 | rules:
10 | - apiGroups:
11 | - webhosting.timebertt.dev
12 | resources:
13 | - websites
14 | verbs:
15 | - list
16 | - patch
17 | - apiGroups:
18 | - apps
19 | resources:
20 | - deployments
21 | verbs:
22 | - list
23 | - patch
24 | - apiGroups:
25 | - ""
26 | resources:
27 | - configmaps
28 | - services
29 | verbs:
30 | - list
31 | - patch
32 | - apiGroups:
33 | - networking.k8s.io
34 | resources:
35 | - ingresses
36 | verbs:
37 | - list
38 | - patch
39 | ---
40 | apiVersion: rbac.authorization.k8s.io/v1
41 | kind: ClusterRoleBinding
42 | metadata:
43 | name: sharding:controllerring:webhosting-operator
44 | roleRef:
45 | apiGroup: rbac.authorization.k8s.io
46 | kind: ClusterRole
47 | name: sharding:controllerring:webhosting-operator
48 | subjects:
49 | - kind: ServiceAccount
50 | name: sharder
51 | namespace: sharding-system
52 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/crds/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
# It should be run via config/manager/base (which the overlays build on)
4 | resources:
5 | - webhosting.timebertt.dev_websites.yaml
6 | - webhosting.timebertt.dev_themes.yaml
7 | #+kubebuilder:scaffold:crdkustomizeresource
8 |
9 | # the following config is for teaching kustomize how to do kustomization for CRDs.
10 | configurations:
11 | - kustomizeconfig.yaml
12 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/crds/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD
2 | nameReference:
3 | - kind: Service
4 | version: v1
5 | fieldSpecs:
6 | - kind: CustomResourceDefinition
7 | version: v1
8 | group: apiextensions.k8s.io
9 | path: spec/conversion/webhook/clientConfig/service/name
10 |
11 | namespace:
12 | - kind: CustomResourceDefinition
13 | version: v1
14 | group: apiextensions.k8s.io
15 | path: spec/conversion/webhook/clientConfig/service/namespace
16 | create: false
17 |
18 | varReference:
19 | - path: metadata/annotations
20 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1alpha1
2 | kind: Component
3 |
4 | patches:
5 | - target:
6 | group: apps
7 | kind: Deployment
8 | name: webhosting-operator
9 | namespace: webhosting-system
10 | patch: |
11 | - op: add
12 | path: /spec/template/spec/containers/0/args/-
13 | value: --zap-devel
14 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/debug/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | patches:
8 | - path: manager_debug_patch.yaml
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/debug/manager_debug_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: operator
5 | namespace: system
6 | spec:
7 | strategy:
8 | type: Recreate
9 | template:
10 | spec:
11 | securityContext:
12 | # delve can't run as non-root (when using skaffold debug)
13 | runAsNonRoot: false
14 | containers:
15 | - name: manager
16 | env:
17 | # disable leader election for debugging and use Deployment strategy Recreate.
# Another option would have been to increase the durations and rely on ReleaseOnCancel to release the lease;
19 | # however the delve debugger seems to kill the child process too fast for the leader elector to release the lease
20 | # (probably, this is because the network connection to the skaffold/dlv client breaks off during termination)
21 | - name: LEADER_ELECT
22 | value: "false"
23 | terminationGracePeriodSeconds: 5
24 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | components:
8 | - ../../controllerring
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../default
6 |
7 | components:
8 | - ../../devel
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/non-sharded-devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../non-sharded
6 |
7 | components:
8 | - ../../devel
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/non-sharded/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../base
6 |
7 | patches:
8 | - path: manager_patch.yaml
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/non-sharded/manager_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: operator
5 | namespace: system
6 | spec:
7 | replicas: 1
8 | template:
9 | spec:
10 | containers:
11 | - name: manager
12 | env:
13 | - name: ENABLE_SHARDING
14 | value: "false"
15 | # When comparing singleton vs sharded setups, the singleton will fail to verify the SLOs because it has too few
16 | # website workers. Increase the worker count to allow comparing the setups.
17 | - name: WEBSITE_CONCURRENT_SYNCS
18 | value: "50"
19 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/shoot/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../default
6 |
7 | components:
8 | - ../../../with-dns
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/shoot/devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../default
6 |
7 | components:
8 | - ../../../devel
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/shoot/non-sharded-devel/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../non-sharded
6 |
7 | components:
8 | - ../../../devel
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/overlays/shoot/non-sharded/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../../non-sharded
6 |
7 | components:
8 | - ../../../with-dns
9 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | # All RBAC will be applied under this service account in
3 | # the deployment namespace. You may comment out this resource
4 | # if your manager will use a service account that exists at
5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding
6 | # subjects if changing service account names.
7 | - service_account.yaml
8 | - role.yaml
9 | - role_binding.yaml
10 | - leader_election_role.yaml
11 | - leader_election_role_binding.yaml
12 | # provide parca running in namespace "parca" with the permissions required for service discovery in namespace
# "webhosting-system" and to scrape the pprof endpoints of webhosting-operator
14 | - parca_rbac.yaml
15 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/leader_election_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions to do leader election.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | name: leader-election
6 | rules:
7 | - apiGroups:
8 | - coordination.k8s.io
9 | resources:
10 | - leases
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - create
16 | - update
17 | - patch
18 | - delete
19 | - apiGroups:
20 | - ""
21 | resources:
22 | - events
23 | verbs:
24 | - create
25 | - patch
26 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/leader_election_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: leader-election
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: Role
8 | name: leader-election
9 | subjects:
10 | - kind: ServiceAccount
11 | name: operator
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: operator
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: operator
9 | subjects:
10 | - kind: ServiceAccount
11 | name: operator
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/service_account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: operator
5 | namespace: system
6 | automountServiceAccountToken: false
7 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/theme_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit themes.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: theme-editor-role
6 | rules:
7 | - apiGroups:
8 | - webhosting.timebertt.dev
9 | resources:
10 | - themes
11 | verbs:
12 | - create
13 | - delete
14 | - get
15 | - list
16 | - patch
17 | - update
18 | - watch
19 | - apiGroups:
20 | - webhosting.timebertt.dev
21 | resources:
22 | - themes/status
23 | verbs:
24 | - get
25 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/theme_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view themes.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: theme-viewer-role
6 | rules:
7 | - apiGroups:
8 | - webhosting.timebertt.dev
9 | resources:
10 | - themes
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - webhosting.timebertt.dev
17 | resources:
18 | - themes/status
19 | verbs:
20 | - get
21 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/website_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit websites.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: website-editor-role
6 | rules:
7 | - apiGroups:
8 | - webhosting.timebertt.dev
9 | resources:
10 | - websites
11 | verbs:
12 | - create
13 | - delete
14 | - get
15 | - list
16 | - patch
17 | - update
18 | - watch
19 | - apiGroups:
20 | - webhosting.timebertt.dev
21 | resources:
22 | - websites/status
23 | verbs:
24 | - get
25 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/rbac/website_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view websites.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: website-viewer-role
6 | rules:
7 | - apiGroups:
8 | - webhosting.timebertt.dev
9 | resources:
10 | - websites
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - webhosting.timebertt.dev
17 | resources:
18 | - websites/status
19 | verbs:
20 | - get
21 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/with-dns/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: config.webhosting.timebertt.dev/v1alpha1
2 | kind: WebhostingOperatorConfig
3 | clientConnection:
4 | qps: 800
5 | burst: 1000
6 | ingress:
7 | hosts:
8 | - webhosting.timebertt.dev
9 | tls:
10 | - hosts:
11 | - webhosting.timebertt.dev
12 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/with-dns/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1alpha1
2 | kind: Component
3 |
4 | generatorOptions:
5 | disableNameSuffixHash: true
6 |
7 | configMapGenerator:
8 | - name: webhosting-operator
9 | namespace: webhosting-system
10 | files:
11 | - config.yaml
12 |
13 | patches:
14 | - path: manager_patch.yaml
15 | - target:
16 | group: apps
17 | kind: Deployment
18 | name: webhosting-operator
19 | namespace: webhosting-system
20 | patch: |
21 | - op: add
22 | path: /spec/template/spec/containers/0/args/-
23 | value: --config=/config.yaml
24 |
--------------------------------------------------------------------------------
/webhosting-operator/config/manager/with-dns/manager_patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webhosting-operator
5 | namespace: webhosting-system
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - name: manager
11 | volumeMounts:
12 | - name: config
13 | mountPath: /config.yaml
14 | subPath: config.yaml
15 | volumes:
16 | - name: config
17 | configMap:
18 | name: webhosting-operator
19 |
--------------------------------------------------------------------------------
/webhosting-operator/config/monitoring/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - ../webhosting-operator
6 |
7 | generatorOptions:
8 | disableNameSuffixHash: true
9 |
10 | configMapGenerator:
11 | - files:
12 | - dashboards/sharding.json
13 | - dashboards/webhosting.json
14 | - dashboards/experiments.json
15 | name: grafana-dashboards-sharding
16 | namespace: monitoring
17 | options:
18 | labels:
19 | grafana_dashboard: "true"
20 |
--------------------------------------------------------------------------------
/webhosting-operator/config/monitoring/webhosting-operator/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | namespace: webhosting-system
5 |
6 | labels:
7 | - includeSelectors: true
8 | pairs:
9 | app.kubernetes.io/name: webhosting-operator
10 |
11 | resources:
12 | # provide prometheus running in namespace "monitoring" with the permissions required for service discovery in namespace
13 | # "webhosting-system"
14 | - prometheus_rbac.yaml
15 | - servicemonitor.yaml
16 | - prometheusrule.yaml
17 |
--------------------------------------------------------------------------------
/webhosting-operator/config/monitoring/webhosting-operator/prometheus_rbac.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | labels:
6 | app.kubernetes.io/component: prometheus
7 | app.kubernetes.io/instance: k8s
8 | app.kubernetes.io/name: prometheus
9 | name: prometheus-k8s
10 | rules:
11 | - apiGroups:
12 | - ""
13 | resources:
14 | - services
15 | - endpoints
16 | - pods
17 | verbs:
18 | - get
19 | - list
20 | - watch
21 | ---
22 | apiVersion: rbac.authorization.k8s.io/v1
23 | kind: RoleBinding
24 | metadata:
25 | labels:
26 | app.kubernetes.io/component: prometheus
27 | app.kubernetes.io/instance: k8s
28 | app.kubernetes.io/name: prometheus
29 | name: prometheus-k8s
30 | roleRef:
31 | apiGroup: rbac.authorization.k8s.io
32 | kind: Role
33 | name: prometheus-k8s
34 | subjects:
35 | - kind: ServiceAccount
36 | name: prometheus-k8s
37 | namespace: monitoring
38 |
--------------------------------------------------------------------------------
/webhosting-operator/config/monitoring/webhosting-operator/prometheusrule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PrometheusRule
3 | metadata:
4 | name: webhosting-operator
5 | spec:
6 | groups:
7 | - name: webhosting-website.rules
8 | rules:
9 | - record: namespace_run:kube_website_info:sum
10 | expr: sum by (namespace, run_id) (kube_website_info)
11 | - record: namespace_theme:kube_website_info:sum
12 | expr: sum by (namespace, theme) (kube_website_info)
13 | - record: namespace_phase:kube_website_status_phase:sum
14 | expr: sum by (namespace, phase) (kube_website_status_phase)
15 | - record: namespace_shard:kube_website_shard:sum
16 | expr: sum by (namespace, shard) (kube_website_shard)
17 | - record: namespace_shard_drain:kube_website_shard:sum
18 | expr: sum by (namespace, shard, drain) (kube_website_shard)
19 |
--------------------------------------------------------------------------------
/webhosting-operator/config/monitoring/webhosting-operator/servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: webhosting-operator
5 | spec:
6 | endpoints:
7 | - path: /metrics
8 | port: metrics
9 | scheme: https
10 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | honorLabels: true
12 | interval: 10s
13 | scrapeTimeout: 10s
14 | tlsConfig:
15 | insecureSkipVerify: true
16 | relabelings:
17 | - action: labelmap
18 | regex: "__meta_kubernetes_pod_label_label_prometheus_io_(.*)"
19 | replacement: "${1}"
20 | jobLabel: app.kubernetes.io/name
21 | selector:
22 | matchLabels:
23 | app.kubernetes.io/name: webhosting-operator
24 |
--------------------------------------------------------------------------------
/webhosting-operator/config/policy/experiment-scheduling.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: experiment-scheduling
5 | spec:
6 | failurePolicy: Fail
7 | rules:
8 | # schedule experiment on dedicated worker pool for better isolation in load tests
9 | - name: add-scheduling-constraints
10 | match:
11 | any:
12 | - resources:
13 | kinds:
14 | - Pod
15 | namespaces:
16 | - experiment
17 | selector:
18 | matchLabels:
19 | app.kubernetes.io/name: experiment
20 | operations:
21 | - CREATE
22 | mutate:
23 | patchesJson6902: |-
24 | - op: add
25 | path: "/spec/tolerations/-"
26 | value: {"key":"dedicated-for","operator":"Equal","value":"experiment","effect":"NoSchedule"}
27 | - op: add
28 | path: "/spec/affinity/nodeAffinity/requiredDuringSchedulingIgnoredDuringExecution/nodeSelectorTerms/-"
29 | value: {"matchExpressions": [{"key":"dedicated-for","operator":"In","values":["experiment"]}]}
30 |
--------------------------------------------------------------------------------
/webhosting-operator/config/policy/guaranteed-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: guaranteed-resources
5 | spec:
6 | failurePolicy: Fail
7 | rules:
8 | # set resource requests to limits to guarantee the resources during load test experiments
9 | - name: guaranteed-resources
10 | match:
11 | any:
12 | - resources:
13 | kinds:
14 | - Pod
15 | namespaces:
16 | - experiment
17 | - sharding-system
18 | - webhosting-system
19 | selector:
20 | matchExpressions:
21 | - key: app.kubernetes.io/name
22 | operator: In
23 | values:
24 | - experiment
25 | - controller-sharding
26 | - webhosting-operator
27 | operations:
28 | - CREATE
29 | mutate:
30 | foreach:
31 | - list: request.object.spec.containers
32 | patchStrategicMerge:
33 | spec:
34 | containers:
35 | - (name): "{{element.name}}"
36 | resources:
37 | requests:
38 | cpu: "{{element.resources.limits.cpu}}"
39 | memory: "{{element.resources.limits.memory}}"
40 |
--------------------------------------------------------------------------------
/webhosting-operator/config/policy/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - experiment-scheduling.yaml
6 | - scale-up-worker-experiment.yaml
7 | - webhosting-operator-scheduling.yaml
8 | - guaranteed-resources.yaml
9 |
10 | images:
11 | - name: pause
12 | newName: registry.k8s.io/pause
13 | newTag: "3.10"
14 |
--------------------------------------------------------------------------------
/webhosting-operator/config/policy/webhosting-operator-scheduling.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: webhosting-operator-scheduling
5 | spec:
6 | failurePolicy: Fail
7 | rules:
8 | # schedule webhosting-operator on dedicated worker pool for better isolation in load tests
9 | - name: add-scheduling-constraints
10 | match:
11 | any:
12 | - resources:
13 | kinds:
14 | - Pod
15 | namespaces:
16 | - webhosting-system
17 | selector:
18 | matchLabels:
19 | app.kubernetes.io/name: webhosting-operator
20 | operations:
21 | - CREATE
22 | mutate:
23 | patchesJson6902: |-
24 | - op: add
25 | path: "/spec/tolerations/-"
26 | value: {"key":"dedicated-for","operator":"Equal","value":"sharding","effect":"NoSchedule"}
27 | - op: add
28 | path: "/spec/affinity/nodeAffinity/requiredDuringSchedulingIgnoredDuringExecution/nodeSelectorTerms/-"
29 | value: {"matchExpressions": [{"key":"dedicated-for","operator":"In","values":["sharding"]}]}
30 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 |
4 | resources:
5 | - project_namespace.yaml
6 | - theme_exciting.yaml
7 | - theme_lame.yaml
8 | - website_kubecon.yaml
9 | - website_library.yaml
10 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/project_namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: project-foo
5 | labels:
6 | webhosting.timebertt.dev/project: "true"
7 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/theme_exciting.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: webhosting.timebertt.dev/v1alpha1
2 | kind: Theme
3 | metadata:
4 | name: exciting
5 | spec:
6 | color: darkcyan
7 | fontFamily: Futura
8 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/theme_lame.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: webhosting.timebertt.dev/v1alpha1
2 | kind: Theme
3 | metadata:
4 | name: lame
5 | spec:
6 | color: darkgray
7 | fontFamily: Times
8 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/website_kubecon.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: webhosting.timebertt.dev/v1alpha1
2 | kind: Website
3 | metadata:
4 | name: kubecon
5 | namespace: project-foo
6 | spec:
7 | theme: exciting
8 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/website_library.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: webhosting.timebertt.dev/v1alpha1
2 | kind: Website
3 | metadata:
4 | name: library
5 | namespace: project-foo
6 | spec:
7 | theme: lame
8 |
--------------------------------------------------------------------------------
/webhosting-operator/config/samples/website_museum.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: webhosting.timebertt.dev/v1alpha1
2 | kind: Website
3 | metadata:
4 | name: museum
5 | namespace: project-foo
6 | spec:
7 | theme: lame
8 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/apis/config/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +groupName=config.webhosting.timebertt.dev

// Package config contains the internal version of the webhosting-operator configuration API.
package config // import "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/apis/config"
20 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/apis/config/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +k8s:defaulter-gen=TypeMeta

// Package v1alpha1 contains the v1alpha1 version of the webhosting-operator configuration API.
package v1alpha1 // import "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/apis/config/v1alpha1"
20 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/apis/webhosting/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
// +groupName=webhosting.timebertt.dev

// Package webhosting contains the webhosting API group.
// Note: the package was previously declared as "config" (copy-paste from
// pkg/apis/config/doc.go); it is renamed to match the directory and the
// import comment path, following Go package naming conventions.
package webhosting // import "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/apis/webhosting"
20 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/apis/webhosting/v1alpha1/doc.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1 // import "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/apis/webhosting/v1alpha1"
18 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/controllers/webhosting/templates/index.tmpl:
--------------------------------------------------------------------------------
1 |
2 |
3 | {{ .Website.Name }}
4 |
34 |
35 |
36 |
37 |
Welcome to {{ .Website.Name }}
38 |
Server name: {{ .ServerName }}
39 |
This page is hosted on Kubernetes using
40 |
42 | webhosting-operator.
43 |
44 |
45 |
46 |
47 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/controllers/webhosting/templates/index_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package templates_test
18 |
19 | import (
20 | . "github.com/onsi/ginkgo/v2"
21 | . "github.com/onsi/gomega"
22 |
23 | . "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/controllers/webhosting/templates"
24 | "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/controllers/webhosting/templates/internal"
25 | )
26 |
// Smoke test: renders the index.html template with the shared example
// fixtures and only checks that rendering succeeds and produces the
// expected greeting — it does not assert the full page contents.
var _ = Describe("index.html", func() {
	It("should successfully render index page", func() {
		// just assert that the template can be rendered properly
		Expect(RenderIndexHTML(internal.CreateExamples())).To(ContainSubstring("Welcome to"))
	})
})
33 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/controllers/webhosting/templates/nginx.conf.tmpl:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | listen [::]:80;
4 | server_name localhost;
5 |
6 | # rewrite /namespace/name to / for requests proxied from Ingress controller
7 | # this way, we don't need any Ingress controller specific configuration or objects
8 | rewrite ^/{{ .Website.ObjectMeta.Namespace }}/{{ .Website.ObjectMeta.Name }}(.*)$ /$1 last;
9 | location / {
10 | root /usr/share/nginx/html;
11 | index index.html;
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/controllers/webhosting/templates/templates_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package templates_test
18 |
19 | import (
20 | "testing"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | )
25 |
// TestTemplates bootstraps the Ginkgo suite for the HTML templates package,
// wiring Gomega failures into Ginkgo and running all registered specs.
func TestTemplates(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "HTML Templates Suite")
}
30 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/controllers/webhosting/templates/testserver/server.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
19 | import (
20 | "fmt"
21 | "net/http"
22 |
23 | "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/controllers/webhosting/templates"
24 | "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/controllers/webhosting/templates/internal"
25 | )
26 |
27 | func main() {
28 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
29 | serverName, website, theme := internal.CreateExamples()
30 | if err := templates.ExecuteIndexHTMLTemplate(w, serverName, website, theme); err != nil {
31 | http.Error(w, fmt.Sprintf("internal server error: %v", err), http.StatusInternalServerError)
32 | }
33 | })
34 | // nolint:gosec // this is just for testing
35 | if err := http.ListenAndServe(":9090", nil); err != nil {
36 | panic(err)
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/experiment/scenario/all/all.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | // Package all imports all scenarios.
18 | package all
19 |
20 | import (
21 | _ "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/experiment/scenario/basic"
22 | _ "github.com/timebertt/kubernetes-controller-sharding/webhosting-operator/pkg/experiment/scenario/scale-out"
23 | )
24 |
--------------------------------------------------------------------------------
/webhosting-operator/pkg/utils/utils.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022 Tim Ebert.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package utils
18 |
19 | import (
20 | "math/rand"
21 | )
22 |
// PickRandom returns a uniformly random element of the given slice.
// The slice must be non-empty; rand.Intn panics when its argument is zero.
func PickRandom[T any](in []T) T {
	// nolint:gosec // doesn't need to be cryptographically secure
	index := rand.Intn(len(in))
	return in[index]
}
28 |
29 | // RandomName generates a random string with n characters that can be used as part of API object names.
30 | func RandomName(n int) string {
31 | const charset = "abcdefghijklmnopqrstuvwxyz"
32 | result := make([]byte, n)
33 | for i := range result {
34 | // nolint:gosec // doesn't need to be cryptographically secure
35 | result[i] = charset[rand.Intn(len(charset))]
36 | }
37 | return string(result)
38 | }
39 |
--------------------------------------------------------------------------------