├── .github └── workflows │ ├── build-images-registry.yml │ ├── deploy-weekly.yml │ ├── deploy-weekly │ ├── install.sh │ ├── kubesphere.tf │ └── var.tf │ ├── e2e-TEST.yml │ ├── issue_comment_webhook.yml │ ├── kind │ └── kind.yaml │ ├── nightly-builds.yml │ ├── release.yml │ └── sync-release-images.yml ├── .gitignore ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile.complete ├── Dockerfile.shelloperator ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── README_zh.md ├── controller ├── installRunner.py ├── lib │ ├── __init__.py │ ├── ansible │ │ ├── __init__.py │ │ ├── ansible_tasks.py │ │ └── task_management.py │ ├── config │ │ ├── __init__.py │ │ ├── cluster_config_generator.py │ │ └── cluster_config_management.py │ └── observer │ │ ├── __init__.py │ │ └── classes.py └── schedule.sh ├── deploy ├── cluster-configuration.yaml └── kubesphere-installer.yaml ├── docs ├── DeveloperGuide.md ├── IstioUpgradeGuide.md ├── JenkinsUpgradeGuide.md ├── NetWorkAccess.md ├── img │ └── Design.png ├── ks3.0-install-ks-with-default-csi.md ├── offlineEnvironment.md ├── roadmap.md ├── troubleshooting.md └── vars.md ├── env ├── cmdline └── extravars ├── playbooks ├── alerting.yaml ├── auditing.yaml ├── common.yaml ├── devops.yaml ├── edgeruntime.yaml ├── events.yaml ├── gatekeeper.yaml ├── gitlab.yaml ├── harbor.yaml ├── ks-config.yaml ├── ks-core.yaml ├── ks-migration.yaml ├── logging.yaml ├── metering.yaml ├── metrics_server.yaml ├── monitoring.yaml ├── multicluster.yaml ├── network.yaml ├── notification.yaml ├── openpitrix.yaml ├── preinstall.yaml ├── result-info.yaml ├── servicemesh.yaml └── telemetry.yaml ├── release.yml ├── roles ├── check-result │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── welcome.j2 ├── common │ ├── defaults │ │ └── main.yaml │ ├── files │ │ ├── common │ │ │ └── password.txt │ │ ├── elasticsearch-curator │ │ │ └── elasticsearch-curator-1.3.3.tgz │ │ ├── fluentbit-operator │ │ │ ├── fluentbit │ │ │ │ └── fluentbitconfig-fluentBitConfig.yaml │ │ │ └── init │ │ │ │ ├── fluentbit-operator-clusterRole.yaml │ │ │ │ ├── fluentbit-operator-clusterRoleBinding.yaml │ │ │ │ ├── fluentbit-operator-configmap.yaml │ │ │ │ ├── fluentbit-operator-filterCustomResourceDefinition.yaml │ │ │ │ ├── fluentbit-operator-fluentbitCustomResourceDefinition.yaml │ │ │ │ ├── fluentbit-operator-fluentbitconfigCustomResourceDefinition.yaml │ │ │ │ ├── fluentbit-operator-inputCustomResourceDefinition.yaml │ │ │ │ ├── fluentbit-operator-outputCustomResourceDefinition.yaml │ │ │ │ ├── fluentbit-operator-parserCustomResourceDefinition.yaml │ │ │ │ └── fluentbit-operator-serviceAccount.yaml │ │ ├── minio-ha │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── OWNERS │ │ │ ├── README.md │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helper_create_bucket.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── configmap.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── networkpolicy.yaml │ │ │ │ ├── post-install-create-bucket-job.yaml │ │ │ │ ├── pvc.yaml │ │ │ │ ├── secrets.yaml │ │ │ │ ├── service.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── servicemonitor.yaml │ │ │ │ └── statefulset.yaml │ │ │ └── values.yaml │ │ ├── openldap-ha │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── bootstrap.yaml │ │ │ │ ├── statefulset.yaml │ │ │ │ └── tests │ │ │ │ │ └── test-connection.yaml │ │ │ └── values.yaml │ │ ├── opensearch │ │ │ ├── opensearch-2.6.0.tgz │ │ │ ├── opensearch-curator-1.3.3.tgz │ │ │ └── 
opensearch-dashboards-2.6.0.tgz │ │ ├── redis-ha │ │ │ ├── Chart.yaml │ │ │ ├── OWNERS │ │ │ ├── README.md │ │ │ ├── ci │ │ │ │ └── haproxy-enabled-values.yaml │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _configs.tpl │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── redis-auth-secret.yaml │ │ │ │ ├── redis-ha-announce-service.yaml │ │ │ │ ├── redis-ha-configmap.yaml │ │ │ │ ├── redis-ha-pdb.yaml │ │ │ │ ├── redis-ha-role.yaml │ │ │ │ ├── redis-ha-rolebinding.yaml │ │ │ │ ├── redis-ha-service.yaml │ │ │ │ ├── redis-ha-serviceaccount.yaml │ │ │ │ ├── redis-ha-statefulset.yaml │ │ │ │ ├── redis-haproxy-deployment.yaml │ │ │ │ ├── redis-haproxy-service.yaml │ │ │ │ ├── redis-haproxy-serviceaccount.yaml │ │ │ │ └── tests │ │ │ │ │ ├── test-redis-ha-configmap.yaml │ │ │ │ │ └── test-redis-ha-pod.yaml │ │ │ └── values.yaml │ │ └── snapshot-controller │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── crds │ │ │ ├── snapshot.storage.k8s.io_volumesnapshot.yaml │ │ │ └── snapshot.storage.k8s.io_volumesnapshot_before119.yaml │ │ │ ├── templates │ │ │ ├── rbac-snapshot-controller.yaml │ │ │ └── setup-snapshot-controller.yaml │ │ │ └── values.yaml │ ├── tasks │ │ ├── common-install.yaml │ │ ├── escurator-install.yaml │ │ ├── fluentbit-install.yaml │ │ ├── get_old_config.yaml │ │ ├── init-namespaces.yaml │ │ ├── main.yaml │ │ ├── minio-install.yaml │ │ ├── minio-migration.yaml │ │ ├── mysql-install.yaml │ │ ├── openldap-install.yaml │ │ ├── openldap-migration.yaml │ │ ├── opensearch-install.yaml │ │ ├── pv-migration.yaml │ │ ├── redis-install.yaml │ │ └── redis-migration.yaml │ └── templates │ │ ├── custom-fluentbit-fluentBit.yaml.j2 │ │ ├── custom-fluentbit-operator-deployment.yaml.j2 │ │ ├── custom-values-elasticsearch-curator.yaml.j2 │ │ ├── custom-values-minio.yaml.j2 │ │ ├── custom-values-openldap.yaml.j2 │ │ ├── custom-values-opensearch-curator.yaml.j2 │ │ ├── custom-values-opensearch-dashboard.yaml.j2 │ │ ├── custom-values-opensearch-data.yaml.j2 │ │ ├── custom-values-opensearch-master.yaml.j2 │ │ ├── custom-values-redis.yaml.j2 │ │ ├── custom-values-snapshot-controller.yaml.j2 │ │ ├── etcd.yaml.j2 │ │ ├── minio.yaml.j2 │ │ ├── mysql.yaml.j2 │ │ ├── openldap.yaml.j2 │ │ ├── rclone.conf.j2 │ │ ├── redis.yaml.j2 │ │ └── redisMigrate.py.j2 ├── download │ ├── defaults │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── generate_images_list.yml │ │ ├── main.yml │ │ └── sync_images.yml │ └── templates │ │ └── images-list.txt.j2 ├── edgeruntime │ ├── files │ │ └── kubeedge │ │ │ └── cloudcore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── crds │ │ │ ├── apps_v1alpha1_edgeapplication.yaml │ │ │ ├── apps_v1alpha1_nodegroup.yaml │ │ │ ├── cluster_objectsync_v1alpha1.yaml │ │ │ ├── devices_v1alpha2_device.yaml │ │ │ ├── devices_v1alpha2_devicemodel.yaml │ │ │ ├── objectsync_v1alpha1.yaml │ │ │ ├── operations_v1alpha1_nodeupgradejob.yaml │ │ │ ├── router_v1_rule.yaml │ │ │ └── router_v1_ruleEndpoint.yaml │ │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ ├── configmap_cloudcore.yaml │ │ │ ├── daemonset_iptablesmanager.yaml │ │ │ ├── deployment_cloudcore.yaml │ │ │ ├── deployment_controllermanager.yaml │ │ │ ├── deployment_edgeservice.yaml │ │ │ ├── rbac-edgeservice.yaml │ │ │ ├── rbac_cloudcore.yaml │ │ │ ├── rbac_controllermanager.yaml │ │ │ ├── rbac_iptablesmanager.yaml │ │ │ ├── secret_cloudcore.yaml │ │ │ ├── service_cloudcore.yaml │ │ │ └── service_edgeservice.yaml │ │ │ └── values.yaml │ ├── tasks │ │ ├── kubeedge.yaml │ │ └── main.yaml │ └── templates │ │ └── 
custom-values-kubeedge.yaml.j2 ├── gatekeeper │ ├── files │ │ └── gatekeeper │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── crds │ │ │ ├── assign-customresourcedefinition.yaml │ │ │ ├── assignmetadata-customresourcedefinition.yaml │ │ │ ├── config-customresourcedefinition.yaml │ │ │ ├── constraintpodstatus-customresourcedefinition.yaml │ │ │ ├── constrainttemplate-customresourcedefinition.yaml │ │ │ ├── constrainttemplatepodstatus-customresourcedefinition.yaml │ │ │ └── mutatorpodstatus-customresourcedefinition.yaml │ │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ ├── gatekeeper-admin-podsecuritypolicy.yaml │ │ │ ├── gatekeeper-admin-serviceaccount.yaml │ │ │ ├── gatekeeper-audit-deployment.yaml │ │ │ ├── gatekeeper-controller-manager-deployment.yaml │ │ │ ├── gatekeeper-controller-manager-poddisruptionbudget.yaml │ │ │ ├── gatekeeper-critical-pods-resourcequota.yaml │ │ │ ├── gatekeeper-manager-role-clusterrole.yaml │ │ │ ├── gatekeeper-manager-role-role.yaml │ │ │ ├── gatekeeper-manager-rolebinding-clusterrolebinding.yaml │ │ │ ├── gatekeeper-manager-rolebinding-rolebinding.yaml │ │ │ ├── gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml │ │ │ ├── gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml │ │ │ ├── gatekeeper-webhook-server-cert-secret.yaml │ │ │ ├── gatekeeper-webhook-service-service.yaml │ │ │ └── namespace-post-install.yaml │ │ │ └── values.yaml │ ├── tasks │ │ └── main.yaml │ └── templates │ │ └── custom-values-gatekeeper.yaml.j2 ├── ks-auditing │ ├── files │ │ └── kube-auditing │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── crds │ │ │ ├── auditing.kubesphere.io_rules.yaml │ │ │ └── auditing.kubesphere.io_webhooks.yaml │ │ │ └── templates │ │ │ ├── _helpers.tpl │ │ │ ├── operator.yaml │ │ │ ├── roles.yaml │ │ │ ├── rules.yaml │ │ │ └── webhook.yaml │ ├── tasks │ │ ├── fluentbit-operator.yaml │ │ └── main.yaml │ └── templates │ │ ├── custom-filter-auditing.yaml.j2 │ │ ├── custom-input-auditing.yaml.j2 │ │ ├── custom-output-elasticsearch-auditing.yaml.j2 │ │ ├── custom-output-opensearch-auditing.yaml.j2 │ │ └── custom-values.yaml.j2 ├── ks-core │ ├── config │ │ ├── files │ │ │ ├── alerting-migration │ │ │ │ └── .gitkeep │ │ │ └── openpitrix │ │ │ │ └── .gitkeep │ │ ├── tasks │ │ │ ├── ks-restart.yaml │ │ │ └── main.yaml │ │ └── templates │ │ │ ├── ks-alerting-migration.yaml.j2 │ │ │ ├── ks-openpitrix-upgrade.yaml.j2 │ │ │ └── kubesphere-config.yaml.j2 │ ├── init-token │ │ ├── files │ │ │ └── jwt-script │ │ │ │ └── jwt.sh │ │ └── tasks │ │ │ └── main.yaml │ ├── ks-core │ │ ├── defaults │ │ │ └── main.yaml │ │ ├── files │ │ │ └── ks-core │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── crds │ │ │ │ ├── alerting.kubesphere.io_clusterrulegroups.yaml │ │ │ │ ├── alerting.kubesphere.io_globalrulegroups.yaml │ │ │ │ ├── alerting.kubesphere.io_rulegroups.yaml │ │ │ │ ├── app_v1beta1_application.yaml │ │ │ │ ├── application.kubesphere.io_helmapplications.yaml │ │ │ │ ├── application.kubesphere.io_helmapplicationversions.yaml │ │ │ │ ├── application.kubesphere.io_helmcategories.yaml │ │ │ │ ├── application.kubesphere.io_helmreleases.yaml │ │ │ │ ├── application.kubesphere.io_helmrepos.yaml │ │ │ │ ├── cluster.kubesphere.io_clusters.yaml │ │ │ │ ├── gateway.kubesphere.io_gateways.yaml │ │ │ │ ├── gateway.kubesphere.io_nginxes.yaml │ │ │ │ ├── iam.kubesphere.io_federatedrolebindings.yaml │ │ │ │ ├── iam.kubesphere.io_federatedroles.yaml │ │ │ │ ├── 
iam.kubesphere.io_federatedusers.yaml │ │ │ │ ├── iam.kubesphere.io_globalrolebindings.yaml │ │ │ │ ├── iam.kubesphere.io_globalroles.yaml │ │ │ │ ├── iam.kubesphere.io_groupbindings.yaml │ │ │ │ ├── iam.kubesphere.io_groups.yaml │ │ │ │ ├── iam.kubesphere.io_loginrecords.yaml │ │ │ │ ├── iam.kubesphere.io_rolebases.yaml │ │ │ │ ├── iam.kubesphere.io_users.yaml │ │ │ │ ├── iam.kubesphere.io_workspacerolebindings.yaml │ │ │ │ ├── iam.kubesphere.io_workspaceroles.yaml │ │ │ │ ├── network.kubesphere.io_ipamblocks.yaml │ │ │ │ ├── network.kubesphere.io_ipamhandles.yaml │ │ │ │ ├── network.kubesphere.io_ippools.yaml │ │ │ │ ├── network.kubesphere.io_namespacenetworkpolicies.yaml │ │ │ │ ├── quota.kubesphere.io_resourcequotas.yaml │ │ │ │ ├── servicemesh.kubesphere.io_servicepolicies.yaml │ │ │ │ ├── servicemesh.kubesphere.io_strategies.yaml │ │ │ │ ├── storage.kubesphere.io_storageclasseraccessor.yaml │ │ │ │ ├── tenant.kubesphere.io_workspaces.yaml │ │ │ │ └── tenant.kubesphere.io_workspacetemplates.yaml │ │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── account.yaml │ │ │ │ ├── ks-apiserver.yml │ │ │ │ ├── ks-console-config.yml │ │ │ │ ├── ks-console.yml │ │ │ │ ├── ks-controller-manager.yaml │ │ │ │ ├── ks-router-cm.yaml │ │ │ │ ├── ks-router-config.tpl │ │ │ │ ├── kubesphere-config.yaml │ │ │ │ ├── kubesphere-controls-system.yaml │ │ │ │ ├── sample-bookinfo-configmap.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ └── webhook.yaml │ │ │ │ └── values.yaml │ │ ├── tasks │ │ │ └── main.yaml │ │ └── templates │ │ │ ├── custom-values-ks-core.yaml.j2 │ │ │ └── ks-upgrade.yaml.j2 │ ├── meta │ │ └── main.yaml │ └── prepare │ │ ├── defaults │ │ └── main.yaml │ │ ├── files │ │ └── ks-init │ │ │ ├── generate-kubeconfig.sh │ │ │ ├── ks-vpa.yaml │ │ │ └── role-templates.yaml │ │ ├── tasks │ │ └── main.yaml │ │ └── templates │ │ ├── alerting-db-init-job.yaml.j2 │ │ ├── custom-values-istio-init.yaml.j2 │ │ └── kubesphere-controls-system.yaml.j2 ├── ks-devops │ ├── OWNERS │ ├── defaults │ │ └── main.yaml │ ├── files │ │ └── ks-devops │ │ │ └── charts │ │ │ ├── README.md │ │ │ ├── argo-cd-4.4.0.tgz │ │ │ └── ks-devops-0.2.3.tgz │ ├── tasks │ │ ├── check_sonar.yaml │ │ ├── cleanup.yaml │ │ ├── get_old_config.yaml │ │ ├── main.yaml │ │ ├── sonar_token.yaml │ │ └── uninstall_update_center.yaml │ └── templates │ │ ├── argo-cd-values.yaml.j2 │ │ └── ks-devops-values.yaml.j2 ├── ks-events │ ├── files │ │ └── kube-events │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── crds │ │ │ └── bundle.yaml │ │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── crs │ │ │ │ ├── cluster-rules-default.yaml │ │ │ │ ├── exporter.yaml │ │ │ │ └── ruler.yaml │ │ │ └── operator │ │ │ │ ├── admission.yaml │ │ │ │ ├── cleanup.yaml │ │ │ │ ├── deploy.yaml │ │ │ │ ├── rbac.yaml │ │ │ │ └── serviceaccount.yaml │ │ │ └── values.yaml │ ├── tasks │ │ ├── fluentbit-operator.yaml │ │ └── main.yaml │ └── templates │ │ ├── custom-filter-events.yaml.j2 │ │ ├── custom-input-events.yaml.j2 │ │ ├── custom-output-elasticsearch-events.yaml.j2 │ │ ├── custom-output-opensearch-events.yaml.j2 │ │ └── custom-values-events.yaml.j2 ├── ks-istio │ ├── files │ │ ├── istio │ │ │ ├── clusterroles.yaml │ │ │ ├── istio-1.14.6-linux-amd64.tar.gz │ │ │ ├── istio-1.14.6-linux-arm64.tar.gz │ │ │ └── readme.md │ │ ├── jaeger │ │ │ ├── jaeger-operator-2.29.0.tgz │ │ │ └── readme.md │ │ └── kiali │ │ │ ├── custom-values-kiali.yaml │ │ │ ├── kiali-cr.yaml │ │ │ ├── kiali-operator-1.50.1.tgz │ │ │ └── 
readme.md │ ├── tasks │ │ ├── jaeger-install.yaml │ │ ├── kiali-install.yaml │ │ ├── main.yaml │ │ └── upgrade-pre.yaml │ └── templates │ │ ├── custom-values-jaeger.yaml.j2 │ │ ├── custom-values-kiali.yaml.j2 │ │ ├── istio-custom-profile.yaml.j2 │ │ ├── istio-profile.yaml.j2 │ │ ├── jaeger-cr.yaml.j2 │ │ └── kiali-cr.yaml.j2 ├── ks-logging │ ├── defaults │ │ └── main.yaml │ ├── files │ │ ├── fluentbit-operator-cri │ │ │ ├── filter-containerd.yaml │ │ │ └── fluentbit-containerd-config.yaml │ │ ├── fluentbit-operator │ │ │ ├── filter-logging.yaml │ │ │ ├── filter-systemd.yaml │ │ │ └── input-systemd-kubelet.yaml │ │ └── logsidecar-injector │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── admission.yaml │ │ │ ├── configmap.yaml │ │ │ └── deploy.yaml │ │ │ └── values.yaml │ ├── tasks │ │ ├── logsidecar.yaml │ │ └── main.yaml │ └── templates │ │ ├── custom-input-logging.yaml.j2 │ │ ├── custom-input-systemd.yaml.j2 │ │ ├── custom-output-elasticsearch-logging.yaml.j2 │ │ ├── custom-output-opensearch-logging.yaml.j2 │ │ └── custom-values-logsidecar-injector.yaml.j2 ├── ks-migration │ ├── tasks │ │ └── main.yaml │ └── templates │ │ ├── ks-devops-migration.yaml.j2 │ │ └── ks-upgrade.yaml.j2 ├── ks-monitor │ ├── defaults │ │ └── main.yaml │ ├── files │ │ ├── alert-migrate │ │ │ └── migrate-alertrules.sh │ │ ├── gpu-monitoring │ │ │ ├── gpu-dashboards │ │ │ │ └── nvidia-gpu-dcgm-exporter-clusterdashboard.yaml │ │ │ └── nvidia-dcgm-exporter │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── daemonset.yaml │ │ │ │ ├── service-monitor.yaml │ │ │ │ ├── service.yaml │ │ │ │ └── serviceaccount.yaml │ │ │ │ └── values.yaml │ │ ├── ks-istio-monitoring │ │ │ └── prometheus-operator.yaml │ │ ├── monitoring-dashboard │ │ │ ├── monitoring-clusterdashboard-customResourceDefinition.yaml │ │ │ └── monitoring-dashboard-customResourceDefinition.yaml │ │ ├── notification-manager │ │ │ ├── Chart.yaml │ │ │ ├── crds │ │ │ │ └── bundle.yaml │ │ │ └── templates │ │ │ │ ├── clusterrolebindings.yaml │ │ │ │ ├── clusterroles.yaml │ │ │ │ ├── hooks.yaml │ │ │ │ ├── notificationmanagers.yaml │ │ │ │ ├── operator.yaml │ │ │ │ ├── rolebindings.yaml │ │ │ │ ├── roles.yaml │ │ │ │ ├── serviceaccount.yaml │ │ │ │ ├── services.yaml │ │ │ │ ├── template.yaml │ │ │ │ ├── tls.yaml │ │ │ │ ├── validating.yaml │ │ │ │ └── zh-cn.yaml │ │ └── prometheus │ │ │ ├── alertmanager │ │ │ ├── alertmanager-alertRuleGroups.yaml │ │ │ ├── alertmanager-alertmanager.yaml │ │ │ ├── alertmanager-podDisruptionBudget.yaml │ │ │ ├── alertmanager-prometheusRule.yaml │ │ │ ├── alertmanager-secret.yaml │ │ │ ├── alertmanager-service.yaml │ │ │ ├── alertmanager-serviceAccount.yaml │ │ │ └── alertmanager-serviceMonitor.yaml │ │ │ ├── etcd │ │ │ ├── prometheus-alertRuleGroupsEtcd.yaml │ │ │ ├── prometheus-endpointsEtcd.yaml │ │ │ ├── prometheus-rulesEtcd.yaml │ │ │ ├── prometheus-secretEtcdCerts.yaml │ │ │ ├── prometheus-serviceEtcd.yaml │ │ │ └── prometheus-serviceMonitorEtcd.yaml │ │ │ ├── grafana │ │ │ ├── grafana-config.yaml │ │ │ ├── grafana-dashboardDatasources.yaml │ │ │ ├── grafana-dashboardDefinitions.yaml │ │ │ ├── grafana-dashboardSources.yaml │ │ │ ├── grafana-deployment.yaml │ │ │ ├── grafana-service.yaml │ │ │ ├── grafana-serviceAccount.yaml │ │ │ ├── grafana-serviceMonitor.yaml │ │ │ └── grafana-storage.yaml │ │ │ ├── kube-prometheus │ │ │ ├── kube-prometheus-alertRuleGroups.yaml │ │ │ 
└── kube-prometheus-prometheusRule.yaml │ │ │ ├── kube-state-metrics │ │ │ ├── kube-state-metrics-alertRuleGroups.yaml │ │ │ ├── kube-state-metrics-clusterRole.yaml │ │ │ ├── kube-state-metrics-clusterRoleBinding.yaml │ │ │ ├── kube-state-metrics-deployment.yaml │ │ │ ├── kube-state-metrics-prometheusRule.yaml │ │ │ ├── kube-state-metrics-service.yaml │ │ │ ├── kube-state-metrics-serviceAccount.yaml │ │ │ └── kube-state-metrics-serviceMonitor.yaml │ │ │ ├── kubernetes │ │ │ ├── kubernetes-alertRuleGroups.yaml │ │ │ ├── kubernetes-prometheusRule.yaml │ │ │ ├── kubernetes-serviceKubeControllerManager.yaml │ │ │ ├── kubernetes-serviceKubeScheduler.yaml │ │ │ ├── kubernetes-serviceMonitorApiserver.yaml │ │ │ ├── kubernetes-serviceMonitorCoreDNS.yaml │ │ │ ├── kubernetes-serviceMonitorKubeControllerManager.yaml │ │ │ ├── kubernetes-serviceMonitorKubeScheduler.yaml │ │ │ └── kubernetes-serviceMonitorKubelet.yaml │ │ │ ├── kubesphere │ │ │ ├── kubesphere-alertRuleGroups.yaml │ │ │ ├── kubesphere-serviceMonitorKsApiserver.yaml │ │ │ └── kubesphere-serviceMonitorKsControllerManager.yaml │ │ │ ├── node-exporter │ │ │ ├── node-exporter-alertRuleGroups.yaml │ │ │ ├── node-exporter-clusterRole.yaml │ │ │ ├── node-exporter-clusterRoleBinding.yaml │ │ │ ├── node-exporter-daemonset.yaml │ │ │ ├── node-exporter-prometheusRule.yaml │ │ │ ├── node-exporter-service.yaml │ │ │ ├── node-exporter-serviceAccount.yaml │ │ │ └── node-exporter-serviceMonitor.yaml │ │ │ ├── prometheus-operator │ │ │ ├── prometheus-operator-0alertmanagerConfigCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0alertmanagerCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0podmonitorCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0probeCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0prometheusCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0prometheusruleCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0servicemonitorCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-0thanosrulerCustomResourceDefinition.yaml │ │ │ ├── prometheus-operator-alertRuleGroups.yaml │ │ │ ├── prometheus-operator-clusterRole.yaml │ │ │ ├── prometheus-operator-clusterRoleBinding.yaml │ │ │ ├── prometheus-operator-deployment.yaml │ │ │ ├── prometheus-operator-prometheusRule.yaml │ │ │ ├── prometheus-operator-service.yaml │ │ │ ├── prometheus-operator-serviceAccount.yaml │ │ │ └── prometheus-operator-serviceMonitor.yaml │ │ │ ├── prometheus │ │ │ ├── prometheus-alertRuleGroups.yaml │ │ │ ├── prometheus-clusterRole.yaml │ │ │ ├── prometheus-clusterRoleBinding.yaml │ │ │ ├── prometheus-podDisruptionBudget.yaml │ │ │ ├── prometheus-prometheus.yaml │ │ │ ├── prometheus-prometheusRule.yaml │ │ │ ├── prometheus-roleBindingConfig.yaml │ │ │ ├── prometheus-roleConfig.yaml │ │ │ ├── prometheus-service.yaml │ │ │ ├── prometheus-serviceAccount.yaml │ │ │ └── prometheus-serviceMonitor.yaml │ │ │ └── thanos-ruler │ │ │ ├── thanos-ruler-alertRuleGroups.yaml │ │ │ ├── thanos-ruler-podDisruptionBudget.yaml │ │ │ ├── thanos-ruler-prometheusRule.yaml │ │ │ ├── thanos-ruler-service.yaml │ │ │ ├── thanos-ruler-serviceMonitor.yaml │ │ │ └── thanos-ruler-thanosRuler.yaml │ ├── tasks │ │ ├── alert-migrate.yaml │ │ ├── alertmanager.yaml │ │ ├── cleanup.yaml │ │ ├── etcd.yaml │ │ ├── generate_manifests.yaml │ │ ├── get_old_config.yaml │ │ ├── gpu-monitoring.yaml │ │ ├── grafana.yaml │ │ ├── k8s-monitor.yaml │ │ ├── ks-core-monitor.yaml │ │ ├── ks-istio-monitoring.yaml │ │ ├── kube-state-metrics.yaml │ │ ├── 
main.yaml │ │ ├── monitoring-dashboard.yaml │ │ ├── node-exporter.yaml │ │ ├── notification-manager.yaml │ │ ├── prometheus-operator.yaml │ │ ├── prometheus-stack.yaml │ │ ├── prometheus.yaml │ │ └── thanos-ruler.yaml │ └── templates │ │ ├── alertmanager-alertmanager.yaml.j2 │ │ ├── alertmanager-podDisruptionBudget.yaml.j2 │ │ ├── custom-values-gpu-monitoring.yaml.j2 │ │ ├── custom-values-notification.yaml.j2 │ │ ├── grafana-deployment.yaml.j2 │ │ ├── kube-state-metrics-deployment.yaml.j2 │ │ ├── node-exporter-daemonset.yaml.j2 │ │ ├── prometheus-endpointsEtcd.yaml.j2 │ │ ├── prometheus-operator-deployment.yaml.j2 │ │ ├── prometheus-podDisruptionBudget.yaml.j2 │ │ ├── prometheus-prometheus.yaml.j2 │ │ ├── prometheus-serviceMonitorEtcd.yaml.j2 │ │ ├── thanos-ruler-podDisruptionBudget.yaml.j2 │ │ └── thanos-ruler-thanosRuler.yaml.j2 ├── ks-multicluster │ ├── files │ │ ├── kubefed │ │ │ ├── federatedcrds │ │ │ │ ├── applications.app.k8s.io.yaml │ │ │ │ ├── clusterrolebindings.rbac.authorization.k8s.io.yaml │ │ │ │ ├── clusterroles.rbac.authorization.k8s.io.yaml │ │ │ │ ├── configmaps.yaml │ │ │ │ ├── configs.notification.kubesphere.io.yaml │ │ │ │ ├── deployments.apps.yaml │ │ │ │ ├── globalrolebindings.iam.kubesphere.io.yaml │ │ │ │ ├── globalroles.iam.kubesphere.io.yaml │ │ │ │ ├── groupbindings.iam.kubesphere.io.yaml │ │ │ │ ├── groups.iam.kubesphere.io.yaml │ │ │ │ ├── ingresses.networking.k8s.io.yaml │ │ │ │ ├── jobs.batch.yaml │ │ │ │ ├── limitranges.yaml │ │ │ │ ├── namespaces.yaml │ │ │ │ ├── notificationmanagers.notification.kubesphere.io.yaml │ │ │ │ ├── persistentvolumeclaims.yaml │ │ │ │ ├── receivers.notification.kubesphere.io.yaml │ │ │ │ ├── replicasets.apps.yaml │ │ │ │ ├── routers.notification.kubesphere.io.yaml │ │ │ │ ├── secrets.yaml │ │ │ │ ├── serviceaccounts.yaml │ │ │ │ ├── services.yaml │ │ │ │ ├── silences.notification.kubesphere.io.yaml │ │ │ │ ├── statefulsets.apps.yaml │ │ │ │ ├── update-files.sh │ │ │ │ ├── users.iam.kubesphere.io.yaml │ │ │ │ ├── workspacerolebindings.iam.kubesphere.io.yaml │ │ │ │ ├── workspaceroles.iam.kubesphere.io.yaml │ │ │ │ └── workspaces.tenant.kubesphere.io.yaml │ │ │ └── kubefed │ │ │ │ ├── .helmignore │ │ │ │ ├── Chart.yaml │ │ │ │ ├── Chart.yaml.backup │ │ │ │ ├── LICENSE │ │ │ │ ├── README.md │ │ │ │ ├── charts │ │ │ │ └── controllermanager │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── crds │ │ │ │ │ └── crds.yaml │ │ │ │ │ └── templates │ │ │ │ │ ├── _helpers.tpl │ │ │ │ │ ├── aggregate_clusterroles.yaml │ │ │ │ │ ├── clusterrole.yaml │ │ │ │ │ ├── clusterrolebindings.yaml │ │ │ │ │ ├── deployments.yaml │ │ │ │ │ ├── kubefedconfig.yaml │ │ │ │ │ ├── post-install-job.yaml │ │ │ │ │ ├── rolebindings.yaml │ │ │ │ │ ├── roles.yaml │ │ │ │ │ ├── service.yaml │ │ │ │ │ ├── serviceaccounts.yaml │ │ │ │ │ └── webhook.yaml │ │ │ │ ├── crds │ │ │ │ └── crds.yaml │ │ │ │ ├── templates │ │ │ │ ├── _helpers.tpl │ │ │ │ └── federatedtypeconfig.yaml │ │ │ │ ├── values.yaml │ │ │ │ └── values.yaml.backup │ │ └── notification │ │ │ ├── kubesphere-monitoring-federated.yaml │ │ │ └── update_federated_crds.sh │ ├── tasks │ │ ├── main.yml │ │ └── notification-manager.yaml │ └── templates │ │ ├── custom-values-kubefed.yaml.j2 │ │ └── tower.yaml.j2 ├── ks-network │ └── topology │ │ └── weave-scope │ │ ├── files │ │ └── weave-scope │ │ │ └── weave-scope.yaml │ │ ├── tasks │ │ └── main.yaml │ │ └── templates │ │ └── weave-scope.yaml.j2 ├── kubesphere-defaults │ ├── defaults │ │ └── main.yaml │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yaml ├── 
metrics-server │ ├── files │ │ └── metrics-server │ │ │ └── metrics-server.yaml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── metrics-server.yaml.j2 ├── openpitrix │ ├── defaults │ │ └── main.yaml │ ├── files │ │ └── openpitrix │ │ │ └── ks-openpitrix-import.yaml │ ├── tasks │ │ └── main.yaml │ └── templates │ │ ├── builtin-repo.yaml.j2 │ │ └── ks-openpitrix-import.yaml.j2 ├── preinstall │ └── tasks │ │ ├── helm-migrate.yaml │ │ ├── main.yaml │ │ ├── preCheckSize.yaml │ │ └── precheck.yaml └── telemetry │ └── tasks │ └── main.yaml └── scripts ├── build ├── Dockerfile.registry ├── Dockerfile.skopeo └── Dockerfile.tools ├── check_cluster_status.sh ├── create_project_harbor.sh ├── harbor_images_backup.sh ├── harbor_images_push.sh ├── kubesphere-delete.sh ├── offline-installation-tool.sh └── sync-images.sh /.github/workflows/deploy-weekly/kubesphere.tf: -------------------------------------------------------------------------------- 1 | resource "qingcloud_security_group" "basic"{ 2 | name = "cd-deploy" 3 | description = "cd-deploy" 4 | } 5 | 6 | resource "qingcloud_security_group_rule" "openport" { 7 | security_group_id = "${qingcloud_security_group.basic.id}" 8 | protocol = "tcp" 9 | priority = 0 10 | action = "accept" 11 | direction = 0 12 | from_port = 22 13 | to_port = 40000 14 | } 15 | 16 | resource "qingcloud_instance" "init"{ 17 | count = 1 18 | name = "cd-deploy" 19 | image_id = "centos76x64a" 20 | cpu = "8" 21 | memory = "16384" 22 | instance_class = "0" 23 | managed_vxnet_id="vxnet-0" 24 | login_passwd = "${var.password}" 25 | security_group_id ="${qingcloud_security_group.basic.id}" 26 | eip_id = "${var.eip_id}" 27 | os_disk_size = "100" 28 | } 29 | 30 | resource "null_resource" "install_kubesphere" { 31 | provisioner "file" { 32 | destination = "./install.sh" 33 | source = "./install.sh" 34 | 35 | connection { 36 | type = "ssh" 37 | user = "root" 38 | host = "${var.eip}" 39 | password = "${var.password}" 40 | port = "22" 41 | } 42 | } 43 | 44 | provisioner "remote-exec" { 45 | inline = [ 46 | "sh install.sh" 47 | ] 48 | 49 | connection { 50 | type = "ssh" 51 | user = "root" 52 | host = "${var.eip}" 53 | password = "${var.password}" 54 | port = "22" 55 | } 56 | } 57 | } -------------------------------------------------------------------------------- /.github/workflows/deploy-weekly/var.tf: -------------------------------------------------------------------------------- 1 | variable "access_key" { 2 | default = "QING_ACCESS_KEY" 3 | } 4 | 5 | variable "secret_key" { 6 | default = "QING_SECRET_KEY" 7 | } 8 | 9 | variable "eip_id" { 10 | default = "QING_EIP_ID" 11 | } 12 | 13 | variable "eip" { 14 | default = "QING_EIP" 15 | } 16 | 17 | variable "password" { 18 | default = "QING_PASSWORD" 19 | } 20 | 21 | variable "zone" { 22 | default = "ap2a" 23 | } 24 | 25 | terraform { 26 | required_providers { 27 | qingcloud = { 28 | source = "shaowenchen/qingcloud" 29 | version = "1.2.6" 30 | } 31 | } 32 | } 33 | 34 | terraform { 35 | backend "s3" { 36 | bucket = "terraform-deploy" 37 | key = "GITHUB_REPOSITORY" 38 | region = "sh1a" 39 | endpoint = "s3.sh1a.qingstor.com" 40 | skip_region_validation = true 41 | skip_metadata_api_check = true 42 | skip_credentials_validation = true 43 | access_key = "QING_ACCESS_KEY" 44 | secret_key = "QING_SECRET_KEY" 45 | } 46 | } 47 | 48 | provider "qingcloud" { 49 | access_key = "${var.access_key}" 50 | secret_key = "${var.secret_key}" 51 | zone = "${var.zone}" 52 | } -------------------------------------------------------------------------------- 
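The literal defaults in `var.tf` above (`QING_ACCESS_KEY`, `QING_SECRET_KEY`, `QING_EIP_ID`, `QING_EIP`, `QING_PASSWORD`, and the backend's `GITHUB_REPOSITORY` key) are placeholders, not usable values. Below is a minimal sketch of how a CI job could substitute them before provisioning — the substitution step is an assumption for illustration, since `deploy-weekly.yml` itself is not reproduced in this dump:

```bash
#!/usr/bin/env bash
# Hypothetical driver for the deploy-weekly Terraform config. The real wiring
# lives in .github/workflows/deploy-weekly.yml (not shown here); this only
# illustrates the placeholder-substitution pattern the defaults imply.
set -euo pipefail
cd .github/workflows/deploy-weekly

# Substitute QING_EIP_ID before QING_EIP, since the former contains the latter.
sed -i "s|QING_ACCESS_KEY|${QING_ACCESS_KEY}|g;s|QING_SECRET_KEY|${QING_SECRET_KEY}|g" var.tf
sed -i "s|QING_EIP_ID|${QING_EIP_ID}|g;s|QING_EIP|${QING_EIP}|g;s|QING_PASSWORD|${QING_PASSWORD}|g" var.tf
sed -i "s|GITHUB_REPOSITORY|${GITHUB_REPOSITORY}|g" var.tf

terraform init                 # configures the QingStor S3-compatible state backend
terraform apply -auto-approve  # creates the security group and instance
```

After `apply`, the `null_resource` provisioners in `kubesphere.tf` upload `install.sh` to the instance over SSH and execute it there, which is what actually performs the weekly KubeSphere deployment.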
/.github/workflows/kind/kind.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.23.17 6 | extraMounts: 7 | - hostPath: /etc/localtime 8 | containerPath: /etc/localtime 9 | - containerPath: /var/lib/kubelet/config.json 10 | hostPath: /root/.docker/config.json 11 | extraPortMappings: 12 | - containerPort: 30881 13 | hostPort: 9090 14 | -------------------------------------------------------------------------------- /.github/workflows/sync-release-images.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | # Sequence of patterns matched against refs/tags 4 | tags: 5 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 6 | name: Build Release 7 | 8 | jobs: 9 | release-images: 10 | runs-on: ubuntu-latest 11 | if: github.repository == 'kubesphere/ks-installer' 12 | steps: 13 | - uses: actions/checkout@v2 14 | - name: Get Version 15 | id: get_version 16 | run: echo ::set-output name=VERSION::${GITHUB_REF#refs/tags/} 17 | 18 | - name: Sync published images to mirror registry 19 | run: > 20 | docker run --rm 21 | -v "$PWD":/usr/src/myapp -w /usr/src/myapp kubespheredev/release-tools:v0.0.1 sh -c 22 | "skopeo login -u ${{ secrets.DOCKER_USERNAME }} -p \"${{ secrets.DOCKER_PASSWORD }}\" docker.io && 23 | skopeo login -u ${{ secrets.REGISTRY_ALIYUN_USERNAME }} -p \"${{ secrets.REGISTRY_ALIYUN_PASSWORD }}\" registry.cn-beijing.aliyuncs.com && 24 | ansible-playbook release.yml --skip-tags=images_list" 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | results 3 | 4 | # GitHub Codespaces 5 | .venv/ 6 | .vscode/ -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Welcome to kubesphere/ks-installer! To learn more about contributing to the [ks-installer code repo](README.md), check out the [Developer Guide](https://github.com/kubesphere/community/tree/master/developer-guide/development). 4 | 5 | The [KubeSphere community repo](https://github.com/kubesphere/community) contains information about how to get started, how the community organizes, and more. 6 | 7 | # Manual Test 8 | 9 | If you want to test a component with `ks-installer`, please follow these steps: 10 | 11 | * Create a new git feature branch 12 | * Change the Docker image tags of your desired component in file [main.yml](roles/download/defaults/main.yml) 13 | * Build the Docker image of `ks-installer` 14 | * Please provide an accessible image path, e.g.
`make all -e REPO=surenpi/ks-installer` 15 | * Create a Kubernetes cluster without KubeSphere 16 | * Installing [k3s](https://github.com/k3s-io/k3s/) might be an easy solution for testing purposes 17 | * Install the ks-installer 18 | * Switch the image to your desired one in file `deploy/kubesphere-installer.yaml` 19 | * Install it via: `kubectl apply -f deploy/` 20 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM kubespheredev/shell-operator:v1.0.0-beta.5-alpine3.16 2 | 3 | ENV ANSIBLE_ROLES_PATH /kubesphere/installer/roles 4 | WORKDIR /kubesphere 5 | ADD controller/* /hooks/kubesphere/ 6 | 7 | ADD roles /kubesphere/installer/roles 8 | ADD env /kubesphere/results/env 9 | ADD playbooks /kubesphere/playbooks 10 | 11 | #RUN chown kubesphere:kubesphere -R /kubesphere /hooks/kubesphere 12 | USER kubesphere 13 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | REPO?=kubespheredev/ks-installer 2 | TAG:=$(shell git rev-parse --abbrev-ref HEAD | sed -e 's/\//-/g')-dev-$(shell git rev-parse --short HEAD) 3 | CONTAINER_CLI?=docker 4 | 5 | build: 6 | $(CONTAINER_CLI) build . --file Dockerfile --tag $(REPO):$(TAG) 7 | push: 8 | $(CONTAINER_CLI) push $(REPO):$(TAG) 9 | all: build push 10 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - pixiake 3 | - Forest-L 4 | - rayzhou2017 5 | - zryfish 6 | - 24sama 7 | - benjaminhuo 8 | 9 | reviewers: 10 | - pixiake 11 | - Forest-L 12 | - rayzhou2017 13 | - zryfish 14 | - shaowenchen 15 | - benjaminhuo 16 | - calvinyv 17 | - FeynmanZhou 18 | - huanggze 19 | - wansir 20 | - 24sama 21 | -------------------------------------------------------------------------------- /controller/lib/__init__.py: -------------------------------------------------------------------------------- 1 | from .config.cluster_config_generator import generateConfig, generate_new_cluster_configuration 2 | from .config.cluster_config_management import ks_hook 3 | from .observer.classes import Info, InfoViewer 4 | from .ansible.ansible_tasks import preInstallTasks, resultInfo 5 | from .ansible.task_management import getResultInfo 6 | 7 | __all__ = ["ks_hook","generateConfig","generate_new_cluster_configuration","Info","InfoViewer","preInstallTasks","getResultInfo","resultInfo"] -------------------------------------------------------------------------------- /controller/lib/ansible/__init__.py: -------------------------------------------------------------------------------- 1 | from .ansible_tasks import component, preInstallTasks, resultInfo 2 | from .task_management import generateTaskLists, getComponentLists, getResultInfo 3 | 4 | __all__ = ["component","preInstallTasks","resultInfo","generateTaskLists","getComponentLists","getResultInfo"] -------------------------------------------------------------------------------- /controller/lib/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .cluster_config_management import ks_hook, cluster_configuration, get_cluster_configuration, create_cluster_configuration, delete_cluster_configuration 2 | from .cluster_config_generator import generate_new_cluster_configuration, generateConfig 3 | 4 | __all__
= ["ks_hook","cluster_configuration","get_cluster_configuration","create_cluster_configuration","delete_cluster_configuration","generateConfig","generate_new_cluster_configuration"] -------------------------------------------------------------------------------- /controller/lib/observer/__init__.py: -------------------------------------------------------------------------------- 1 | from .classes import Subject, InfoViewer 2 | 3 | __all__ = ["Subject","InfoViewer"] -------------------------------------------------------------------------------- /controller/lib/observer/classes.py: -------------------------------------------------------------------------------- 1 | 2 | # Using the Observer pattern to get the info of task execution 3 | import logging 4 | 5 | logging.basicConfig(level=logging.INFO, format="%(message)s") 6 | 7 | class Subject(object): 8 | 9 | def __init__(self): 10 | self._observers = [] 11 | 12 | def attach(self, observer): 13 | if observer not in self._observers: 14 | self._observers.append(observer) 15 | 16 | def detach(self, observer): 17 | try: 18 | self._observers.remove(observer) 19 | except ValueError: 20 | pass 21 | 22 | def notify(self, modifier=None): 23 | for observer in self._observers: 24 | if modifier != observer: 25 | observer.update(self) 26 | 27 | 28 | class Info(Subject): 29 | 30 | def __init__(self, name=''): 31 | Subject.__init__(self) 32 | self.name = name 33 | self._info = None 34 | 35 | @property 36 | def info(self): 37 | return self._info 38 | 39 | @info.setter 40 | def info(self, value): 41 | self._info = value 42 | self.notify() 43 | 44 | 45 | class InfoViewer: 46 | def update(self, subject): 47 | logging.info(u'%s' % (subject.info)) 48 | -------------------------------------------------------------------------------- /controller/schedule.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ $1 == "--config" ]] ; then 4 | cat < 9 | status.events is not defined or status.events.status is not defined or status.events.status != 'enabled' 10 | or 11 | (events.ruler.enabled is defined and events.ruler.enabled == True and status.events.ruler is not defined) -------------------------------------------------------------------------------- /playbooks/gatekeeper.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: gatekeeper 8 | when: 9 | - "gatekeeper is defined and gatekeeper.enabled == true" 10 | - "status.gatekeeper is not defined or status.gatekeeper.status is not defined or status.gatekeeper.status != 'enabled'" -------------------------------------------------------------------------------- /playbooks/gitlab.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-devops/gitlab -------------------------------------------------------------------------------- /playbooks/harbor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-devops/harbor -------------------------------------------------------------------------------- /playbooks/ks-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: 
false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-core/config 8 | 9 | -------------------------------------------------------------------------------- /playbooks/ks-core.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-core -------------------------------------------------------------------------------- /playbooks/ks-migration.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | roles: 5 | - kubesphere-defaults 6 | - ks-migration 7 | -------------------------------------------------------------------------------- /playbooks/logging.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: ks-logging 8 | when: 9 | - "status.logging is not defined or status.logging.status is not defined or status.logging.status != 'enabled'" -------------------------------------------------------------------------------- /playbooks/metering.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults -------------------------------------------------------------------------------- /playbooks/metrics_server.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: metrics-server 8 | when: 9 | - "metrics_server is defined and metrics_server.enabled == true" 10 | - "status.metricsServer is not defined or status.metricsServer.status is not defined or status.metricsServer.status != 'enabled'" 11 | -------------------------------------------------------------------------------- /playbooks/monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-monitor -------------------------------------------------------------------------------- /playbooks/multicluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: ks-multicluster 8 | when: 9 | - "multicluster is defined and multicluster.clusterRole == 'host'" 10 | - "status.multicluster is not defined or status.multicluster.status is not defined or status.multicluster.status != 'enabled'" 11 | -------------------------------------------------------------------------------- /playbooks/network.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: ks-network/topology/weave-scope 8 | when: 9 | - "network is defined and network.topology is defined and network.topology.type == 'weave-scope'" 10 | - "status.network is not defined or status.network.topology is not defined or status.network.topology.status != 'enabled' " -------------------------------------------------------------------------------- /playbooks/notification.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: 
localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | # - role: ks-notification 8 | # when: 9 | # - "status.notification is not defined or status.notification.status is not defined or status.notification.status != 'enabled'" -------------------------------------------------------------------------------- /playbooks/openpitrix.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: openpitrix 8 | when: 9 | - openpitrix is defined 10 | - openpitrix.store is defined 11 | - openpitrix.store.enabled is defined 12 | - openpitrix.store.enabled -------------------------------------------------------------------------------- /playbooks/preinstall.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - preinstall -------------------------------------------------------------------------------- /playbooks/result-info.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - ks-core/config 8 | - check-result -------------------------------------------------------------------------------- /playbooks/servicemesh.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: ks-istio 8 | when: 9 | - "status.servicemesh is not defined or status.servicemesh.status is not defined or status.servicemesh.status != 'enabled'" -------------------------------------------------------------------------------- /playbooks/telemetry.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | gather_facts: false 5 | roles: 6 | - kubesphere-defaults 7 | - role: telemetry 8 | when: 9 | - "telemetry_enabled | default(true)" -------------------------------------------------------------------------------- /release.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - role: download 5 | skip_sync: false 6 | -------------------------------------------------------------------------------- /roles/common/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | common: 4 | mysqlVolumeSize: 20Gi 5 | minioVolumeSize: 20Gi 6 | etcdVolumeSize: 20Gi 7 | openldapVolumeSize: 2Gi 8 | redisVolumSize: 2Gi 9 | monitoring: 10 | endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090 11 | es: 12 | elasticsearchMasterReplicas: 1 13 | elasticsearchDataReplicas: 1 14 | elasticsearchMasterVolumeSize: 4Gi 15 | elasticsearchDataVolumeSize: 20Gi 16 | logMaxAge: 7 17 | elkPrefix: logstash 18 | # externalElasticsearchHost: 19 | # externalElasticsearchPort: 20 | opensearch: 21 | opensearchsearchMasterReplicas: 1 22 | opensearchsearchDataReplicas: 1 23 | opensearchMasterVolumeSize: 4Gi 24 | opensearchDataVolumeSize: 20Gi 25 | logMaxAge: 7 26 | opensearchPrefix: whizard 27 | # externalOpensearchHost: 28 | # externalOpensearchPort: 29 | 30 | 31 | op_minio_accesskey: openpitrixminioaccesskey 32 | op_minio_secretkey: openpitrixminiosecretkey 33 | 34 | 35 | op_minio_svc: 
http://openpitrix-minio.openpitrix-system.svc:9000 36 | ks_minio_svc: http://minio.kubesphere-system.svc:9000 37 | 38 | 39 | ks_redis_svc: redis.kubesphere-system.svc 40 | -------------------------------------------------------------------------------- /roles/common/files/common/password.txt: -------------------------------------------------------------------------------- 1 | password -------------------------------------------------------------------------------- /roles/common/files/elasticsearch-curator/elasticsearch-curator-1.3.3.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/common/files/elasticsearch-curator/elasticsearch-curator-1.3.3.tgz -------------------------------------------------------------------------------- /roles/common/files/fluentbit-operator/fluentbit/fluentbitconfig-fluentBitConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: FluentBitConfig 3 | metadata: 4 | name: fluent-bit-config 5 | namespace: kubesphere-logging-system 6 | labels: 7 | app.kubernetes.io/name: fluent-bit 8 | spec: 9 | service: 10 | parsersFile: parsers.conf 11 | httpServer: true 12 | inputSelector: 13 | matchLabels: 14 | logging.kubesphere.io/enabled: "true" 15 | filterSelector: 16 | matchLabels: 17 | logging.kubesphere.io/enabled: "true" 18 | outputSelector: 19 | matchLabels: 20 | logging.kubesphere.io/enabled: "true" 21 | -------------------------------------------------------------------------------- /roles/common/files/fluentbit-operator/init/fluentbit-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: fluentbit-operator 7 | name: kubesphere:operator:fluentbit-operator 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: kubesphere:operator:fluentbit-operator 12 | subjects: 13 | - kind: ServiceAccount 14 | name: fluentbit-operator 15 | namespace: kubesphere-logging-system 16 | -------------------------------------------------------------------------------- /roles/common/files/fluentbit-operator/init/fluentbit-operator-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | systemd.lua: | 4 | function add_time(tag, timestamp, record) 5 | new_record = {} 6 | timeStr = os.date("!*t", timestamp["sec"]) 7 | t = string.format("%4d-%02d-%02dT%02d:%02d:%02d.%sZ", 8 | timeStr["year"], timeStr["month"], timeStr["day"], 9 | timeStr["hour"], timeStr["min"], timeStr["sec"], 10 | timestamp["nsec"]) 11 | kubernetes = {} 12 | kubernetes["pod_name"] = record["_HOSTNAME"] 13 | kubernetes["container_name"] = record["SYSLOG_IDENTIFIER"] 14 | kubernetes["namespace_name"] = "kube-system" 15 | new_record["time"] = t 16 | new_record["log"] = record["MESSAGE"] 17 | new_record["kubernetes"] = kubernetes 18 | return 1, timestamp, new_record 19 | end 20 | kind: ConfigMap 21 | metadata: 22 | labels: 23 | app.kubernetes.io/component: operator 24 | app.kubernetes.io/name: fluent-bit-lua 25 | name: fluent-bit-lua 26 | namespace: kubesphere-logging-system -------------------------------------------------------------------------------- 
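The `add_time` Lua function in the `fluent-bit-lua` ConfigMap above reshapes a raw systemd journal entry into the Kubernetes-style record the logging pipeline expects. Here is a standalone way to see that mapping, assuming a local `lua` interpreter is available — fluent-bit normally runs this through its embedded Lua filter, and the sample journal record below is made up for illustration:

```bash
lua - <<'EOF'
-- Copy of add_time() from the fluent-bit-lua ConfigMap above.
function add_time(tag, timestamp, record)
    new_record = {}
    timeStr = os.date("!*t", timestamp["sec"])
    t = string.format("%4d-%02d-%02dT%02d:%02d:%02d.%sZ",
            timeStr["year"], timeStr["month"], timeStr["day"],
            timeStr["hour"], timeStr["min"], timeStr["sec"],
            timestamp["nsec"])
    kubernetes = {}
    kubernetes["pod_name"] = record["_HOSTNAME"]
    kubernetes["container_name"] = record["SYSLOG_IDENTIFIER"]
    kubernetes["namespace_name"] = "kube-system"
    new_record["time"] = t
    new_record["log"] = record["MESSAGE"]
    new_record["kubernetes"] = kubernetes
    return 1, timestamp, new_record
end

-- Made-up record of the shape the systemd input plugin produces.
code, ts, rec = add_time("service.kubelet",
    { sec = 1700000000, nsec = 123456789 },
    { _HOSTNAME = "node1", SYSLOG_IDENTIFIER = "kubelet", MESSAGE = "starting kubelet" })

-- Prints: 2023-11-14T22:13:20.123456789Z  starting kubelet  kubelet
print(rec["time"], rec["log"], rec["kubernetes"]["container_name"])
EOF
```

Note the design choice visible in the mapping: the journal's `_HOSTNAME` is stored as `kubernetes.pod_name` and `SYSLOG_IDENTIFIER` as `kubernetes.container_name`, so host-level unit logs can be queried through the same index fields as ordinary container logs.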
/roles/common/files/fluentbit-operator/init/fluentbit-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: operator 6 | app.kubernetes.io/name: fluentbit-operator 7 | name: fluentbit-operator 8 | namespace: kubesphere-logging-system -------------------------------------------------------------------------------- /roles/common/files/minio-ha/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | description: MinIO is a high performance distributed object storage server, designed for large-scale private cloud infrastructure. 3 | name: minio 4 | version: 2.5.16 5 | appVersion: RELEASE.2019-08-07T01-59-21Z 6 | keywords: 7 | - storage 8 | - object-storage 9 | - S3 10 | home: https://min.io 11 | icon: https://min.io/resources/img/logo/MINIO_wordmark.png 12 | sources: 13 | - https://github.com/minio/minio 14 | maintainers: 15 | - name: Acaleph 16 | email: hello@acale.ph 17 | - name: Minio 18 | email: dev@minio.io 19 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - wlan0 3 | - nitisht 4 | - krisis 5 | - harshavardhana 6 | reviewers: 7 | - wlan0 8 | - nitisht 9 | - krisis 10 | - harshavardhana 11 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "minio.fullname" . }} 5 | labels: 6 | app: {{ template "minio.name" . }} 7 | chart: {{ template "minio.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | data: 11 | initialize: |- 12 | {{ include (print $.Template.BasePath "/_helper_create_bucket.txt") . | indent 4 }} 13 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "minio.fullname" . -}} 3 | {{- $servicePort := .Values.service.port -}} 4 | {{- $ingressPath := .Values.ingress.path -}} 5 | apiVersion: {{ template "minio.ingress.apiVersion" . }} 6 | kind: Ingress 7 | metadata: 8 | name: {{ $fullName }} 9 | labels: 10 | app: {{ template "minio.name" . }} 11 | chart: {{ template "minio.chart" . }} 12 | release: {{ .Release.Name }} 13 | heritage: {{ .Release.Service }} 14 | {{- with .Values.ingress.annotations }} 15 | annotations: 16 | {{ toYaml . 
| indent 4 }} 17 | {{- end }} 18 | spec: 19 | {{- if .Values.ingress.tls }} 20 | tls: 21 | {{- range .Values.ingress.tls }} 22 | - hosts: 23 | {{- range .hosts }} 24 | - {{ . | quote }} 25 | {{- end }} 26 | secretName: {{ .secretName }} 27 | {{- end }} 28 | {{- end }} 29 | rules: 30 | {{- range .Values.ingress.hosts }} 31 | - host: {{ . | quote }} 32 | http: 33 | paths: 34 | - path: {{ $ingressPath }} 35 | backend: 36 | serviceName: {{ $fullName }} 37 | servicePort: {{ $servicePort }} 38 | {{- end }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.networkPolicy.enabled }} 2 | kind: NetworkPolicy 3 | apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} 4 | metadata: 5 | name: {{ template "minio.fullname" . }} 6 | labels: 7 | app: {{ template "minio.name" . }} 8 | chart: {{ template "minio.chart" . }} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | spec: 12 | podSelector: 13 | matchLabels: 14 | app: {{ template "minio.name" . }} 15 | release: {{ .Release.Name }} 16 | ingress: 17 | - ports: 18 | - port: {{ .Values.service.port }} 19 | {{- if not .Values.networkPolicy.allowExternal }} 20 | from: 21 | - podSelector: 22 | matchLabels: 23 | {{ template "minio.name" . }}-client: "true" 24 | {{- end }} 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.mode "standalone" }} 2 | {{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | name: {{ template "minio.fullname" . }} 7 | labels: 8 | app: {{ template "minio.name" . }} 9 | chart: {{ template "minio.chart" . }} 10 | release: {{ .Release.Name }} 11 | heritage: {{ .Release.Service }} 12 | spec: 13 | {{- if and .Values.nasgateway.enabled .Values.nasgateway.pv }} 14 | selector: 15 | matchLabels: 16 | pv: {{ .Values.nasgateway.pv | quote }} 17 | {{- end }} 18 | accessModes: 19 | - {{ .Values.persistence.accessMode | quote }} 20 | resources: 21 | requests: 22 | storage: {{ .Values.persistence.size | quote }} 23 | 24 | {{- if .Values.persistence.storageClass }} 25 | {{- if (eq "-" .Values.persistence.storageClass) }} 26 | storageClassName: "" 27 | {{- else }} 28 | storageClassName: "{{ .Values.persistence.storageClass }}" 29 | {{- end }} 30 | {{- end }} 31 | {{- if .Values.persistence.VolumeName }} 32 | volumeName: "{{ .Values.persistence.VolumeName }}" 33 | {{- end }} 34 | {{- end }} 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/secrets.yaml: -------------------------------------------------------------------------------- 1 | {{- if not .Values.existingSecret }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "minio.fullname" . }} 6 | labels: 7 | app: {{ template "minio.name" . }} 8 | chart: {{ template "minio.chart" . 
}} 9 | release: {{ .Release.Name }} 10 | heritage: {{ .Release.Service }} 11 | type: Opaque 12 | data: 13 | accesskey: {{ if .Values.accessKey }}{{ .Values.accessKey | b64enc | quote }}{{ else }}{{ randAlphaNum 20 | b64enc | quote }}{{ end }} 14 | secretkey: {{ if .Values.secretKey }}{{ .Values.secretKey | b64enc | quote }}{{ else }}{{ randAlphaNum 40 | b64enc | quote }}{{ end }} 15 | {{- if .Values.gcsgateway.enabled }} 16 | gcs_key.json: {{ .Values.gcsgateway.gcsKeyJson | b64enc }} 17 | {{- end }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "minio.fullname" . }} 5 | labels: 6 | app: {{ template "minio.name" . }} 7 | chart: {{ template "minio.chart" . }} 8 | release: {{ .Release.Name }} 9 | heritage: {{ .Release.Service }} 10 | {{- if .Values.service.annotations }} 11 | annotations: 12 | {{ toYaml .Values.service.annotations | indent 4 }} 13 | {{- end }} 14 | spec: 15 | {{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} 16 | type: ClusterIP 17 | {{- if not (empty .Values.service.clusterIP) }} 18 | clusterIP: {{ .Values.service.clusterIP }} 19 | {{end}} 20 | {{- else if eq .Values.service.type "LoadBalancer" }} 21 | type: {{ .Values.service.type }} 22 | loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} 23 | {{- else }} 24 | type: {{ .Values.service.type }} 25 | {{- end }} 26 | ports: 27 | - name: service 28 | port: {{ .Values.service.port }} 29 | protocol: TCP 30 | {{- if (and (eq .Values.service.type "NodePort") ( .Values.service.nodePort)) }} 31 | nodePort: {{ .Values.service.nodePort }} 32 | {{- else }} 33 | targetPort: 9000 34 | {{- end}} 35 | {{- if .Values.service.externalIPs }} 36 | externalIPs: 37 | {{- range $i , $ip := .Values.service.externalIPs }} 38 | - {{ $ip }} 39 | {{- end }} 40 | {{- end }} 41 | selector: 42 | app: {{ template "minio.name" . }} 43 | release: {{ .Release.Name }} 44 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "minio.serviceAccountName" . | quote }} 6 | namespace: {{ .Release.Namespace | quote }} 7 | {{- end -}} 8 | -------------------------------------------------------------------------------- /roles/common/files/minio-ha/templates/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.metrics.serviceMonitor.enabled }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ template "minio.fullname" . }} 6 | {{- if .Values.metrics.serviceMonitor.namespace }} 7 | namespace: {{ .Values.metrics.serviceMonitor.namespace }} 8 | {{- end }} 9 | labels: 10 | app: {{ template "minio.name" . }} 11 | chart: {{ template "minio.chart" . 
}} 12 | release: {{ .Release.Name }} 13 | heritage: {{ .Release.Service }} 14 | {{- if .Values.metrics.serviceMonitor.additionalLabels }} 15 | {{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }} 16 | {{- end }} 17 | spec: 18 | endpoints: 19 | - port: service 20 | path: /minio/prometheus/metrics 21 | {{- if .Values.metrics.serviceMonitor.interval }} 22 | interval: {{ .Values.metrics.serviceMonitor.interval }} 23 | {{- end }} 24 | {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} 25 | scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} 26 | {{- end }} 27 | namespaceSelector: 28 | matchNames: 29 | - {{ .Release.Namespace }} 30 | selector: 31 | matchLabels: 32 | app: {{ include "minio.name" . }} 33 | release: {{ .Release.Name }} 34 | {{- end }} 35 | -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: openldap-ha 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "openldap-ha.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 8 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "openldap-ha.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "openldap-ha.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "openldap-ha.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | echo "Visit ldap://127.0.0.1:389 to use your application" 14 | kubectl port-forward $POD_NAME 389:389 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/templates/bootstrap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | bootstrap.ldif: | 4 | dn: ou=Users,dc=kubesphere,dc=io 5 | objectClass: organizationalUnit 6 | objectClass: top 7 | ou: Users 8 | 9 | dn: ou=Groups,dc=kubesphere,dc=io 10 | objectClass: organizationalUnit 11 | objectClass: top 12 | ou: Users 13 | ou: Groups 14 | 15 | dn: uid=admin,ou=Users,dc=kubesphere,dc=io 16 | objectClass: inetOrgPerson 17 | objectClass: top 18 | cn: admin 19 | sn: admin 20 | uid: admin 21 | userPassword:: e1NTSEF9RHRIdmQxOWlkMnBINENyYWJYa0NsSzY1OFNXeUVHSDAK 22 | kind: ConfigMap 23 | metadata: 24 | name: openldap-bootstrap -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "openldap-ha.fullname" . }}-test-connection" 5 | labels: 6 | {{ include "openldap-ha.labels" . | indent 4 }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "openldap-ha.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /roles/common/files/openldap-ha/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for openldap-ha. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | image: 5 | repository: osixia/openldap 6 | tag: 1.3.0 7 | pullPolicy: IfNotPresent 8 | 9 | imagePullSecrets: [] 10 | nameOverride: "" 11 | fullnameOverride: "" 12 | 13 | service: 14 | type: ClusterIP 15 | port: 389 16 | 17 | 18 | ldap: 19 | replication: false 20 | organisation: kubesphere 21 | domain: kubesphere.io 22 | adminPassword: admin 23 | 24 | resources: {} 25 | # We usually recommend not to specify default resources and to leave this as a conscious 26 | # choice for the user. This also increases chances charts run on environments with little 27 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 28 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
29 | # limits: 30 | # cpu: 100m 31 | # memory: 128Mi 32 | # requests: 33 | # cpu: 100m 34 | # memory: 128Mi 35 | 36 | nodeSelector: {} 37 | 38 | tolerations: [] 39 | -------------------------------------------------------------------------------- /roles/common/files/opensearch/opensearch-2.6.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/common/files/opensearch/opensearch-2.6.0.tgz -------------------------------------------------------------------------------- /roles/common/files/opensearch/opensearch-curator-1.3.3.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/common/files/opensearch/opensearch-curator-1.3.3.tgz -------------------------------------------------------------------------------- /roles/common/files/opensearch/opensearch-dashboards-2.6.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/common/files/opensearch/opensearch-dashboards-2.6.0.tgz -------------------------------------------------------------------------------- /roles/common/files/redis-ha/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: redis-ha 3 | home: http://redis.io/ 4 | engine: gotpl 5 | keywords: 6 | - redis 7 | - keyvalue 8 | - database 9 | version: 3.9.0 10 | appVersion: 5.0.5 11 | description: Highly available Kubernetes implementation of Redis 12 | icon: https://upload.wikimedia.org/wikipedia/en/thumb/6/6b/Redis_Logo.svg/1200px-Redis_Logo.svg.png 13 | maintainers: 14 | - email: salimsalaues@gmail.com 15 | name: ssalaues 16 | - email: aaron.layfield@gmail.com 17 | name: dandydeveloper 18 | details: 19 | This Helm chart provides a highly available Redis implementation with a master/slave configuration 20 | and uses Sentinel sidecars for failover management 21 | sources: 22 | - https://redis.io/download 23 | - https://github.com/scality/Zenko/tree/development/1.0/kubernetes/zenko/charts/redis-ha 24 | - https://github.com/oliver006/redis_exporter 25 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - ssalaues 3 | - dandydeveloper 4 | reviewers: 5 | - ssalaues 6 | - dandydeveloper -------------------------------------------------------------------------------- /roles/common/files/redis-ha/ci/haproxy-enabled-values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Enable HAProxy to manage Load Balancing 3 | haproxy: 4 | enabled: true 5 | annotations: 6 | any.domain/key: "value" 7 | serviceAccount: 8 | create: true 9 | exporter: 10 | enabled: false 11 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Redis can be accessed via port {{ .Values.redis.port }} and Sentinel can be accessed via port {{ .Values.sentinel.port }} on the following DNS name from within your cluster: 2 | {{ template "redis-ha.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local 3 | 4 | To connect to your Redis server: 5 | 6 | {{- if .Values.auth }} 7 | 1. To retrieve the redis password: 8 | echo $(kubectl get secret {{ template "redis-ha.fullname" . }} -o "jsonpath={.data['auth']}" | base64 --decode) 9 | 10 | 2. Connect to the Redis master pod that you can use as a client. By default the {{ template "redis-ha.fullname" . }}-server-0 pod is configured as the master: 11 | 12 | kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }} 13 | 14 | 3. Connect using the Redis CLI (inside container): 15 | 16 | redis-cli -a <REDIS-PASS-FROM-SECRET> 17 | {{- else }} 18 | 1. Run a Redis pod that you can use as a client: 19 | 20 | kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }} 21 | 22 | 2. Connect using the Redis CLI: 23 | 24 | redis-cli -h {{ template "redis-ha.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local 25 | {{- end }} 26 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-auth-secret.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.auth (not .Values.existingSecret) -}} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "redis-ha.fullname" . }} 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | type: Opaque 9 | data: 10 | {{ .Values.authKey }}: {{ .Values.redisPassword | b64enc | quote }} 11 | {{- end -}} 12 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-announce-service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "redis-ha.fullname" . }} 2 | {{- $replicas := int .Values.replicas }} 3 | {{- $root := . }} 4 | {{- range $i := until $replicas }} 5 | --- 6 | apiVersion: v1 7 | kind: Service 8 | metadata: 9 | name: {{ $fullName }}-announce-{{ $i }} 10 | labels: 11 | {{ include "labels.standard" $root | indent 4 }} 12 | annotations: 13 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 14 | {{- if $root.Values.serviceAnnotations }} 15 | {{ toYaml $root.Values.serviceAnnotations | indent 4 }} 16 | {{- end }} 17 | spec: 18 | publishNotReadyAddresses: true 19 | type: ClusterIP 20 | ports: 21 | - name: server 22 | port: {{ $root.Values.redis.port }} 23 | protocol: TCP 24 | targetPort: redis 25 | - name: sentinel 26 | port: {{ $root.Values.sentinel.port }} 27 | protocol: TCP 28 | targetPort: sentinel 29 | {{- if $root.Values.exporter.enabled }} 30 | - name: exporter 31 | port: {{ $root.Values.exporter.port }} 32 | protocol: TCP 33 | targetPort: exporter-port 34 | {{- end }} 35 | selector: 36 | release: {{ $root.Release.Name }} 37 | app: {{ include "redis-ha.name" $root }} 38 | "statefulset.kubernetes.io/pod-name": {{ $fullName }}-server-{{ $i }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "redis-ha.fullname" . }}-configmap 5 | labels: 6 | heritage: {{ .Release.Service }} 7 | release: {{ .Release.Name }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | app: {{ template "redis-ha.fullname" . 
}} 10 | data: 11 | redis.conf: | 12 | {{- include "config-redis.conf" . }} 13 | 14 | sentinel.conf: | 15 | {{- include "config-sentinel.conf" . }} 16 | 17 | init.sh: | 18 | {{- include "config-init.sh" . }} 19 | {{ if .Values.haproxy.enabled }} 20 | haproxy.cfg: |- 21 | {{- include "config-haproxy.cfg" . }} 22 | {{- end }} 23 | haproxy_init.sh: | 24 | {{- include "config-haproxy_init.sh" . }} 25 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-pdb.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.podDisruptionBudget -}} 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: {{ template "redis-ha.fullname" . }}-pdb 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | release: {{ .Release.Name }} 12 | app: {{ template "redis-ha.name" . }} 13 | {{ toYaml .Values.podDisruptionBudget | indent 2 }} 14 | {{- end -}} 15 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-role.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceAccount.create .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: {{ template "redis-ha.fullname" . }} 6 | labels: 7 | heritage: {{ .Release.Service }} 8 | release: {{ .Release.Name }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app: {{ template "redis-ha.fullname" . }} 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - endpoints 16 | verbs: 17 | - get 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.serviceAccount.create .Values.rbac.create }} 2 | kind: RoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "redis-ha.fullname" . }} 6 | labels: 7 | heritage: {{ .Release.Service }} 8 | release: {{ .Release.Name }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app: {{ template "redis-ha.fullname" . }} 11 | subjects: 12 | - kind: ServiceAccount 13 | name: {{ template "redis-ha.serviceAccountName" . }} 14 | roleRef: 15 | apiGroup: rbac.authorization.k8s.io 16 | kind: Role 17 | name: {{ template "redis-ha.fullname" . }} 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "redis-ha.fullname" . }} 5 | labels: 6 | {{ include "labels.standard" . 
| indent 4 }} 7 | annotations: 8 | {{- if .Values.serviceAnnotations }} 9 | {{ toYaml .Values.serviceAnnotations | indent 4 }} 10 | {{- end }} 11 | spec: 12 | type: ClusterIP 13 | clusterIP: None 14 | ports: 15 | - name: server 16 | port: {{ .Values.redis.port }} 17 | protocol: TCP 18 | targetPort: redis 19 | - name: sentinel 20 | port: {{ .Values.sentinel.port }} 21 | protocol: TCP 22 | targetPort: sentinel 23 | {{- if .Values.exporter.enabled }} 24 | - name: exporter-port 25 | port: {{ .Values.exporter.port }} 26 | protocol: TCP 27 | targetPort: exporter-port 28 | {{- end }} 29 | selector: 30 | release: {{ .Release.Name }} 31 | app: {{ template "redis-ha.name" . }} -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-ha-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "redis-ha.serviceAccountName" . }} 6 | labels: 7 | heritage: {{ .Release.Service }} 8 | release: {{ .Release.Name }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app: {{ template "redis-ha.fullname" . }} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-haproxy-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.haproxy.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: redis 6 | labels: 7 | {{ include "labels.standard" . | indent 4 }} 8 | annotations: 9 | {{- if .Values.haproxy.service.annotations }} 10 | {{ toYaml .Values.haproxy.service.annotations | indent 4 }} 11 | {{- end }} 12 | spec: 13 | type: {{ default "ClusterIP" .Values.haproxy.service.type }} 14 | {{- if and (eq .Values.haproxy.service.type "LoadBalancer") .Values.haproxy.service.loadBalancerIP }} 15 | loadBalancerIP: {{ .Values.haproxy.service.loadBalancerIP }} 16 | {{- end }} 17 | ports: 18 | - name: haproxy 19 | port: {{ .Values.redis.port }} 20 | protocol: TCP 21 | targetPort: redis 22 | {{- if and (eq .Values.haproxy.service.type "NodePort") .Values.haproxy.service.nodePort }} 23 | nodePort: {{ .Values.haproxy.service.nodePort }} 24 | {{- end }} 25 | {{- if .Values.haproxy.readOnly.enabled }} 26 | - name: haproxyreadonly 27 | port: {{ .Values.haproxy.readOnly.port }} 28 | protocol: TCP 29 | targetPort: {{ .Values.haproxy.readOnly.port }} 30 | {{- end }} 31 | {{- if .Values.exporter.enabled }} 32 | - name: exporter-port 33 | port: {{ .Values.haproxy.exporter.port }} 34 | protocol: TCP 35 | targetPort: exporter-port 36 | {{- end }} 37 | selector: 38 | release: {{ .Release.Name }} 39 | app: {{ template "redis-ha.name" . }}-haproxy 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/redis-haproxy-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.haproxy.serviceAccount.create .Values.haproxy.enabled }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "redis-ha.serviceAccountName" . }}-haproxy 6 | labels: 7 | heritage: {{ .Release.Service }} 8 | release: {{ .Release.Name }} 9 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 10 | app: {{ template "redis-ha.fullname" . 
}} 11 | {{- end }} 12 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/tests/test-redis-ha-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: {{ template "redis-ha.fullname" . }}-configmap-test 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | spec: 10 | containers: 11 | - name: check-init 12 | image: koalaman/shellcheck:v0.5.0 13 | args: 14 | - --shell=sh 15 | - /readonly-config/init.sh 16 | volumeMounts: 17 | - name: config 18 | mountPath: /readonly-config 19 | readOnly: true 20 | volumes: 21 | - name: config 22 | configMap: 23 | name: {{ template "redis-ha.fullname" . }}-configmap 24 | restartPolicy: Never 25 | -------------------------------------------------------------------------------- /roles/common/files/redis-ha/templates/tests/test-redis-ha-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: {{ template "redis-ha.fullname" . }}-service-test 5 | labels: 6 | {{ include "labels.standard" . | indent 4 }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | spec: 10 | containers: 11 | - name: "{{ .Release.Name }}-service-test" 12 | image: {{ .Values.image.repository }}:{{ .Values.image.tag }} 13 | command: 14 | - sh 15 | - -c 16 | - redis-cli -h {{ template "redis-ha.fullname" . }} -p {{ .Values.redis.port }} info server 17 | restartPolicy: Never 18 | -------------------------------------------------------------------------------- /roles/common/files/snapshot-controller/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
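# Example (hypothetical pattern, shown for illustration only): a negated entry
# such as "!templates/keep-me.yaml" re-includes a file that an earlier pattern
# in this list would otherwise have excluded from the packaged chart.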
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /roles/common/files/snapshot-controller/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | 3 | appVersion: 4.0.0 4 | name: snapshot-controller 5 | description: A Helm chart for snapshot-controller 6 | version: 0.2.0 7 | kubeVersion: ">=1.17.0-0" 8 | home: https://github.com/kubernetes-csi/external-snapshotter 9 | sources: 10 | - https://github.com/kubesphere/helm-charts 11 | keywords: 12 | - snapshot 13 | - controller 14 | - csi 15 | maintainers: 16 | - name: Yang Zhou 17 | email: yangzhou@yunify.com 18 | -------------------------------------------------------------------------------- /roles/common/files/snapshot-controller/README.md: -------------------------------------------------------------------------------- 1 | # snapshot-controller 2 | 3 | ## TL;DR; 4 | 5 | ```console 6 | helm install test/snapshot-controller 7 | ``` 8 | 9 | ## Installing 10 | 11 | To install the chart with the release name `snapshot-controller`: 12 | 13 | ```console 14 | helm repo add test https://charts.kubesphere.io/test 15 | helm install test/snapshot-controller --name-template snapshot-controller --namespace kube-system 16 | ``` 17 | 18 | The command deploys the snapshot-controller chart on the Kubernetes cluster in the default configuration. The configuration section lists the parameters that can be configured during installation. 19 | 20 | ## Uninstalling 21 | 22 | To uninstall/delete the `snapshot-controller` deployment: 23 | 24 | ```console 25 | helm delete snapshot-controller --namespace kube-system 26 | ``` 27 | 28 | The command removes all the Kubernetes components associated with the chart and deletes the release. 29 | 30 | ## Configuration 31 | 32 | The following table lists the configurable parameters of the chart and their default values. 33 | 34 | Parameter | Description | Default 35 | --- | --- | --- 36 | `repository` | Image of snapshot-controller | `csiplugin/snapshot-controller` 37 | `tag` | Tag of snapshot-controller | `v4.0.0` 38 | `pullPolicy` | Image pull policy of snapshot-controller | `IfNotPresent` 39 | 40 | -------------------------------------------------------------------------------- /roles/common/files/snapshot-controller/templates/setup-snapshot-controller.yaml: -------------------------------------------------------------------------------- 1 | # This YAML file shows how to deploy the snapshot controller 2 | 3 | # The snapshot controller implements the control loop for CSI snapshot functionality. 4 | # It should be installed as part of the base Kubernetes distribution in an appropriate 5 | # namespace for components implementing base system functionality. For installing with 6 | # Vanilla Kubernetes, kube-system makes sense for the namespace. 
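# Note on the manifest below: the controller runs as a single-replica StatefulSet,
# which guarantees at most one instance is running at a time; that is also why the
# container further down is started with "--leader-election=false".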
7 | 8 | --- 9 | kind: StatefulSet 10 | apiVersion: apps/v1 11 | metadata: 12 | name: snapshot-controller 13 | namespace: {{ .Release.Namespace }} 14 | spec: 15 | serviceName: "snapshot-controller" 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: snapshot-controller 20 | template: 21 | metadata: 22 | labels: 23 | app: snapshot-controller 24 | spec: 25 | serviceAccount: snapshot-controller 26 | containers: 27 | - name: snapshot-controller 28 | image: "{{ .Values.repository }}:{{ .Values.tag }}" 29 | args: 30 | - "--v=5" 31 | - "--leader-election=false" 32 | imagePullPolicy: {{ .Values.pullPolicy}} 33 | -------------------------------------------------------------------------------- /roles/common/files/snapshot-controller/values.yaml: -------------------------------------------------------------------------------- 1 | repository: csiplugin/snapshot-controller 2 | tag: v4.0.0 3 | pullPolicy: IfNotPresent 4 | -------------------------------------------------------------------------------- /roles/common/tasks/minio-migration.yaml: -------------------------------------------------------------------------------- 1 | - name: Empty backup directory 2 | file: 3 | state: absent 4 | path: "/tmp/minio-backup/{{ item }}/" 5 | 6 | - name: Recreate backup directory 7 | file: 8 | state: directory 9 | path: "/tmp/minio-backup/{{ item }}/" 10 | 11 | - name: KubeSphere | Fetch openpitrix-minio data 12 | shell: > 13 | rclone copy 14 | op-minio:openpitrix-internal-repo/{{ item }} 15 | /tmp/minio-backup/{{ item }}/ 16 | 17 | - name: KubeSphere | Checking minio status 18 | shell: > 19 | {{ bin_dir }}/kubectl get pod -n kubesphere-system | grep 'minio' | grep -v 'Running' | wc -l 20 | register: minio_result 21 | until: minio_result.stdout == "0" 22 | retries: 30 23 | delay: 30 24 | 25 | - name: KubeSphere | Sync openpitrix-minio data 26 | shell: > 27 | rclone copy 28 | /tmp/minio-backup/{{ item }}/ 29 | ks-minio:openpitrix-internal-repo/{{ item }} 30 | -------------------------------------------------------------------------------- /roles/common/tasks/mysql-install.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: KubeSphere | Deploying common component 3 | shell: > 4 | {{ bin_dir }}/kubectl -n kubesphere-system apply -f {{ kubesphere_dir }}/common/{{ item }} 5 | loop: 6 | - "mysql.yaml" 7 | register: mysql_result 8 | failed_when: 9 | - "mysql_result.stderr and 'is immutable after creation except resources.requests for bound claims' not in mysql_result.stderr" 10 | - "mysql_result.stderr and 'is forbidden' not in mysql_result.stderr" 11 | # when: devops.enabled or openpitrix.enabled or notification.enabled or alerting.enabled 12 | 13 | - name: KubeSphere | Importing mysql status 14 | shell: > 15 | {{ bin_dir }}/kubectl patch cc ks-installer 16 | --type merge 17 | -p '{"status": {"mysql": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}' 18 | -n kubesphere-system 19 | register: cc_result 20 | failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr" 21 | until: cc_result is succeeded 22 | retries: 5 23 | delay: 3 -------------------------------------------------------------------------------- /roles/common/tasks/redis-migration.yaml: -------------------------------------------------------------------------------- 1 | 2 | - name: KubeSphere | Creating redis migration script 3 | template: 4 | src: "{{ item.file }}.j2" 5 | dest: "{{ kubesphere_dir }}/{{ item.file }}" 6 | with_items: 7 | - { path: 
/etc/kubesphere, file: redisMigrate.py } 8 | 9 | 10 | - name: KubeSphere | Checking redis-ha status 11 | shell: > 12 | {{ bin_dir }}/kubectl get pod -n kubesphere-system | grep 'redis-ha' | grep -v 'Running' | wc -l 13 | register: redis_result 14 | until: redis_result.stdout == "0" 15 | retries: 30 16 | delay: 30 17 | 18 | 19 | - name: ks-logging | Migrating redis data 20 | shell: > 21 | chmod +x {{ kubesphere_dir }}/redisMigrate.py && 22 | {{ kubesphere_dir }}/redisMigrate.py 23 | 24 | 25 | - name: KubeSphere | Disabling old redis 26 | shell: > 27 | {{ bin_dir }}/kubectl -n kubesphere-system scale deployment redis --replicas=0 -------------------------------------------------------------------------------- /roles/common/templates/custom-fluentbit-fluentBit.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: FluentBit 3 | metadata: 4 | name: fluent-bit 5 | namespace: kubesphere-logging-system 6 | labels: 7 | app.kubernetes.io/name: fluent-bit 8 | spec: 9 | image: {{ ks_fluent_bit_repo }}:{{ ks_fluent_bit_tag }} 10 | positionDB: 11 | emptyDir: {} 12 | resources: 13 | limits: 14 | cpu: 500m 15 | memory: 500Mi 16 | requests: 17 | cpu: 10m 18 | memory: 25Mi 19 | fluentBitConfigName: fluent-bit-config 20 | tolerations: 21 | - operator: Exists 22 | affinity: 23 | nodeAffinity: 24 | requiredDuringSchedulingIgnoredDuringExecution: 25 | nodeSelectorTerms: 26 | - matchExpressions: 27 | - key: node-role.kubernetes.io/edge 28 | operator: DoesNotExist 29 | -------------------------------------------------------------------------------- /roles/common/templates/custom-values-snapshot-controller.yaml.j2: -------------------------------------------------------------------------------- 1 | repository: {{ snapshot_controller_repo }} 2 | tag: {{ snapshot_controller_tag }} 3 | pullPolicy: IfNotPresent 4 | -------------------------------------------------------------------------------- /roles/common/templates/rclone.conf.j2: -------------------------------------------------------------------------------- 1 | [op-minio] 2 | type = s3 3 | provider = Minio 4 | env_auth = false 5 | access_key_id = {{ op_minio_accesskey }} 6 | secret_access_key = {{ op_minio_secretkey }} 7 | region = us-east-1 8 | endpoint = {{ op_minio_svc }} 9 | 10 | 11 | [ks-minio] 12 | type = s3 13 | provider = Minio 14 | env_auth = false 15 | access_key_id = {{ op_minio_accesskey }} 16 | secret_access_key = {{ op_minio_secretkey }} 17 | region = us-east-1 18 | endpoint = {{ ks_minio_svc }} -------------------------------------------------------------------------------- /roles/download/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | allow_duplicates: true 3 | -------------------------------------------------------------------------------- /roles/download/tasks/generate_images_list.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: 3 | images_group: |- 4 | { 5 | {% for key, value in images.items() %} 6 | '{{ value.group }}': ''{% if not loop.last %},{% endif %} 7 | {% endfor %} 8 | } 9 | 10 | - set_fact: 11 | images_group_list: |- 12 | [ 13 | {% for key, value in images_group.items() %} 14 | '{{ key }}'{% if not loop.last %},{% endif %} 15 | {% endfor %} 16 | ] 17 | 18 | - name: Generating images list 19 | template: 20 | src: "{{ item.file }}.j2" 21 | dest: "{{ item.file }}" 22 | with_items: 23 | - { path: ks-core, file: 
images-list.txt} -------------------------------------------------------------------------------- /roles/download/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generating images list 3 | include_tasks: "generate_images_list.yml" 4 | tags: 5 | - images_list 6 | when: 7 | - not skip_sync|default(false) 8 | 9 | - name: Synchronizing images 10 | include_tasks: "sync_images.yml" 11 | vars: 12 | image: "{{ image_default | combine(item.value) }}" 13 | with_dict: "{{ images }}" 14 | tags: 15 | - sync_images 16 | when: 17 | - not skip_sync|default(false) 18 | 19 | -------------------------------------------------------------------------------- /roles/download/tasks/sync_images.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: 3 | pull_args: >- 4 | {{ image.repo }}:{{ image.tag }} 5 | when: 6 | - not skip_sync|default(false) 7 | 8 | - name: images_sync ( {{ pull_args }} ) 9 | command: > 10 | skopeo sync --src docker --dest docker {{ pull_args }} {{ images_mirror | default("registry.cn-beijing.aliyuncs.com/kubesphereio") }} -a 11 | register: pull_task_result 12 | until: pull_task_result is succeeded 13 | retries: 4 14 | run_once: yes 15 | when: 16 | - not skip_sync|default(false) 17 | 18 | -------------------------------------------------------------------------------- /roles/download/templates/images-list.txt.j2: -------------------------------------------------------------------------------- 1 | {% for group in images_group_list %} 2 | ##{{ group }} 3 | {% for key, value in images.items() %} 4 | {% if value.group == group %} 5 | {{ value.repo }}:{{ value.tag }} 6 | {% endif %} 7 | {% endfor %} 8 | {% endfor %} -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: cloudcore 3 | version: 1.13.0 4 | appVersion: 1.13.0 5 | description: The KubeEdge cloudcore component. 6 | sources: 7 | - https://github.com/kubeedge/kubeedge 8 | -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "cloudcore.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | */}} 13 | {{- define "cloudcore.fullname" -}} 14 | {{- if .Values.fullnameOverride -}} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 16 | {{- else -}} 17 | {{- $name := default .Chart.Name .Values.nameOverride -}} 18 | {{- if contains $name .Release.Name -}} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 20 | {{- else -}} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 22 | {{- end -}} 23 | {{- end -}} 24 | {{- end -}} 25 | 26 | {{/* 27 | Generate certificates for kubeedge cloudstream server 28 | */}} 29 | {{- define "cloudcore.gen-certs" -}} 30 | {{- $altNames := list ( printf "%s.%s" (include "cloudcore.name" .) .Release.Namespace ) ( printf "%s.%s.svc" (include "cloudcore.name" .) 
.Release.Namespace ) -}} 31 | {{- $ca := genCA "cloudcore-ca" 365 -}} 32 | {{- $cert := genSignedCert ( include "cloudcore.name" . ) nil $altNames 365 $ca -}} 33 | streamCA.crt: {{ $ca.Cert | b64enc }} 34 | stream.crt: {{ $cert.Cert | b64enc }} 35 | stream.key: {{ $cert.Key | b64enc }} 36 | {{- end -}} 37 | -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/templates/rbac-edgeservice.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | {{- with .Values.edgeService.labels }} 5 | labels: {{- toYaml . | nindent 4 }} 6 | {{- end }} 7 | name: edgeservice-sa 8 | namespace: {{ .Release.Namespace }} 9 | 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: ClusterRoleBinding 13 | metadata: 14 | name: edgeservice 15 | {{- with .Values.edgeService.labels }} 16 | labels: {{- toYaml . | nindent 4 }} 17 | {{- end }} 18 | roleRef: 19 | apiGroup: rbac.authorization.k8s.io 20 | kind: ClusterRole 21 | name: edgeservice 22 | subjects: 23 | - kind: ServiceAccount 24 | name: edgeservice-sa 25 | namespace: {{ .Release.Namespace }} 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRole 29 | metadata: 30 | name: edgeservice 31 | {{- with .Values.edgeService.labels }} 32 | labels: {{- toYaml . | nindent 4 }} 33 | {{- end }} 34 | rules: 35 | - apiGroups: [""] 36 | resources: ["nodes","configmaps","secrets","services"] 37 | verbs: ["get", "list", "watch"] -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/templates/rbac_iptablesmanager.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Values.iptablesManager.enable) (eq .Values.iptablesManager.mode "external") }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | {{- with .Values.iptablesManager.labels }} 6 | labels: {{- toYaml . | nindent 4 }} 7 | {{- end }} 8 | name: iptables-manager-sa 9 | namespace: {{ .Release.Namespace }} 10 | 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | name: iptables-manager 16 | {{- with .Values.iptablesManager.labels }} 17 | labels: {{- toYaml . | nindent 4 }} 18 | {{- end }} 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: iptables-manager 23 | subjects: 24 | - kind: ServiceAccount 25 | name: iptables-manager-sa 26 | namespace: {{ .Release.Namespace }} 27 | 28 | --- 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | kind: ClusterRole 31 | metadata: 32 | name: iptables-manager 33 | {{- with .Values.iptablesManager.labels }} 34 | labels: {{- toYaml . | nindent 4 }} 35 | {{- end }} 36 | rules: 37 | - apiGroups: [""] 38 | resources: ["configmaps"] 39 | verbs: ["get", "list", "watch"] 40 | {{- end }} -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/templates/secret_cloudcore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudcore 5 | {{- with .Values.cloudCore.labels }} 6 | labels: {{- toYaml . | nindent 4 }} 7 | {{- end }} 8 | annotations: 9 | "helm.sh/hook": "pre-install" 10 | "helm.sh/hook-delete-policy": "before-hook-creation" 11 | data: 12 | {{ ( include "cloudcore.gen-certs" . 
) | indent 2 }} -------------------------------------------------------------------------------- /roles/edgeruntime/files/kubeedge/cloudcore/templates/service_edgeservice.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | {{- with .Values.edgeService.labels }} 5 | labels: {{- toYaml . | nindent 4 }} 6 | {{- end }} 7 | name: edgeservice 8 | spec: 9 | ports: 10 | - name: http 11 | port: 80 12 | protocol: TCP 13 | targetPort: 8081 14 | selector: 15 | {{- with .Values.edgeService.labels }} 16 | {{- toYaml . | nindent 4 }} 17 | {{- end }} -------------------------------------------------------------------------------- /roles/edgeruntime/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: kubeedge.yaml 3 | when: 4 | - edgeruntime.kubeedge is defined 5 | - edgeruntime.kubeedge.enabled -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: A Helm chart for Gatekeeper 3 | name: gatekeeper 4 | keywords: 5 | - open policy agent 6 | version: 3.5.2 7 | home: https://github.com/open-policy-agent/gatekeeper 8 | sources: 9 | - https://github.com/open-policy-agent/gatekeeper.git 10 | appVersion: v3.5.2 11 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "gatekeeper.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "gatekeeper.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
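For example, with the Chart.yaml above this renders as "gatekeeper-3.5.2". The
"+" replacement below matters because "+" (used in SemVer build metadata) is not
a valid character in a Kubernetes label value.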
29 | */}} 30 | {{- define "gatekeeper.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Adds additional pod labels to the common ones 36 | */}} 37 | {{- define "gatekeeper.podLabels" -}} 38 | {{- if .Values.podLabels }} 39 | {{- toYaml .Values.podLabels | nindent 8 }} 40 | {{- end }} 41 | {{- end -}} -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-admin-podsecuritypolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | annotations: 5 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' 6 | labels: 7 | app: '{{ template "gatekeeper.name" . }}' 8 | chart: '{{ template "gatekeeper.name" . }}' 9 | gatekeeper.sh/system: "yes" 10 | heritage: '{{ .Release.Service }}' 11 | release: '{{ .Release.Name }}' 12 | name: gatekeeper-admin 13 | spec: 14 | allowPrivilegeEscalation: false 15 | fsGroup: 16 | ranges: 17 | - max: 65535 18 | min: 1 19 | rule: MustRunAs 20 | requiredDropCapabilities: 21 | - ALL 22 | runAsUser: 23 | rule: MustRunAsNonRoot 24 | seLinux: 25 | rule: RunAsAny 26 | supplementalGroups: 27 | ranges: 28 | - max: 65535 29 | min: 1 30 | rule: MustRunAs 31 | volumes: 32 | - configMap 33 | - projected 34 | - secret 35 | - downwardAPI 36 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-admin-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . }}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-admin 11 | namespace: '{{ .Release.Namespace }}' 12 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-controller-manager-poddisruptionbudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . }}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-controller-manager 11 | namespace: '{{ .Release.Namespace }}' 12 | spec: 13 | minAvailable: {{ .Values.pdb.controllerManager.minAvailable }} 14 | selector: 15 | matchLabels: 16 | app: '{{ template "gatekeeper.name" . }}' 17 | chart: '{{ template "gatekeeper.name" . }}' 18 | control-plane: controller-manager 19 | gatekeeper.sh/operation: webhook 20 | gatekeeper.sh/system: "yes" 21 | heritage: '{{ .Release.Service }}' 22 | release: '{{ .Release.Name }}' 23 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-critical-pods-resourcequota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ResourceQuota 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . 
}}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-critical-pods 11 | namespace: '{{ .Release.Namespace }}' 12 | spec: 13 | hard: 14 | pods: {{ .Values.podCountLimit }} 15 | scopeSelector: 16 | matchExpressions: 17 | - operator: In 18 | scopeName: PriorityClass 19 | values: 20 | - {{ .Values.controllerManager.priorityClassName }} 21 | - {{ .Values.audit.priorityClassName }} 22 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-manager-role-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: '{{ template "gatekeeper.name" . }}' 7 | chart: '{{ template "gatekeeper.name" . }}' 8 | gatekeeper.sh/system: "yes" 9 | heritage: '{{ .Release.Service }}' 10 | release: '{{ .Release.Name }}' 11 | name: gatekeeper-manager-role 12 | namespace: '{{ .Release.Namespace }}' 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - events 18 | verbs: 19 | - create 20 | - patch 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - secrets 25 | verbs: 26 | - create 27 | - delete 28 | - get 29 | - list 30 | - patch 31 | - update 32 | - watch 33 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . }}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-manager-rolebinding 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: gatekeeper-manager-role 15 | subjects: 16 | - kind: ServiceAccount 17 | name: gatekeeper-admin 18 | namespace: '{{ .Release.Namespace }}' 19 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-manager-rolebinding-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . 
}}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-manager-rolebinding 11 | namespace: '{{ .Release.Namespace }}' 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: gatekeeper-manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: gatekeeper-admin 19 | namespace: '{{ .Release.Namespace }}' 20 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-mutating-webhook-configuration-mutatingwebhookconfiguration.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.experimentalEnableMutation }} 2 | apiVersion: admissionregistration.k8s.io/v1 3 | kind: MutatingWebhookConfiguration 4 | metadata: 5 | creationTimestamp: null 6 | labels: 7 | app: '{{ template "gatekeeper.name" . }}' 8 | chart: '{{ template "gatekeeper.name" . }}' 9 | heritage: '{{ .Release.Service }}' 10 | release: '{{ .Release.Name }}' 11 | name: gatekeeper-mutating-webhook-configuration 12 | webhooks: 13 | - admissionReviewVersions: 14 | - v1 15 | - v1beta1 16 | clientConfig: 17 | service: 18 | name: gatekeeper-webhook-service 19 | namespace: '{{ .Release.Namespace }}' 20 | path: /v1/mutate 21 | failurePolicy: Ignore 22 | matchPolicy: Exact 23 | name: mutation.gatekeeper.sh 24 | namespaceSelector: 25 | matchExpressions: 26 | - key: admission.gatekeeper.sh/ignore 27 | operator: DoesNotExist 28 | rules: 29 | - apiGroups: 30 | - '*' 31 | apiVersions: 32 | - '*' 33 | operations: 34 | - CREATE 35 | - UPDATE 36 | resources: 37 | - '*' 38 | sideEffects: None 39 | timeoutSeconds: 3 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-webhook-server-cert-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | annotations: {{- toYaml .Values.secretAnnotations | trim | nindent 4 }} 5 | labels: 6 | app: '{{ template "gatekeeper.name" . }}' 7 | chart: '{{ template "gatekeeper.name" . }}' 8 | gatekeeper.sh/system: "yes" 9 | heritage: '{{ .Release.Service }}' 10 | release: '{{ .Release.Name }}' 11 | name: gatekeeper-webhook-server-cert 12 | namespace: '{{ .Release.Namespace }}' 13 | -------------------------------------------------------------------------------- /roles/gatekeeper/files/gatekeeper/templates/gatekeeper-webhook-service-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: '{{ template "gatekeeper.name" . }}' 6 | chart: '{{ template "gatekeeper.name" . }}' 7 | gatekeeper.sh/system: "yes" 8 | heritage: '{{ .Release.Service }}' 9 | release: '{{ .Release.Name }}' 10 | name: gatekeeper-webhook-service 11 | namespace: '{{ .Release.Namespace }}' 12 | spec: 13 | {{- if .Values.service }} 14 | type: {{ .Values.service.type | default "ClusterIP" }} 15 | {{- if .Values.service.loadBalancerIP }} 16 | loadBalancerIP: {{ .Values.service.loadBalancerIP }} 17 | {{- end }} 18 | {{- end }} 19 | ports: 20 | - port: 443 21 | targetPort: 8443 22 | selector: 23 | app: '{{ template "gatekeeper.name" . }}' 24 | chart: '{{ template "gatekeeper.name" . 
}}' 25 | control-plane: controller-manager 26 | gatekeeper.sh/operation: webhook 27 | gatekeeper.sh/system: "yes" 28 | heritage: '{{ .Release.Service }}' 29 | release: '{{ .Release.Name }}' 30 | -------------------------------------------------------------------------------- /roles/gatekeeper/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: GateKeeper | Getting GateKeeper installation files 3 | copy: 4 | src: "gatekeeper" 5 | dest: "{{ kubesphere_dir }}/" 6 | 7 | - name: GateKeeper | Creating manifests 8 | template: 9 | src: "{{ item.file }}.j2" 10 | dest: "{{ kubesphere_dir }}/{{ item.path }}/{{ item.file }}" 11 | with_items: 12 | - { path: gatekeeper, file: custom-values-gatekeeper.yaml } 13 | 14 | - name: GateKeeper | Installing GateKeeper 15 | shell: > 16 | {{ bin_dir }}/helm upgrade --install gatekeeper 17 | {{ kubesphere_dir }}/gatekeeper 18 | --namespace gatekeeper-system 19 | --create-namespace 20 | -f {{ kubesphere_dir }}/gatekeeper/custom-values-gatekeeper.yaml 21 | register: result 22 | until: result is succeeded 23 | retries: 3 24 | 25 | - name: GateKeeper | Labeling GateKeeper namespace 26 | shell: > 27 | {{ bin_dir }}/kubectl label namespace gatekeeper-system kubesphere.io/workspace=system-workspace --overwrite 28 | 29 | - name: GateKeeper | Importing GateKeeper status 30 | shell: > 31 | {{ bin_dir }}/kubectl patch cc ks-installer 32 | --type merge 33 | -p '{"status": {"gatekeeper": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}' 34 | -n kubesphere-system 35 | register: cc_result 36 | failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr" 37 | until: cc_result is succeeded 38 | retries: 5 39 | delay: 3 40 | -------------------------------------------------------------------------------- /roles/ks-auditing/files/kube-auditing/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /roles/ks-auditing/files/kube-auditing/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: 0.2.0 3 | description: An audit tool for KubeSphere 4 | name: kube-auditing 5 | version: 0.2.0 6 | -------------------------------------------------------------------------------- /roles/ks-auditing/files/kube-auditing/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Common labels 3 | */}} 4 | {{- define "operator.labels" -}} 5 | app.kubernetes.io/name: {{ "kube-auditing-operator" }} 6 | helm.sh/chart: {{ .Chart.Version }} 7 | app.kubernetes.io/instance: {{ .Release.Name }} 8 | {{- if .Chart.AppVersion }} 9 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 10 | {{- end }} 11 | app.kubernetes.io/managed-by: {{ .Release.Service }} 12 | {{- end -}} 13 | 14 | -------------------------------------------------------------------------------- /roles/ks-auditing/files/kube-auditing/templates/webhook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: auditing.kubesphere.io/v1alpha1 2 | kind: Webhook 3 | metadata: 4 | name: kube-auditing-webhook 5 | spec: 6 | {{- if .Values.webhook.replicas }} 7 | replicas: {{ .Values.webhook.replicas }} 8 | {{- end}} 9 | image: "{{ .Values.webhook.image.repository }}:{{ .Values.webhook.image.tag }}" 10 | imagePullPolicy: {{ .Values.webhook.image.pullPolicy }} 11 | imagePullSecrets: 12 | {{- toYaml .Values.webhook.imagePullSecrets | nindent 4 }} 13 | args: 14 | {{- toYaml .Values.webhook.args | nindent 4 }} 15 | nodeSelector: 16 | {{- toYaml .Values.webhook.nodeSelector | nindent 4 }} 17 | affinity: 18 | {{- toYaml .Values.webhook.affinity | nindent 4 }} 19 | tolerations: 20 | {{- toYaml .Values.webhook.tolerations | nindent 4 }} 21 | resources: 22 | {{- toYaml .Values.webhook.resources | nindent 4 }} 23 | receivers: 24 | {{- toYaml .Values.webhook.receivers | nindent 4 }} 25 | archivingPriority: {{ .Values.webhook.archivingPriority }} 26 | alertingPriority: {{ .Values.webhook.alertingPriority }} 27 | auditLevel: {{ .Values.webhook.auditLevel }} 28 | k8sAuditingEnabled: {{ .Values.webhook.k8sAuditingEnabled }} 29 | useHTTPS: {{ .Values.webhook.useHTTPS }} 30 | {{- if .Values.webhook.port }} 31 | port: {{ .Values.webhook.port }} 32 | {{- end }} 33 | 34 | -------------------------------------------------------------------------------- /roles/ks-auditing/templates/custom-filter-auditing.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Filter 3 | metadata: 4 | labels: 5 | logging.kubesphere.io/enabled: 'true' 6 | logging.kubesphere.io/component: "auditing" 7 | name: filter-auditing 8 | namespace: kubesphere-logging-system 9 | spec: 10 | filters: 11 | - parser: 12 | {% if (logging_container_runtime== 'containerd') or (logging_container_runtime== 'crio') %} 13 | keyName: message 14 | {% else %} 15 | keyName: log 16 | {% endif %} 17 | parser: json 18 | - modify: 19 | conditions: 20 | - keyDoesNotExist: 21 | AuditID: '' 22 | rules: 23 | - add: 
24 | ignore: 'true' 25 | - grep: 26 | exclude: ignore true 27 | match: kube_auditing 28 | 29 | -------------------------------------------------------------------------------- /roles/ks-auditing/templates/custom-input-auditing.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Input 3 | metadata: 4 | labels: 5 | logging.kubesphere.io/enabled: 'true' 6 | logging.kubesphere.io/component: "auditing" 7 | name: tail-auditing 8 | namespace: kubesphere-logging-system 9 | spec: 10 | tail: 11 | db: /fluent-bit/tail/pos-auditing.db 12 | dbSync: Normal 13 | memBufLimit: 100MB 14 | {% if (logging_container_runtime== 'containerd') or (logging_container_runtime== 'crio') %} 15 | parser: cri 16 | {% else %} 17 | parser: docker 18 | {% endif %} 19 | path: /var/log/containers/kube-auditing-webhook*_kubesphere-logging-system_kube-auditing-webhook*.log 20 | refreshIntervalSeconds: 10 21 | skipLongLines: true 22 | tag: kube_auditing 23 | 24 | -------------------------------------------------------------------------------- /roles/ks-core/config/files/alerting-migration/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-core/config/files/alerting-migration/.gitkeep -------------------------------------------------------------------------------- /roles/ks-core/config/files/openpitrix/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-core/config/files/openpitrix/.gitkeep -------------------------------------------------------------------------------- /roles/ks-core/config/templates/ks-alerting-migration.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: ks-alerting-migration 5 | namespace: kubesphere-system 6 | labels: 7 | app: ks-alerting-migration 8 | job: ks-alerting-migration 9 | version: v3.1.0 10 | spec: 11 | backoffLimit: 6 12 | completions: 1 13 | parallelism: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: ks-alerting-migration 18 | job: ks-alerting-migration 19 | version: v3.1.0 20 | name: ks-alerting-migration 21 | spec: 22 | initContainers: 23 | - name: wait-mysql 24 | image: {{ alpine_repo }}:{{ alpine_tag }} 25 | imagePullPolicy: IfNotPresent 26 | command: ['sh', '-c', 'until nc -z mysql.kubesphere-system.svc 3306; do echo "waiting for mysql"; sleep 2; done;'] 27 | - name: wait-apiserver 28 | image: {{ alpine_repo }}:{{ alpine_tag }} 29 | imagePullPolicy: IfNotPresent 30 | command: ['sh', '-c', 'until nc -z ks-apiserver.kubesphere-system.svc 80; do echo "waiting for apiserver"; sleep 2; done;'] 31 | containers: 32 | - image: {{ ks_alerting_migration_repo }}:{{ ks_alerting_migration_tag }} 33 | imagePullPolicy: IfNotPresent 34 | name: ks-alerting-migration 35 | serviceAccount: kubesphere 36 | serviceAccountName: kubesphere 37 | restartPolicy: OnFailure -------------------------------------------------------------------------------- /roles/ks-core/config/templates/ks-openpitrix-upgrade.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | labels: 6 | app: openpitrix 7 | component: openpitrix-upgrade-job 8 | 
version: v3.2.0 9 | name: openpitrix-upgrade-job 10 | namespace: kubesphere-system 11 | spec: 12 | backoffLimit: 100 13 | template: 14 | metadata: 15 | labels: 16 | app: openpitrix 17 | component: openpitrix-upgrade-job 18 | version: v3.2.0 19 | spec: 20 | serviceAccountName: kubesphere 21 | restartPolicy: OnFailure 22 | containers: 23 | - name: upgrade 24 | command: 25 | - sh 26 | - -c 27 | - upgrade && import-app import 28 | image: {{ openpitrix_job_repo }}:{{ openpitrix_job_tag }} 29 | imagePullPolicy: IfNotPresent 30 | volumeMounts: 31 | - mountPath: /etc/kubesphere 32 | name: kubesphere-config 33 | volumes: 34 | - name: kubesphere-config 35 | configMap: 36 | name: kubesphere-config -------------------------------------------------------------------------------- /roles/ks-core/init-token/files/jwt-script/jwt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | secret=$1 4 | 5 | # Static header fields. 6 | header='{ 7 | "alg": "HS256", 8 | "typ": "JWT" 9 | }' 10 | 11 | # Use jq to set the dynamic `iat` and `exp` 12 | # fields on the header using the current time. 13 | # `iat` is set to now, and `exp` is now + 1 second. 14 | # header=$( 15 | # echo "${header}" | jq --arg time_str "$(date +%s)" \ 16 | # ' 17 | # ($time_str | tonumber) as $time_num 18 | # | .iat=$time_num 19 | # | .exp=($time_num + 1) 20 | # ' 21 | # ) 22 | # openpitrix {"sub": "system","role": "global_admin","iat": 1516239022,"exp": 1816239022} 23 | # ks-account {"email": "admin@kubesphere.io","exp": 1816239022,"username": "admin"} 24 | payload=$2 25 | 26 | base64_encode() 27 | { 28 | declare input=${1:-$(</dev/stdin)} [lines 29-42 were lost in extraction: the remainder of jwt.sh (the base64url encoding and HMAC-SHA256 signing steps that assemble the final header.payload.signature token) together with the opening of the following kubeconfig-admin helper script; only that script's tail survives] ... > /dev/null 2>&1 43 | 44 | if [ $? -ne 0 ];then 45 | echo "kubeconfig-admin not exist, will be generated..." 46 | generate_configmap | kubectl create -f - 47 | fi 48 | } 49 | 50 | generate_kubeconfig_admin -------------------------------------------------------------------------------- /roles/ks-core/prepare/templates/alerting-db-init-job.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: alerting-db-init-job 5 | namespace: kubesphere-alerting-system 6 | labels: 7 | app: alerting 8 | job: alerting-db-init 9 | version: v0.1.0 10 | spec: 11 | backoffLimit: 6 12 | completions: 1 13 | parallelism: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: alerting 18 | job: alerting-db-init 19 | version: v0.1.0 20 | name: alerting-db-init 21 | spec: 22 | initContainers: 23 | - name: wait-mysql 24 | image: {{ alpine_repo }}:{{ alpine_tag }} 25 | imagePullPolicy: IfNotPresent 26 | command: ['sh', '-c', 'until nc -z openpitrix-db.openpitrix-system.svc 3306; do echo "waiting for mysql"; sleep 2; done;'] 27 | containers: 28 | - command: ["/flyway/sql/ddl/ddl_init.sh", "-hopenpitrix-db.openpitrix-system.svc", "-uroot", "--connect-timeout=5"] 29 | env: 30 | - name: PASSWORD 31 | value: "password" 32 | image: {{ alerting_dbinit_repo }}:{{ alerting_dbinit_tag }} 33 | imagePullPolicy: {{ ks_image_pull_policy }} 34 | name: alerting-db-init 35 | resources: {} 36 | terminationMessagePath: /dev/termination-log 37 | terminationMessagePolicy: File 38 | dnsPolicy: ClusterFirst 39 | restartPolicy: OnFailure 40 | schedulerName: default-scheduler 41 | securityContext: {} 42 | terminationGracePeriodSeconds: 30 43 | -------------------------------------------------------------------------------- /roles/ks-core/prepare/templates/custom-values-istio-init.yaml.j2: 
-------------------------------------------------------------------------------- 1 | global: 2 | # Default hub for Istio images. 3 | # Releases are published to docker hub under 'istio' project. 4 | # Daily builds from prow are on gcr.io, and nightly builds from circle on docker.io/istionightly 5 | hub: {{ istio_hub }} 6 | image: {{ istio_kubectl_image }} 7 | # Default tag for Istio images. 8 | tag: {{ istio_tag }} 9 | 10 | # imagePullPolicy is applied to istio control plane components. 11 | # local tests require IfNotPresent, to avoid uploading to dockerhub. 12 | # TODO: Switch to Always as default, and override in the local tests. 13 | imagePullPolicy: IfNotPresent 14 | 15 | certmanager: 16 | enabled: false 17 | -------------------------------------------------------------------------------- /roles/ks-devops/OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - chilianyi 3 | - yudong2015 4 | reviewers: 5 | - chilianyi 6 | - yudong2015 7 | -------------------------------------------------------------------------------- /roles/ks-devops/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | devops: 3 | jenkinsVolumeSize: "16Gi" 4 | jenkinsMemoryLim: "{{ jenkins_memory_lim |default('4Gi') }}" 5 | jenkinsMemoryReq: "{{ jenkins_memory_req |default('4Gi') }}" 6 | jenkinsCpuReq: "{{ jenkins_cpu_req |default('0.5') }}" 7 | jenkinsCpuLim: "{{ jenkins_cpu_lim |default('1') }}" 8 | 9 | 10 | EMAIL_SMTP_HOST: mail.example.com 11 | EMAIL_FROM_ADDR: admin@example.com 12 | EMAIL_FROM_NAME: KubeSphere 13 | EMAIL_USE_SSL: false 14 | EMAIL_SMTP_PORT: 465 15 | EMAIL_FROM_PASS: P@ssw0rd 16 | 17 | sonar_account_user: ks-devops 18 | 19 | harbor_domain: harbor.devops.kubesphere.local 20 | 21 | 22 | sonarqube_enable: true 23 | 24 | sonarqube_user_name: sonarqube 25 | sonarqube_user_passwd: sonarqube 26 | -------------------------------------------------------------------------------- /roles/ks-devops/files/ks-devops/charts/README.md: -------------------------------------------------------------------------------- 1 | You can get the helm chart package file of `ks-devops` from https://github.com/kubesphere-sigs/ks-devops-helm-chart/releases 2 | 3 | Get the helm chart of Argo CD from https://github.com/argoproj/argo-helm/releases 4 | -------------------------------------------------------------------------------- /roles/ks-devops/files/ks-devops/charts/argo-cd-4.4.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-devops/files/ks-devops/charts/argo-cd-4.4.0.tgz -------------------------------------------------------------------------------- /roles/ks-devops/files/ks-devops/charts/ks-devops-0.2.3.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-devops/files/ks-devops/charts/ks-devops-0.2.3.tgz -------------------------------------------------------------------------------- /roles/ks-devops/tasks/cleanup.yaml: -------------------------------------------------------------------------------- 1 | - name: ks-devops | Deleting service monitor of devops 2 | shell: | 3 | # Those resources are going to be installed by the ks-devops helm chart 4 | kubectl -n kubesphere-monitoring-system delete --ignore-not-found=true ServiceMonitor 
s2i-operator 5 | kubectl -n kubesphere-monitoring-system delete --ignore-not-found=true ServiceMonitor ks-jenkins 6 | kubectl -n kubesphere-monitoring-system delete --ignore-not-found=true PrometheusRule prometheus-devops-rules 7 | kubectl delete --ignore-not-found=true namespace kubesphere-devops-worker 8 | failed_when: false 9 | -------------------------------------------------------------------------------- /roles/ks-devops/tasks/uninstall_update_center.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ks-devops | Uninstall update center 3 | shell: > 4 | {{ bin_dir }}/helm -n kubesphere-devops-system uninstall uc 5 | -------------------------------------------------------------------------------- /roles/ks-devops/templates/argo-cd-values.yaml.j2: -------------------------------------------------------------------------------- 1 | global: 2 | image: 3 | repository: "{{ argocd_repo }}" 4 | 5 | dex: 6 | image: 7 | repository: "{{ argocd_dex_repo }}" 8 | readinessProbe: 9 | timeoutSeconds: 10 10 | livenessProbe: 11 | timeoutSeconds: 10 12 | 13 | controller: 14 | readinessProbe: 15 | timeoutSeconds: 10 16 | livenessProbe: 17 | timeoutSeconds: 10 18 | 19 | applicationSet: 20 | image: 21 | repository: "{{ argocd_applicationset_repo }}" 22 | 23 | redis: 24 | image: 25 | repository: "{{ argocd_redis_repo }}" 26 | 27 | repoServer: 28 | readinessProbe: 29 | timeoutSeconds: 10 30 | livenessProbe: 31 | timeoutSeconds: 10 32 | 33 | server: 34 | readinessProbe: 35 | timeoutSeconds: 10 36 | livenessProbe: 37 | timeoutSeconds: 10 38 | -------------------------------------------------------------------------------- /roles/ks-events/files/kube-events/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /roles/ks-events/files/kube-events/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-events 3 | description: provides easy kube-events definitions for event exporting, filtering and alerting. 4 | icon: 5 | type: application 6 | version: 0.4.0 7 | appVersion: 0.6.0 8 | source: 9 | - https://github.com/kubesphere/kube-events 10 | keywords: 11 | - event 12 | - operator 13 | -------------------------------------------------------------------------------- /roles/ks-events/files/kube-events/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | Kube-events has been installed. 
Check its status by running: 2 | kubectl --namespace {{ $.Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ $.Release.Name }}" -------------------------------------------------------------------------------- /roles/ks-events/files/kube-events/templates/operator/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.operator.enabled .Values.operator.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "kube-events.operator.serviceAccountName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{ include "kube-events.labels" . | indent 4 }} 9 | {{- end }} -------------------------------------------------------------------------------- /roles/ks-events/templates/custom-filter-events.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Filter 3 | metadata: 4 | name: filter-events 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: "events" 9 | spec: 10 | match: kube_events 11 | filters: 12 | - parser: 13 | {% if (logging_container_runtime== 'containerd') or (logging_container_runtime== 'crio') %} 14 | keyName: message 15 | {% else %} 16 | keyName: log 17 | {% endif %} 18 | parser: json -------------------------------------------------------------------------------- /roles/ks-events/templates/custom-input-events.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Input 3 | metadata: 4 | name: tail-events 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: "events" 9 | spec: 10 | tail: 11 | tag: kube_events 12 | path: /var/log/containers/*_kubesphere-logging-system_events-exporter*.log 13 | {% if (logging_container_runtime== 'containerd') or (logging_container_runtime== 'crio') %} 14 | parser: cri 15 | {% else %} 16 | parser: docker 17 | {% endif %} 18 | refreshIntervalSeconds: 10 19 | memBufLimit: 100MB 20 | skipLongLines: true 21 | db: /fluent-bit/tail/pos-events.db 22 | dbSync: Normal 23 | -------------------------------------------------------------------------------- /roles/ks-istio/files/istio/clusterroles.yaml: -------------------------------------------------------------------------------- 1 | - apiGroups: 2 | - security.istio.io 3 | resources: 4 | - '*' 5 | verbs: 6 | - '*' 7 | - apiGroups: 8 | - monitoring.kiali.io 9 | resources: 10 | - '*' 11 | verbs: 12 | - '*' 13 | - apiGroups: 14 | - networking.k8s.io 15 | resources: 16 | - '*' 17 | verbs: 18 | - '*' 19 | - apiGroups: 20 | - kiali.io 21 | resources: 22 | - '*' 23 | verbs: 24 | - '*' 25 | -------------------------------------------------------------------------------- /roles/ks-istio/files/istio/istio-1.14.6-linux-amd64.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-istio/files/istio/istio-1.14.6-linux-amd64.tar.gz -------------------------------------------------------------------------------- /roles/ks-istio/files/istio/istio-1.14.6-linux-arm64.tar.gz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-istio/files/istio/istio-1.14.6-linux-arm64.tar.gz -------------------------------------------------------------------------------- /roles/ks-istio/files/istio/readme.md: -------------------------------------------------------------------------------- 1 | # upgrading to Istio 1.14.6 2 | 3 | With the KubeSphere v3.4 release, Istio v1.14.6 will be installed when ServiceMesh is enabled. However, Istio v1.11.1 may still be running in the cluster when upgrading from an older release. You have to roll out all your existing deployments before Istio v1.11.1 is uninstalled. 4 | 5 | 6 | # install istio-1.6.10 7 | 8 | ```bash 9 | ./istio-1.6.10/bin/istioctl install --set hub=istio --set tag=1.6.10 --set addonComponents.prometheus.enabled=false --set values.global.jwtPolicy=first-party-jwt --set values.global.proxy.autoInject=disabled --set values.global.tracer.zipkin.address="jaeger-collector.istio-system.svc:9411" --set values.sidecarInjectorWebhook.enableNamespacesByDefault=true --set values.global.imagePullPolicy=IfNotPresent --set values.global.controlPlaneSecurityEnabled=false --set revision=1-6-10 10 | 11 | istio-1.6.10/bin/istioctl install -f istio-profile.yaml --set revision=1-6-10 12 | ``` 13 | 14 | # upgrade istio 1.8.4 15 | 16 | ```bash 17 | curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.8.4 TARGET_ARCH=x86_64 sh - 18 | ./istio-1.8.4/bin/istioctl install --set hub=istio --set values.global.proxy.autoInject=disabled --set meshConfig.defaultConfig.tracing.zipkin.address="jaeger-collector.istio-system.svc:9411" --set values.sidecarInjectorWebhook.enableNamespacesByDefault=true --set values.global.imagePullPolicy=IfNotPresent --set revision=1-8-4 19 | 20 | ``` 21 | 22 | # uninstall 1.6.10 23 | 24 | ```bash 25 | ./istio-1.8.4/bin/istioctl x uninstall --revision=1-6-10 26 | ``` -------------------------------------------------------------------------------- /roles/ks-istio/files/jaeger/jaeger-operator-2.29.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-istio/files/jaeger/jaeger-operator-2.29.0.tgz -------------------------------------------------------------------------------- /roles/ks-istio/files/jaeger/readme.md: -------------------------------------------------------------------------------- 1 | # deploy jaeger-operator 2 | 3 | ```bash 4 | helm upgrade --install jaeger-operator jaeger-operator-2.29.0.tgz -f custom-values-jaeger.yaml -n istio-system 5 | ``` 6 | 7 | 8 | # deploy jaeger CR 9 | 10 | ```bash 11 | kubectl apply -f jaeger-production.yaml -n istio-system 12 | ``` -------------------------------------------------------------------------------- /roles/ks-istio/files/kiali/custom-values-kiali.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: "istio-system" 2 | fullnameOverride: "kiali-operator" 3 | 4 | image: 5 | repo: kubesphere/kiali-operator 6 | tag: v1.50.1 7 | pullPolicy: IfNotPresent 8 | pullSecrets: [] 9 | 10 | # Defines where the operator will look for Kiali CR resources. "" means "all namespaces". 
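# Note: because watchNamespace is pinned to istio-system below, Kiali CRs
# created in any other namespace are ignored by the operator; the kiali-cr
# manifest in this directory is accordingly applied with -n istio-system
# (see the readme and kiali-install tasks further down).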
11 | watchNamespace: "istio-system" 12 | 13 | allowAdHocKialiImage: true -------------------------------------------------------------------------------- /roles/ks-istio/files/kiali/kiali-cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kiali.io/v1alpha1 2 | kind: Kiali 3 | metadata: 4 | name: kiali 5 | annotations: 6 | ansible.operator-sdk/verbosity: "1" 7 | spec: 8 | istio_namespace: istio-system 9 | auth: 10 | strategy: "token" 11 | deployment: 12 | accessible_namespaces: [ "**" ] 13 | image_name: "quay.io/kiali/kiali" 14 | image_pull_policy: "IfNotPresent" 15 | image_version: "v1.50" 16 | ingress_enabled: false 17 | namespace: "istio-system" 18 | service_type: "ClusterIP" 19 | external_services: 20 | prometheus: 21 | url: http://prometheus-k8s.kubesphere-monitoring-system:9090 22 | tracing: 23 | enabled: true 24 | url: http://jaeger-query.istio-system.svc:16686 25 | api: 26 | namespaces: 27 | exclude: 28 | - "kube-.*" 29 | - "kubesphere-.*system" 30 | - "openpitrix-system" 31 | -------------------------------------------------------------------------------- /roles/ks-istio/files/kiali/kiali-operator-1.50.1.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/ks-istio/files/kiali/kiali-operator-1.50.1.tgz -------------------------------------------------------------------------------- /roles/ks-istio/files/kiali/readme.md: -------------------------------------------------------------------------------- 1 | # deploy kiali-operator 2 | 3 | ```bash 4 | helm upgrade --install kiali-operator kiali-operator-1.50.1.tgz -f custom-values-kiali.yaml -n istio-system 5 | ``` 6 | 7 | # create kiali-cr 8 | 9 | ```bash 10 | kubectl apply -f kiali-cr.yaml -n istio-system 11 | ``` -------------------------------------------------------------------------------- /roles/ks-istio/tasks/kiali-install.yaml: -------------------------------------------------------------------------------- 1 | - name: servicemesh | Checking kiali-operator 2 | shell: > 3 | {{ bin_dir }}/helm list -n istio-system | grep "kiali-operator" 4 | register: kiali_check 5 | failed_when: false 6 | 7 | - name: servicemesh | Deleting kiali operator deployment 8 | shell: > 9 | {{ bin_dir }}/kubectl -n istio-system delete deployment kiali-operator 10 | when: 11 | - (kiali_check.stdout.find("deployed") != -1) and (kiali_check.stdout.find("v1.50.1") == -1) 12 | ignore_errors: True 13 | 14 | - name: servicemesh | Deploying kiali-operator 15 | shell: > 16 | {{ bin_dir }}/helm upgrade --install kiali-operator {{ kubesphere_dir }}/servicemesh/kiali/kiali-operator-1.50.1.tgz 17 | -f {{ kubesphere_dir }}/servicemesh/kiali/custom-values-kiali.yaml 18 | --namespace istio-system 19 | when: 20 | - (kiali_check.stdout.find("deployed") == -1) or (kiali_check.stdout.find("v1.50.1") == -1) 21 | 22 | - name: servicemesh | Deploying kiali-cr 23 | shell: "{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/servicemesh/kiali/kiali-cr.yaml --namespace istio-system" 24 | register: kiali_cr_result 25 | until: kiali_cr_result is succeeded 26 | retries: 5 27 | delay: 10 28 | when: 29 | - (kiali_check.stdout.find("deployed") == -1) or (kiali_check.stdout.find("v1.50.1") == -1) 30 | -------------------------------------------------------------------------------- /roles/ks-istio/tasks/upgrade-pre.yaml: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | - name: servicemesh | upgrade-pre | delete old sidecar injection 4 | shell: > 5 | {{ bin_dir }}/kubectl delete mutatingwebhookconfigurations istio-sidecar-injector-1-11-2 6 | ignore_errors: true 7 | 8 | - name: servicemesh | delete old validating webhook 9 | shell: > 10 | {{ bin_dir }}/kubectl delete validatingwebhookconfigurations istio-validator-1-11-2-istio-system 11 | ignore_errors: true -------------------------------------------------------------------------------- /roles/ks-istio/templates/custom-values-kiali.yaml.j2: -------------------------------------------------------------------------------- 1 | nameOverride: "istio-system" 2 | fullnameOverride: "kiali-operator" 3 | 4 | image: 5 | repo: {{ kiali_operator_repo }} 6 | tag: {{ kiali_operator_tag }} 7 | pullPolicy: IfNotPresent 8 | pullSecrets: [] 9 | 10 | # Defines where the operator will look for Kiali CR resources. "" means "all namespaces". 11 | watchNamespace: "istio-system" 12 | 13 | allowAdHocKialiImage: true 14 | -------------------------------------------------------------------------------- /roles/ks-istio/templates/istio-custom-profile.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if servicemesh.istio is defined %} 2 | spec: 3 | {{ servicemesh.istio | to_nice_yaml | indent(2) }} 4 | {% endif %} 5 | -------------------------------------------------------------------------------- /roles/ks-istio/templates/kiali-cr.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: kiali.io/v1alpha1 2 | kind: Kiali 3 | metadata: 4 | name: kiali 5 | annotations: 6 | ansible.operator-sdk/verbosity: "1" 7 | spec: 8 | istio_namespace: istio-system 9 | auth: 10 | strategy: "token" 11 | deployment: 12 | accessible_namespaces: [ "**" ] 13 | image_name: "{{ kiali_repo }}" 14 | image_pull_policy: "IfNotPresent" 15 | image_version: "{{ kiali_tag }}" 16 | namespace: "istio-system" 17 | service_type: "ClusterIP" 18 | ingress_enabled: false 19 | external_services: 20 | prometheus: 21 | url: http://prometheus-k8s.kubesphere-monitoring-system:9090 22 | tracing: 23 | enabled: true 24 | url: http://jaeger-query.istio-system.svc:16686 25 | api: 26 | namespaces: 27 | exclude: 28 | - "kube-.*" 29 | - "kubesphere-.*system" 30 | - "openpitrix-system" 31 | -------------------------------------------------------------------------------- /roles/ks-logging/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/ks-logging/files/fluentbit-operator-cri/filter-containerd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Filter 3 | metadata: 4 | name: containerd 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: logging 9 | spec: 10 | match: kube.* 11 | filters: 12 | - lua: 13 | script: 14 | key: containerd.lua 15 | name: fluent-bit-containerd-config 16 | call: containerd 17 | timeAsTable: true 18 | -------------------------------------------------------------------------------- /roles/ks-logging/files/fluentbit-operator-cri/fluentbit-containerd-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
kind: ConfigMap 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: operator 6 | app.kubernetes.io/name: fluent-bit-containerd-config 7 | name: fluent-bit-containerd-config 8 | namespace: kubesphere-logging-system 9 | data: 10 | containerd.lua: | 11 | function containerd( tag, timestamp, record) 12 | if(record["logtag"]~=nil) 13 | then 14 | timeStr = os.date("!*t", timestamp["sec"]) 15 | t = string.format("%4d-%02d-%02dT%02d:%02d:%02d.%sZ", 16 | timeStr["year"], timeStr["month"], timeStr["day"], 17 | timeStr["hour"], timeStr["min"], timeStr["sec"], 18 | timestamp["nsec"]); 19 | record["time"] = t; 20 | record["log"] = record["message"]; 21 | record["message"] = nil; 22 | return 1, timestamp, record 23 | else 24 | return 0,timestamp,record 25 | end 26 | end 27 | 28 | 29 | -------------------------------------------------------------------------------- /roles/ks-logging/files/fluentbit-operator/filter-logging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Filter 3 | metadata: 4 | name: kubernetes 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: "logging" 9 | spec: 10 | match: kube.* 11 | filters: 12 | - kubernetes: 13 | kubeURL: https://kubernetes.default.svc:443 14 | kubeCAFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 15 | kubeTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | labels: false 17 | annotations: false 18 | - nest: 19 | operation: lift 20 | nestedUnder: kubernetes 21 | addPrefix: kubernetes_ 22 | - modify: 23 | rules: 24 | - remove: stream 25 | - remove: kubernetes_pod_id 26 | - remove: kubernetes_host 27 | - remove: kubernetes_container_hash 28 | - nest: 29 | operation: nest 30 | wildcard: 31 | - kubernetes_* 32 | nestUnder: kubernetes 33 | removePrefix: kubernetes_ 34 | -------------------------------------------------------------------------------- /roles/ks-logging/files/fluentbit-operator/filter-systemd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Filter 3 | metadata: 4 | name: systemd 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: logging 9 | spec: 10 | match: service.* 11 | filters: 12 | - lua: 13 | script: 14 | key: systemd.lua 15 | name: fluent-bit-lua 16 | call: add_time 17 | timeAsTable: true 18 | -------------------------------------------------------------------------------- /roles/ks-logging/files/fluentbit-operator/input-systemd-kubelet.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Input 3 | metadata: 4 | name: kubelet 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: logging 9 | spec: 10 | systemd: 11 | tag: service.kubelet 12 | path: /var/log/journal 13 | db: /fluent-bit/tail/kubelet.db 14 | dbSync: Normal 15 | systemdFilter: 16 | - _SYSTEMD_UNIT=kubelet.service 17 | -------------------------------------------------------------------------------- /roles/ks-logging/files/logsidecar-injector/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 
2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /roles/ks-logging/files/logsidecar-injector/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: logsidecar-injector 3 | description: A Helm chart for logsidecar-injector 4 | icon: 5 | type: application 6 | version: 0.1.0 7 | appVersion: 1.2.0 8 | source: 9 | - https://github.com/kubesphere/logsidecar-injector 10 | keywords: 11 | - log 12 | - sidecar -------------------------------------------------------------------------------- /roles/ks-logging/files/logsidecar-injector/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | logsidecar-injector has been installed. Check its status by running: 2 | kubectl --namespace {{ $.Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ $.Release.Name }}" -------------------------------------------------------------------------------- /roles/ks-logging/files/logsidecar-injector/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | {{- $doubleLeftBrace := "{{" -}} 2 | {{- $doubleRightBrace := "}}" -}} 3 | apiVersion: v1 4 | kind: ConfigMap 5 | data: 6 | filebeat.yaml: |- 7 | filebeat.inputs: 8 | - type: log 9 | enabled: true 10 | paths: 11 | {{ $doubleLeftBrace }}range .Paths{{ $doubleRightBrace }} 12 | - {{ $doubleLeftBrace }}.{{ $doubleRightBrace }} 13 | {{ $doubleLeftBrace }}end{{ $doubleRightBrace }} 14 | output.console: 15 | codec.format: 16 | string: '%{[log.file.path]} %{[message]}' 17 | logging.level: warning 18 | sidecar.yaml: |- 19 | container: 20 | image: {{ .Values.sidecar.container.image.repository }}:{{ .Values.sidecar.container.image.tag }} 21 | imagePullPolicy: {{ .Values.sidecar.container.image.pullPolicy }} 22 | resources: 23 | {{ toYaml .Values.sidecar.container.resources | indent 8 }} 24 | initContainer: 25 | image: {{ .Values.sidecar.initContainer.image.repository }}:{{ .Values.sidecar.initContainer.image.tag }} 26 | imagePullPolicy: {{ .Values.sidecar.initContainer.image.pullPolicy }} 27 | resources: 28 | {{ toYaml .Values.sidecar.initContainer.resources | indent 8 }} 29 | metadata: 30 | name: {{ template "logsidecar-injector.fullname" . }}-configmap 31 | namespace: {{ .Release.Namespace }} 32 | labels: 33 | {{ include "logsidecar-injector.labels" . | indent 4 }} -------------------------------------------------------------------------------- /roles/ks-logging/files/logsidecar-injector/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for logsidecar-injector. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
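# The sidecar.container and sidecar.initContainer values further below are
# rendered into the injector's ConfigMap (templates/configmap.yaml above),
# which defines the filebeat sidecar and its init container that the
# injector adds to workloads opted in to disk-log collection.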
4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: kubespheredev/log-sidecar-injector 9 | tag: 1.1 10 | pullPolicy: IfNotPresent 11 | 12 | resources: {} 13 | # limits: 14 | # cpu: 100m 15 | # memory: 100Mi 16 | # requests: 17 | # cpu: 10m 18 | # memory: 10Mi 19 | 20 | configReloader: 21 | image: 22 | repository: jimmidyson/configmap-reload 23 | tag: v0.3.0 24 | pullPolicy: IfNotPresent 25 | resources: {} 26 | # limits: 27 | # cpu: 100m 28 | # memory: 100Mi 29 | # requests: 30 | # cpu: 10m 31 | # memory: 10Mi 32 | 33 | sidecar: 34 | container: 35 | image: 36 | repository: elastic/filebeat 37 | tag: 6.7.0 38 | pullPolicy: IfNotPresent 39 | resources: {} 40 | initContainer: 41 | image: 42 | repository: alpine 43 | tag: 3.9 44 | pullPolicy: IfNotPresent 45 | resources: {} -------------------------------------------------------------------------------- /roles/ks-logging/templates/custom-input-logging.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Input 3 | metadata: 4 | name: tail 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: "logging" 9 | spec: 10 | tail: 11 | tag: kube.* 12 | path: /var/log/containers/*.log 13 | {% if (logging_container_runtime== 'containerd') or (logging_container_runtime== 'crio') %} 14 | parser: cri 15 | {% else %} 16 | parser: docker 17 | {% endif %} 18 | refreshIntervalSeconds: 10 19 | memBufLimit: 100MB 20 | skipLongLines: true 21 | db: /fluent-bit/tail/pos.db 22 | dbSync: Normal 23 | excludePath: /var/log/containers/*_kubesphere-logging-system_events-exporter*.log,/var/log/containers/kube-auditing-webhook*_kubesphere-logging-system_kube-auditing-webhook*.log 24 | 25 | -------------------------------------------------------------------------------- /roles/ks-logging/templates/custom-input-systemd.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: logging.kubesphere.io/v1alpha2 2 | kind: Input 3 | metadata: 4 | name: {{ logging_container_runtime | default('docker') }} 5 | namespace: kubesphere-logging-system 6 | labels: 7 | logging.kubesphere.io/enabled: "true" 8 | logging.kubesphere.io/component: logging 9 | spec: 10 | systemd: 11 | tag: service.{{ logging_container_runtime | default('docker') }} 12 | path: /var/log/journal 13 | db: /fluent-bit/tail/docker.db 14 | dbSync: Normal 15 | systemdFilter: 16 | - _SYSTEMD_UNIT={{ logging_container_runtime | default('docker') }}.service -------------------------------------------------------------------------------- /roles/ks-migration/templates/ks-upgrade.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: ks-upgrade 6 | namespace: kubesphere-system 7 | labels: 8 | job: ks-upgrade 9 | version: v3.3.1 10 | spec: 11 | template: 12 | spec: 13 | serviceAccountName: ks-installer 14 | restartPolicy: OnFailure 15 | containers: 16 | - command: 17 | - ks-upgrade 18 | - --logtostderr 19 | - --v=4 20 | image: {{ ks_update_repo }}:{{ ks_update_tag }} 21 | imagePullPolicy: IfNotPresent 22 | name: ks-upgrade -------------------------------------------------------------------------------- /roles/ks-monitor/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | monitoring: 4 | prometheusReplica: 2 5 | prometheusMemoryRequest: 400Mi 6 | 
prometheusVolumeSize: 20Gi 7 | prometheus: 8 | limits: 9 | cpu: 8 10 | memory: 32Gi 11 | requests: 12 | cpu: 80m 13 | memory: 400Mi 14 | node_exporter: 15 | limits: 16 | cpu: 600m 17 | memory: 500Mi 18 | requests: 19 | cpu: 51m 20 | memory: 180Mi 21 | kube_rbac_proxy: 22 | limits: 23 | cpu: 1 24 | memory: 100Mi 25 | requests: 26 | cpu: 10m 27 | memory: 20Mi 28 | kube_state_metrics: 29 | limits: 30 | cpu: 4 31 | memory: 8Gi 32 | requests: 33 | cpu: 50m 34 | memory: 150Mi 35 | alertmanager: 36 | limits: 37 | cpu: 500m 38 | memory: 500Mi 39 | requests: 40 | cpu: 20m 41 | memory: 30Mi 42 | grafana: 43 | enable: false 44 | 45 | etcd.endpointIps: |- 46 | {% for item in groups['etcd'] -%} 47 | {{ hostvars[item]['access_ip'] | default(hostvars[item]['ip'] | default(hostvars[item]['ansible_default_ipv4']['address'])) }}{% if not loop.last %},{% endif %} 48 | {%- endfor %} 49 | 50 | etcd.port: 2379 51 | etcd.tlsEnable: true 52 | 53 | #etcd_config_dir: /etc/ssl/etcd 54 | #etcd_cert_dir: "{{ etcd_config_dir }}/ssl" 55 | #etcd_client_ca_crt: "{{ etcd_cert_dir }}/ca.pem" 56 | #etcd_client_crt: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}.pem" 57 | #etcd_client_key: "{{ etcd_cert_dir }}/node-{{ inventory_hostname }}-key.pem" 58 | 59 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/gpu-monitoring/nvidia-dcgm-exporter/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/gpu-monitoring/nvidia-dcgm-exporter/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: dcgm-exporter 3 | description: A Helm chart for DCGM exporter 4 | version: "2.4.0-rc.3" 5 | kubeVersion: ">= 1.13.0-0" 6 | appVersion: "2.4.0-rc.3" 7 | sources: 8 | - https://gitlab.com/nvidia/container-toolkit/gpu-monitoring-tools 9 | home: https://github.com/nvidia/gpu-monitoring-tools/ 10 | icon: https://assets.nvidiagrid.net/ngc/logos/DCGM.png 11 | keywords: 12 | - gpu 13 | - cuda 14 | - compute 15 | - monitoring 16 | - telemetry 17 | - tesla 18 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/gpu-monitoring/nvidia-dcgm-exporter/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if contains "NodePort" .Values.service.type }} 3 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "dcgm-exporter.fullname" . }}) 4 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 5 | echo http://$NODE_IP:$NODE_PORT/metrics 6 | {{- else if contains "LoadBalancer" .Values.service.type }} 7 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
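{{- /* Each branch of this NOTES template keys off .Values.service.type, so
only the access instructions matching the deployed Service (NodePort,
LoadBalancer, or ClusterIP port-forward) are rendered. */}}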
8 | You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "dcgm-exporter.fullname" . }}' 9 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "dcgm-exporter.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 10 | echo http://$SERVICE_IP:{{ .Values.service.port }} 11 | {{- else if contains "ClusterIP" .Values.service.type }} 12 | export POD_NAME=$(kubectl get pods -n {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "dcgm-exporter.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 13 | kubectl -n {{ .Release.Namespace }} port-forward $POD_NAME 8080:{{ .Values.service.port }} & 14 | echo "Visit http://127.0.0.1:8080/metrics to use your application" 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/gpu-monitoring/nvidia-dcgm-exporter/templates/service.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: {{ include "dcgm-exporter.fullname" . }} 19 | namespace: {{ .Release.Namespace }} 20 | labels: 21 | {{- include "dcgm-exporter.labels" . | nindent 4 }} 22 | app.kubernetes.io/component: "dcgm-exporter" 23 | {{- with .Values.service.annotations }} 24 | annotations: 25 | {{- toYaml . | nindent 4 }} 26 | {{- end }} 27 | spec: 28 | type: {{ .Values.service.type }} 29 | ports: 30 | - name: "metrics" 31 | port: {{ .Values.service.port }} 32 | targetPort: {{ .Values.service.port }} 33 | protocol: TCP 34 | selector: 35 | {{- include "dcgm-exporter.selectorLabels" . | nindent 4 }} 36 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/gpu-monitoring/nvidia-dcgm-exporter/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "dcgm-exporter.serviceAccountName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "dcgm-exporter.labels" . | nindent 4 }} 9 | app.kubernetes.io/component: "dcgm-exporter" 10 | {{- with .Values.serviceAccount.annotations }} 11 | annotations: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | {{- end -}} 15 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 2.3.0 3 | description: Notification Manager manages notifications in a multi-tenant K8s environment. 
It receives alerts or notifications from different senders and then sends notifications to various tenant receivers based on alerts/notifications' tenant label like "namespace". 4 | name: notification-manager 5 | version: 2.3.0 6 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/clusterrolebindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: notification-manager-controller-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: notification-manager-controller-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: notification-manager-sa 12 | namespace: {{ .Release.Namespace }} 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: notification-manager-proxy-rolebinding 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: notification-manager-proxy-role 23 | subjects: 24 | - kind: ServiceAccount 25 | name: notification-manager-sa 26 | namespace: {{ .Release.Namespace }} 27 | 28 | {{- if eq .Values.kubesphere true }} 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: ClusterRoleBinding 32 | metadata: 33 | name: notification-manager-tenant-sidecar-rolebinding 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: notification-manager-tenant-sidecar-role 38 | subjects: 39 | - kind: ServiceAccount 40 | name: notification-manager-sa 41 | namespace: {{ .Release.Namespace }} 42 | {{- end}} 43 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/rolebindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: notification-manager-leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: notification-manager-leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: notification-manager-sa 12 | namespace: {{ .Release.Namespace }} 13 | 14 | 15 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: notification-manager-leader-election-role 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - create 15 | - update 16 | - patch 17 | - delete 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - configmaps/status 22 | verbs: 23 | - get 24 | - update 25 | - patch 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | 45 | 46 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
ServiceAccount 3 | metadata: 4 | name: notification-manager-sa 5 | 6 | 7 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: notification-manager-controller-metrics 7 | spec: 8 | ports: 9 | - name: https 10 | port: 8443 11 | protocol: TCP 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | sessionAffinity: None 16 | type: ClusterIP 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: notification-manager-webhook 22 | spec: 23 | ports: 24 | - port: 443 25 | targetPort: 9443 26 | selector: 27 | control-plane: controller-manager 28 | 29 | 30 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/notification-manager/templates/zh-cn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | zh-cn: | 4 | - name: zh-cn 5 | dictionary: 6 | alert: "告警" 7 | alerts: "告警" 8 | firing: "触发中" 9 | resolved: "已解决" 10 | alertname: "告警名称" 11 | alerttype: "告警类型" 12 | alerttime: "告警时间" 13 | cluster: "集群" 14 | namespace: "项目" 15 | severity: "告警级别" 16 | container: "容器" 17 | pod: "容器组" 18 | service: "服务" 19 | deployment: "部署" 20 | job: "任务" 21 | daemonset: "守护进程集" 22 | statefulset: "有状态副本集" 23 | instance: "实例" 24 | resource: "资源" 25 | user: "用户" 26 | verb: "操作" 27 | group: "用户组" 28 | requestReceivedTimestamp: "请求接收时间" 29 | role: "角色" 30 | host_ip: "主机IP" 31 | node: "节点" 32 | rule_id: "告警规则" 33 | owner_kind: "目标类型" 34 | workload: "工作负载" 35 | kind: ConfigMap 36 | metadata: 37 | name: zh-cn 38 | namespace: kubesphere-monitoring-system 39 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/alertmanager/alertmanager-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.23.0 10 | name: alertmanager-main 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | maxUnavailable: 1 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: alert-router 17 | app.kubernetes.io/instance: main 18 | app.kubernetes.io/name: alertmanager 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/alertmanager/alertmanager-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.23.0 10 | prometheus: k8s 11 | role: alert-rules 12 | name: alertmanager-main-rules 13 | namespace: kubesphere-monitoring-system 14 | spec: 15 | groups: [] 16 | -------------------------------------------------------------------------------- 
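The `alertmanager-main-rules` PrometheusRule above ships with an empty `groups` list; presumably the actual alert rules are injected elsewhere during installation. Purely as a hedged illustration of the shape such a group takes under the Prometheus Operator (the alert below is an invented example, not one shipped by this installer), an entry added to `spec.groups` might look like:

```yaml
spec:
  groups:
    - name: alertmanager.rules          # example group name, not from this repo
      rules:
        - alert: AlertmanagerDown       # hypothetical alert for illustration
          expr: absent(up{job="alertmanager-main"} == 1)
          for: 5m
          labels:
            severity: critical
          annotations:
            summary: Alertmanager has disappeared from Prometheus target discovery.
```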
/roles/ks-monitor/files/prometheus/alertmanager/alertmanager-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.23.0 10 | name: alertmanager-main 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | ports: 14 | - name: web 15 | port: 9093 16 | targetPort: web 17 | - name: reloader-web 18 | port: 8080 19 | targetPort: reloader-web 20 | selector: 21 | app.kubernetes.io/component: alert-router 22 | app.kubernetes.io/instance: main 23 | app.kubernetes.io/name: alertmanager 24 | app.kubernetes.io/part-of: kube-prometheus 25 | sessionAffinity: ClientIP 26 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/alertmanager/alertmanager-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.23.0 10 | name: alertmanager-main 11 | namespace: kubesphere-monitoring-system 12 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/alertmanager/alertmanager-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: alert-router 6 | app.kubernetes.io/instance: main 7 | app.kubernetes.io/name: alertmanager 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/vendor: kubesphere 10 | app.kubernetes.io/version: 0.23.0 11 | name: alertmanager-main 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | endpoints: 15 | - interval: 1m 16 | port: web 17 | - interval: 1m 18 | port: reloader-web 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/component: alert-router 22 | app.kubernetes.io/instance: main 23 | app.kubernetes.io/name: alertmanager 24 | app.kubernetes.io/part-of: kube-prometheus 25 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/etcd/prometheus-endpointsEtcd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Endpoints 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: etcd 6 | name: etcd 7 | namespace: kube-system 8 | subsets: 9 | - addresses: [] 10 | ports: 11 | - name: metrics 12 | port: 2379 13 | protocol: TCP 14 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/etcd/prometheus-secretEtcdCerts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | etcd-client-ca.crt: "" 4 | etcd-client.crt: "" 5 | etcd-client.key: "" 6 | kind: Secret 7 | metadata: 8 | name: kube-etcd-client-certs 9 | namespace: kubesphere-monitoring-system 10 | type: Opaque 11 | -------------------------------------------------------------------------------- 
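The `kube-etcd-client-certs` Secret above is deliberately shipped with empty data, just as the `etcd` Endpoints object before it ships with an empty address list; the installer fills both in from its `etcd.endpointIps` and commented `etcd_client_*` settings in `roles/ks-monitor/defaults/main.yaml`. As a sketch only, with hypothetical certificate paths modeled on those commented defaults, populating the Secret by hand might look like:

```bash
# NODE and the /etc/ssl/etcd/ssl paths are assumptions based on the
# commented etcd_client_* defaults, not values taken from this repo.
NODE=master1
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs \
  --from-file=etcd-client-ca.crt=/etc/ssl/etcd/ssl/ca.pem \
  --from-file=etcd-client.crt=/etc/ssl/etcd/ssl/node-${NODE}.pem \
  --from-file=etcd-client.key=/etc/ssl/etcd/ssl/node-${NODE}-key.pem \
  --dry-run=client -o yaml | kubectl apply -f -
```

The etcd ServiceMonitor that follows mounts exactly these keys from `/etc/prometheus/secrets/kube-etcd-client-certs/` so Prometheus can scrape etcd over TLS.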
/roles/ks-monitor/files/prometheus/etcd/prometheus-serviceEtcd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: etcd 6 | name: etcd 7 | namespace: kube-system 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: metrics 12 | port: 2379 13 | targetPort: 2379 14 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/etcd/prometheus-serviceMonitorEtcd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: etcd 6 | app.kubernetes.io/vendor: kubesphere 7 | name: etcd 8 | namespace: kubesphere-monitoring-system 9 | spec: 10 | endpoints: 11 | - interval: 1m 12 | port: metrics 13 | scheme: https 14 | tlsConfig: 15 | caFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client-ca.crt 16 | certFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.crt 17 | keyFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.key 18 | jobLabel: app.kubernetes.io/name 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/name: etcd 22 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-dashboardDatasources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 8.3.3 9 | name: grafana-datasources 10 | namespace: kubesphere-monitoring-system 11 | stringData: 12 | datasources.yaml: |- 13 | { 14 | "apiVersion": 1, 15 | "datasources": [ 16 | { 17 | "access": "proxy", 18 | "editable": false, 19 | "name": "prometheus", 20 | "orgId": 1, 21 | "type": "prometheus", 22 | "url": "http://prometheus-k8s.kubesphere-monitoring-system.svc:9090", 23 | "version": 1 24 | } 25 | ] 26 | } 27 | type: Opaque 28 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-dashboardSources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | dashboards.yaml: |- 4 | { 5 | "apiVersion": 1, 6 | "providers": [ 7 | { 8 | "folder": "Default", 9 | "folderUid": "", 10 | "name": "0", 11 | "options": { 12 | "path": "/grafana-dashboard-definitions/0" 13 | }, 14 | "orgId": 1, 15 | "type": "file" 16 | } 17 | ] 18 | } 19 | kind: ConfigMap 20 | metadata: 21 | labels: 22 | app.kubernetes.io/component: grafana 23 | app.kubernetes.io/name: grafana 24 | app.kubernetes.io/part-of: kube-prometheus 25 | app.kubernetes.io/version: 8.3.3 26 | name: grafana-dashboards 27 | namespace: kubesphere-monitoring-system 28 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 8.3.3 9 | name: grafana 10 | namespace: kubesphere-monitoring-system 11 | spec: 12 
| ports: 13 | - name: http 14 | port: 3000 15 | targetPort: http 16 | selector: 17 | app.kubernetes.io/component: grafana 18 | app.kubernetes.io/name: grafana 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 8.3.3 9 | name: grafana 10 | namespace: kubesphere-monitoring-system 11 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: grafana 6 | app.kubernetes.io/name: grafana 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/vendor: kubesphere 9 | app.kubernetes.io/version: 8.3.3 10 | name: grafana 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | endpoints: 14 | - interval: 1m 15 | port: http 16 | selector: 17 | matchLabels: 18 | app.kubernetes.io/name: grafana 19 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/grafana/grafana-storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: grafana-storage 5 | namespace: kubesphere-monitoring-system 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 50Mi 12 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kube-prometheus/kube-prometheus-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-prometheus 7 | app.kubernetes.io/part-of: kube-prometheus 8 | prometheus: k8s 9 | role: alert-rules 10 | name: kube-prometheus-rules 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | groups: [] 14 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kube-state-metrics/kube-state-metrics-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.6.0 9 | name: kubesphere-kube-state-metrics 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: kubesphere-kube-state-metrics 14 | subjects: 15 | - kind: ServiceAccount 16 | name: kube-state-metrics 17 | namespace: kubesphere-monitoring-system 18 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kube-state-metrics/kube-state-metrics-prometheusRule.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.6.0 9 | prometheus: k8s 10 | role: alert-rules 11 | name: kube-state-metrics-rules 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | groups: [] 15 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kube-state-metrics/kube-state-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.6.0 9 | name: kube-state-metrics 10 | namespace: kubesphere-monitoring-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https-main 15 | port: 8443 16 | targetPort: https-main 17 | - name: https-self 18 | port: 9443 19 | targetPort: https-self 20 | selector: 21 | app.kubernetes.io/component: exporter 22 | app.kubernetes.io/name: kube-state-metrics 23 | app.kubernetes.io/part-of: kube-prometheus 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kube-state-metrics/kube-state-metrics-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: kube-state-metrics 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 2.6.0 9 | name: kube-state-metrics 10 | namespace: kubesphere-monitoring-system 11 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubernetes/kubernetes-serviceKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-controller-manager 6 | name: kube-controller-manager-svc 7 | namespace: kube-system 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-metrics 12 | port: 10257 13 | targetPort: 10257 14 | selector: 15 | component: kube-controller-manager 16 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubernetes/kubernetes-serviceKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-scheduler 6 | name: kube-scheduler-svc 7 | namespace: kube-system 8 | spec: 9 | clusterIP: None 10 | ports: 11 | - name: https-metrics 12 | port: 10259 13 | targetPort: 10259 14 | selector: 15 | component: kube-scheduler 16 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubernetes/kubernetes-serviceMonitorCoreDNS.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: coredns 6 | app.kubernetes.io/part-of: 
kube-prometheus 7 | app.kubernetes.io/vendor: kubesphere 8 | name: coredns 9 | namespace: kubesphere-monitoring-system 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | interval: 1m 14 | metricRelabelings: 15 | - action: drop 16 | regex: coredns_cache_misses_total 17 | sourceLabels: 18 | - __name__ 19 | port: metrics 20 | jobLabel: app.kubernetes.io/name 21 | namespaceSelector: 22 | matchNames: 23 | - kube-system 24 | selector: 25 | matchLabels: 26 | k8s-app: kube-dns 27 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubernetes/kubernetes-serviceMonitorKubeControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-controller-manager 6 | app.kubernetes.io/part-of: kube-prometheus 7 | app.kubernetes.io/vendor: kubesphere 8 | name: kube-controller-manager 9 | namespace: kubesphere-monitoring-system 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | interval: 1m 14 | metricRelabelings: 15 | - action: keep 16 | regex: up 17 | sourceLabels: 18 | - __name__ 19 | port: https-metrics 20 | scheme: https 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | jobLabel: app.kubernetes.io/name 24 | namespaceSelector: 25 | matchNames: 26 | - kube-system 27 | selector: 28 | matchLabels: 29 | app.kubernetes.io/name: kube-controller-manager 30 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubernetes/kubernetes-serviceMonitorKubeScheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-scheduler 6 | app.kubernetes.io/part-of: kube-prometheus 7 | app.kubernetes.io/vendor: kubesphere 8 | name: kube-scheduler 9 | namespace: kubesphere-monitoring-system 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | interval: 1m 14 | metricRelabelings: 15 | - action: drop 16 | regex: scheduler_(e2e_scheduling_latency_microseconds|scheduling_algorithm_predicate_evaluation|scheduling_algorithm_priority_evaluation|scheduling_algorithm_preemption_evaluation|scheduling_algorithm_latency_microseconds|binding_latency_microseconds|scheduling_latency_seconds) 17 | sourceLabels: 18 | - __name__ 19 | port: https-metrics 20 | scheme: https 21 | tlsConfig: 22 | insecureSkipVerify: true 23 | jobLabel: app.kubernetes.io/name 24 | namespaceSelector: 25 | matchNames: 26 | - kube-system 27 | selector: 28 | matchLabels: 29 | app.kubernetes.io/name: kube-scheduler 30 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubesphere/kubesphere-serviceMonitorKsApiserver.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: ks-apiserver 6 | app.kubernetes.io/part-of: kube-prometheus 7 | app.kubernetes.io/vendor: kubesphere 8 | name: ks-apiserver 9 | namespace: kubesphere-monitoring-system 10 | spec: 11 | endpoints: 12 | - interval: 1m 13 | path: /kapis/metrics 14 | relabelings: 15 | - action: labeldrop 
16 | regex: (endpoint) 17 | targetPort: 9090 18 | namespaceSelector: 19 | matchNames: 20 | - kubesphere-system 21 | selector: 22 | matchLabels: 23 | app: ks-apiserver 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/kubesphere/kubesphere-serviceMonitorKsControllerManager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: ks-controller-manager 6 | app.kubernetes.io/part-of: kube-prometheus 7 | app.kubernetes.io/vendor: kubesphere 8 | name: ks-controller-manager 9 | namespace: kubesphere-monitoring-system 10 | spec: 11 | endpoints: 12 | - interval: 1m 13 | path: /kapis/metrics 14 | relabelings: 15 | - action: labeldrop 16 | regex: (endpoint) 17 | targetPort: 8080 18 | namespaceSelector: 19 | matchNames: 20 | - kubesphere-system 21 | selector: 22 | matchLabels: 23 | app: ks-controller-manager 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/node-exporter/node-exporter-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.3.1 9 | name: kubesphere-node-exporter 10 | namespace: kubesphere-monitoring-system 11 | rules: 12 | - apiGroups: 13 | - authentication.k8s.io 14 | resources: 15 | - tokenreviews 16 | verbs: 17 | - create 18 | - apiGroups: 19 | - authorization.k8s.io 20 | resources: 21 | - subjectaccessreviews 22 | verbs: 23 | - create 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/node-exporter/node-exporter-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.3.1 9 | name: kubesphere-node-exporter 10 | namespace: kubesphere-monitoring-system 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: kubesphere-node-exporter 15 | subjects: 16 | - kind: ServiceAccount 17 | name: node-exporter 18 | namespace: kubesphere-monitoring-system 19 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/node-exporter/node-exporter-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.3.1 9 | prometheus: k8s 10 | role: alert-rules 11 | name: node-exporter-rules 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | groups: [] 15 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/node-exporter/node-exporter-service.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.3.1 9 | name: node-exporter 10 | namespace: kubesphere-monitoring-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 9100 16 | targetPort: https 17 | selector: 18 | app.kubernetes.io/component: exporter 19 | app.kubernetes.io/name: node-exporter 20 | app.kubernetes.io/part-of: kube-prometheus 21 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/node-exporter/node-exporter-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: exporter 6 | app.kubernetes.io/name: node-exporter 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 1.3.1 9 | name: node-exporter 10 | namespace: kubesphere-monitoring-system 11 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus-operator/prometheus-operator-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.55.1 9 | name: kubesphere-prometheus-operator 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: kubesphere-prometheus-operator 14 | subjects: 15 | - kind: ServiceAccount 16 | name: prometheus-operator 17 | namespace: kubesphere-monitoring-system 18 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus-operator/prometheus-operator-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.55.1 9 | prometheus: k8s 10 | role: alert-rules 11 | name: prometheus-operator-rules 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | groups: [] 15 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus-operator/prometheus-operator-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.55.1 9 | name: prometheus-operator 10 | namespace: kubesphere-monitoring-system 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: https 15 | port: 8443 16 | targetPort: https 17 | selector: 18 | app.kubernetes.io/component: controller 19 | app.kubernetes.io/name: prometheus-operator 20 | app.kubernetes.io/part-of: kube-prometheus 21 | 
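Note that every Service in this batch (node-exporter, kube-state-metrics, prometheus-operator) is headless (clusterIP: None); it exists to give the matching ServiceMonitor labeled endpoints to scrape, not to load-balance traffic. A minimal spot-check sketch, assuming kubectl access and that the kube-prometheus manifests above have already been applied:
kubectl -n kubesphere-monitoring-system get svc,servicemonitors -l app.kubernetes.io/part-of=kube-prometheus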
-------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus-operator/prometheus-operator-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/version: 0.55.1 9 | name: prometheus-operator 10 | namespace: kubesphere-monitoring-system 11 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus-operator/prometheus-operator-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: controller 6 | app.kubernetes.io/name: prometheus-operator 7 | app.kubernetes.io/part-of: kube-prometheus 8 | app.kubernetes.io/vendor: kubesphere 9 | app.kubernetes.io/version: 0.55.1 10 | name: prometheus-operator 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | endpoints: 14 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 15 | honorLabels: true 16 | interval: 1m 17 | port: https 18 | scheme: https 19 | tlsConfig: 20 | insecureSkipVerify: true 21 | selector: 22 | matchLabels: 23 | app.kubernetes.io/component: controller 24 | app.kubernetes.io/name: prometheus-operator 25 | app.kubernetes.io/part-of: kube-prometheus 26 | app.kubernetes.io/version: 0.55.1 27 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: kubesphere-prometheus-k8s 11 | namespace: kubesphere-monitoring-system 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - nodes/metrics 17 | - nodes 18 | - services 19 | - endpoints 20 | - pods 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - ingresses 29 | verbs: 30 | - get 31 | - list 32 | - watch 33 | - apiGroups: 34 | - networking.k8s.io 35 | resources: 36 | - ingresses 37 | verbs: 38 | - get 39 | - list 40 | - watch 41 | - nonResourceURLs: 42 | - /metrics 43 | verbs: 44 | - get 45 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: kubesphere-prometheus-k8s 11 | namespace: kubesphere-monitoring-system 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: kubesphere-prometheus-k8s 16 | subjects: 17 | - kind: 
ServiceAccount 18 | name: prometheus-k8s 19 | namespace: kubesphere-monitoring-system 20 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: prometheus-k8s 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | minAvailable: 1 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: prometheus 17 | app.kubernetes.io/instance: k8s 18 | app.kubernetes.io/name: prometheus 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | prometheus: k8s 11 | role: alert-rules 12 | name: prometheus-k8s-prometheus-rules 13 | namespace: kubesphere-monitoring-system 14 | spec: 15 | groups: 16 | - name: prometheus.rules 17 | rules: 18 | - expr: | 19 | sum by(cluster) (up{job="prometheus-k8s",namespace="kubesphere-monitoring-system"} == 1) 20 | record: prometheus:up:sum 21 | - expr: | 22 | sum(rate(prometheus_tsdb_head_samples_appended_total{job="prometheus-k8s",namespace="kubesphere-monitoring-system"} [5m])) by (job, pod, cluster) 23 | record: prometheus:prometheus_tsdb_head_samples_appended:sum_rate 24 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-roleBindingConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: prometheus-k8s-config 11 | namespace: kubesphere-monitoring-system 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: prometheus-k8s-config 16 | subjects: 17 | - kind: ServiceAccount 18 | name: prometheus-k8s 19 | namespace: kubesphere-monitoring-system 20 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-roleConfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: prometheus-k8s-config 11 | namespace: kubesphere-monitoring-system 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 
16 | - configmaps 17 | verbs: 18 | - get 19 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: prometheus-k8s 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | ports: 14 | - name: web 15 | port: 9090 16 | targetPort: web 17 | - name: reloader-web 18 | port: 8080 19 | targetPort: reloader-web 20 | selector: 21 | app.kubernetes.io/component: prometheus 22 | app.kubernetes.io/instance: k8s 23 | app.kubernetes.io/name: prometheus 24 | app.kubernetes.io/part-of: kube-prometheus 25 | sessionAffinity: ClientIP 26 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-serviceAccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 2.39.1 10 | name: prometheus-k8s 11 | namespace: kubesphere-monitoring-system 12 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/prometheus/prometheus-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: prometheus 6 | app.kubernetes.io/instance: k8s 7 | app.kubernetes.io/name: prometheus 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/vendor: kubesphere 10 | app.kubernetes.io/version: 2.39.1 11 | name: prometheus-k8s 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | endpoints: 15 | - interval: 1m 16 | port: web 17 | - interval: 1m 18 | port: reloader-web 19 | selector: 20 | matchLabels: 21 | app.kubernetes.io/component: prometheus 22 | app.kubernetes.io/instance: k8s 23 | app.kubernetes.io/name: prometheus 24 | app.kubernetes.io/part-of: kube-prometheus 25 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/thanos-ruler/thanos-ruler-podDisruptionBudget.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: thanos-ruler 6 | app.kubernetes.io/instance: kubesphere 7 | app.kubernetes.io/name: thanos-ruler 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.31.0 10 | name: thanos-ruler-kubesphere 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | minAvailable: 1 14 | selector: 15 | matchLabels: 16 | app.kubernetes.io/component: thanos-ruler 17 | app.kubernetes.io/instance: kubesphere 18 | app.kubernetes.io/name: thanos-ruler 19 | app.kubernetes.io/part-of: kube-prometheus 20 | -------------------------------------------------------------------------------- 
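With the manifests above applied, the prometheus-k8s Service exposes the Prometheus web UI on port 9090 inside the cluster. A quick way to confirm the server is up from a workstation (a sketch, assuming kubectl access; readiness can take a minute or two after the operator reconciles the Prometheus CR):
kubectl -n kubesphere-monitoring-system port-forward svc/prometheus-k8s 9090:9090
curl -s http://127.0.0.1:9090/-/ready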
/roles/ks-monitor/files/prometheus/thanos-ruler/thanos-ruler-prometheusRule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: thanos-ruler 6 | app.kubernetes.io/instance: kubesphere 7 | app.kubernetes.io/name: thanos-ruler 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.31.0 10 | prometheus: k8s 11 | role: alert-rules 12 | name: thanos-ruler-kubesphere-rules 13 | namespace: kubesphere-monitoring-system 14 | spec: 15 | groups: [] 16 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/thanos-ruler/thanos-ruler-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: thanos-ruler 6 | app.kubernetes.io/instance: kubesphere 7 | app.kubernetes.io/name: thanos-ruler 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/version: 0.31.0 10 | name: thanos-ruler-kubesphere 11 | namespace: kubesphere-monitoring-system 12 | spec: 13 | ports: 14 | - name: web 15 | port: 10902 16 | targetPort: web 17 | selector: 18 | app.kubernetes.io/component: thanos-ruler 19 | app.kubernetes.io/instance: kubesphere 20 | app.kubernetes.io/name: thanos-ruler 21 | app.kubernetes.io/part-of: kube-prometheus 22 | sessionAffinity: ClientIP 23 | -------------------------------------------------------------------------------- /roles/ks-monitor/files/prometheus/thanos-ruler/thanos-ruler-serviceMonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: thanos-ruler 6 | app.kubernetes.io/instance: kubesphere 7 | app.kubernetes.io/name: thanos-ruler 8 | app.kubernetes.io/part-of: kube-prometheus 9 | app.kubernetes.io/vendor: kubesphere 10 | app.kubernetes.io/version: 0.31.0 11 | name: thanos-ruler-kubesphere 12 | namespace: kubesphere-monitoring-system 13 | spec: 14 | endpoints: 15 | - interval: 1m 16 | port: web 17 | selector: 18 | matchLabels: 19 | app.kubernetes.io/component: thanos-ruler 20 | app.kubernetes.io/instance: kubesphere 21 | app.kubernetes.io/name: thanos-ruler 22 | app.kubernetes.io/part-of: kube-prometheus 23 | -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/alert-migrate.yaml: -------------------------------------------------------------------------------- 1 | - name: Alerting | Getting alert migrate files 2 | copy: 3 | src: "{{ item }}" 4 | dest: "{{ kubesphere_dir }}/" 5 | loop: 6 | - "alert-migrate" 7 | 8 | - name: Alerting | Migrating alert rules 9 | shell: > 10 | /bin/bash {{ kubesphere_dir }}/alert-migrate/migrate-alertrules.sh -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing alertmanager 3 | shell: > 4 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/alertmanager -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 
Monitoring | Checking old installation files 3 | stat: 4 | path: "{{ kubesphere_dir }}/prometheus/prometheus-prometheus.yaml" 5 | failed_when: false 6 | register: prometheus_old_check 7 | 8 | - name: Monitoring | Deleting old prometheus-operator files 9 | file: 10 | path: "{{ kubesphere_dir }}/prometheus/00namespace-namespace.yaml" 11 | state: absent 12 | failed_when: false 13 | when: prometheus_old_check.stat.exists 14 | 15 | - name: Monitoring | Deleting old prometheus-operator 16 | shell: "{{ bin_dir }}/kubectl delete -f {{ kubesphere_dir }}/prometheus/" 17 | register: delete_result 18 | failed_when: "delete_result.stderr and 'NotFound' not in delete_result.stderr" 19 | when: prometheus_old_check.stat.exists 20 | 21 | - name: Monitoring | Deleting old prometheus directory 22 | file: 23 | path: "{{ kubesphere_dir }}/{{ item }}" 24 | state: absent 25 | failed_when: false 26 | loop: 27 | - "prometheus" 28 | when: prometheus_old_check.stat.exists -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/etcd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing etcd monitoring 3 | shell: "{{ bin_dir }}/kubectl apply -f {{ item }} --force" 4 | loop: '{{ query("fileglob", "{{ kubesphere_dir }}/prometheus/etcd/*.yaml") }}' 5 | register: etcd_result 6 | failed_when: "etcd_result.stderr and 'Warning' not in etcd_result.stderr and 'spec.clusterIP' not in etcd_result.stderr" 7 | until: etcd_result is succeeded 8 | retries: 5 9 | delay: 3 10 | when: 11 | - etcd.monitoring is defined 12 | - etcd.monitoring == true 13 | - item.find('secret') == -1 # ignoring the secret yaml -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/generate_manifests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Getting ks-monitoring installation files 3 | copy: 4 | src: "{{ item }}" 5 | dest: "{{ kubesphere_dir }}/" 6 | loop: 7 | - "prometheus" 8 | 9 | - import_tasks: get_old_config.yaml 10 | 11 | - name: Monitoring | Creating manifests 12 | template: 13 | src: "{{ item.file }}.j2" 14 | dest: "{{ kubesphere_dir }}/{{ item.path }}/{{ item.file }}" 15 | with_items: 16 | - { path: prometheus/prometheus-operator, file: prometheus-operator-deployment.yaml } 17 | - { path: prometheus/prometheus, file: prometheus-prometheus.yaml } 18 | - { path: prometheus/prometheus, file: prometheus-podDisruptionBudget.yaml } 19 | - { path: prometheus/kube-state-metrics, file: kube-state-metrics-deployment.yaml } 20 | - { path: prometheus/node-exporter, file: node-exporter-daemonset.yaml } 21 | - { path: prometheus/alertmanager, file: alertmanager-alertmanager.yaml } 22 | - { path: prometheus/alertmanager, file: alertmanager-podDisruptionBudget.yaml } 23 | - { path: prometheus/grafana, file: grafana-deployment.yaml } 24 | - { path: prometheus/etcd, file: prometheus-serviceMonitorEtcd.yaml } 25 | - { path: prometheus/etcd, file: prometheus-endpointsEtcd.yaml } 26 | - { path: prometheus/thanos-ruler, file: thanos-ruler-thanosRuler.yaml } 27 | - { path: prometheus/thanos-ruler, file: thanos-ruler-podDisruptionBudget.yaml } -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing grafana 3 | shell: > 4 | {{ bin_dir
}}/kubectl apply -f {{ kubesphere_dir }}/prometheus/grafana -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/k8s-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing k8s monitoring 3 | shell: > 4 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/kubernetes --force -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/ks-core-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing ks-core monitoring 3 | shell: > 4 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/kubesphere -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/ks-istio-monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Getting ks-istio monitoring installation files 3 | copy: 4 | src: "{{ item }}" 5 | dest: "{{ kubesphere_dir }}/" 6 | loop: 7 | - "ks-istio-monitoring" 8 | 9 | - name: Monitoring | Installing ks-istio monitoring 10 | shell: > 11 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/ks-istio-monitoring --force 12 | -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/kube-state-metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing kube-state-metrics 3 | shell: > 4 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/kube-state-metrics --force -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/monitoring-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Monitoring | Getting monitoring-dashboard installation files 4 | copy: 5 | src: "{{ item }}" 6 | dest: "{{ kubesphere_dir }}/" 7 | loop: 8 | - "monitoring-dashboard" 9 | 10 | - name: Monitoring | Installing monitoring-dashboard 11 | shell: > 12 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/monitoring-dashboard -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/node-exporter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing node-exporter 3 | shell: > 4 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/node-exporter --force -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/prometheus.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing Prometheus 3 | shell: "{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/prometheus/{{ item }}" 4 | loop: 5 | - "prometheus" 6 | - "prometheus" # the directory is applied twice so objects rejected on the first pass (e.g. while the operator's CRDs are still registering) are created on the second 7 | register: prom_result 8 | failed_when: "prom_result.stderr and 'Warning' not in prom_result.stderr and 'spec.clusterIP' not in prom_result.stderr" 9 | until: prom_result is succeeded 10 | retries: 5 11 | delay: 3 -------------------------------------------------------------------------------- /roles/ks-monitor/tasks/thanos-ruler.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Monitoring | Installing thanos-ruler 3 | shell: > 4 | {{ bin_dir
}}/kubectl apply -f {{ kubesphere_dir }}/prometheus/thanos-ruler 5 | 6 | - name: KubeSphere | Labeling prometheusrules for thanos ruler 7 | shell: > 8 | for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`; do kubectl -n $ns label prometheusrules thanos-ruler=kubesphere role=alert-rules thanosruler- --overwrite -l thanosruler=thanos-ruler,role=thanos-alerting-rules; done 9 | failed_when: false 10 | 11 | - name: Monitoring | Importing alerting status 12 | shell: > 13 | {{ bin_dir }}/kubectl patch cc ks-installer 14 | --type merge 15 | -p '{"status": {"alerting": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}' 16 | -n kubesphere-system 17 | register: cc_result 18 | failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr" 19 | until: cc_result is succeeded 20 | retries: 5 21 | delay: 3 -------------------------------------------------------------------------------- /roles/ks-monitor/templates/alertmanager-podDisruptionBudget.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if kubernetes_version is version('v1.25.0', '>=') %} 2 | apiVersion: policy/v1 3 | {% else %} 4 | apiVersion: policy/v1beta1 5 | {% endif %} 6 | kind: PodDisruptionBudget 7 | metadata: 8 | labels: 9 | app.kubernetes.io/component: alert-router 10 | app.kubernetes.io/instance: main 11 | app.kubernetes.io/name: alertmanager 12 | app.kubernetes.io/part-of: kube-prometheus 13 | app.kubernetes.io/version: {{ alertmanager_tag | replace("v", "") }} 14 | name: alertmanager-main 15 | namespace: kubesphere-monitoring-system 16 | spec: 17 | maxUnavailable: 1 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: alert-router 21 | app.kubernetes.io/instance: main 22 | app.kubernetes.io/name: alertmanager 23 | app.kubernetes.io/part-of: kube-prometheus -------------------------------------------------------------------------------- /roles/ks-monitor/templates/custom-values-gpu-monitoring.yaml.j2: -------------------------------------------------------------------------------- 1 | # Default values for NVIDIA DCGM exporter. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | 5 | image: 6 | repository: nvcr.io/nvidia/k8s/dcgm-exporter 7 | pullPolicy: IfNotPresent 8 | tag: 2.1.8-2.4.0-rc.3-ubuntu18.04 9 | 10 | resources: 11 | limits: 12 | cpu: {{ monitoring.gpu.nvidia_dcgm_exporter.resources.limits.cpu | default("100m") }} 13 | memory: {{ monitoring.gpu.nvidia_dcgm_exporter.resources.limits.memory | default("128Mi") }} 14 | requests: 15 | cpu: {{ monitoring.gpu.nvidia_dcgm_exporter.resources.requests.cpu | default("100m") }} 16 | memory: {{ monitoring.gpu.nvidia_dcgm_exporter.resources.requests.memory | default("128Mi") }} 17 | 18 | imagePullSecrets: [] 19 | 20 | tolerations: [] 21 | # - operator: Exists 22 | 23 | nodeSelector: {} 24 | # node: gpu 25 | 26 | affinity: 27 | nodeAffinity: 28 | requiredDuringSchedulingIgnoredDuringExecution: 29 | nodeSelectorTerms: 30 | - matchExpressions: 31 | - key: nvidia.com/gpu.present 32 | operator: Exists 33 | 34 | -------------------------------------------------------------------------------- /roles/ks-monitor/templates/prometheus-endpointsEtcd.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Endpoints 5 | metadata: 6 | labels: 7 | app.kubernetes.io/name: etcd 8 | name: etcd 9 | namespace: kube-system 10 | subsets: 11 | - addresses: 12 | {% for endpoint in etcd.endpointIps.split(',') %} 13 | - ip: {{ endpoint }} 14 | {% endfor %} 15 | ports: 16 | - name: metrics 17 | port: {{ etcd.port }} 18 | protocol: TCP -------------------------------------------------------------------------------- /roles/ks-monitor/templates/prometheus-podDisruptionBudget.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if kubernetes_version is version('v1.25.0', '>=') %} 2 | apiVersion: policy/v1 3 | {% else %} 4 | apiVersion: policy/v1beta1 5 | {% endif %} 6 | kind: PodDisruptionBudget 7 | metadata: 8 | labels: 9 | app.kubernetes.io/component: prometheus 10 | app.kubernetes.io/instance: k8s 11 | app.kubernetes.io/name: prometheus 12 | app.kubernetes.io/part-of: kube-prometheus 13 | app.kubernetes.io/version: {{ prometheus_tag | replace("v", "") }} 14 | name: prometheus-k8s 15 | namespace: kubesphere-monitoring-system 16 | spec: 17 | minAvailable: 1 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: prometheus 21 | app.kubernetes.io/instance: k8s 22 | app.kubernetes.io/name: prometheus 23 | app.kubernetes.io/part-of: kube-prometheus -------------------------------------------------------------------------------- /roles/ks-monitor/templates/prometheus-serviceMonitorEtcd.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: etcd 6 | app.kubernetes.io/vendor: kubesphere 7 | name: etcd 8 | namespace: kubesphere-monitoring-system 9 | spec: 10 | endpoints: 11 | - interval: 1m 12 | port: metrics 13 | {% if etcd.tlsEnable is defined and etcd.tlsEnable == true %} 14 | scheme: https 15 | tlsConfig: 16 | caFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client-ca.crt 17 | certFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.crt 18 | keyFile: /etc/prometheus/secrets/kube-etcd-client-certs/etcd-client.key 19 | {% endif %} 20 | jobLabel: k8s-app 21 | namespaceSelector: 22 | matchNames: 23 | - kube-system 24 | selector: 25 | matchLabels: 26 | app.kubernetes.io/name: etcd 27 | 
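The tlsConfig rendered above expects Prometheus to mount a secret named kube-etcd-client-certs containing etcd-client-ca.crt, etcd-client.crt and etcd-client.key. A sketch of creating it by hand, assuming kubeadm-style certificate paths (the actual CA/client-cert locations are assumptions and depend on how your etcd was deployed):
kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs \
  --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt \
  --from-file=etcd-client.crt=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --from-file=etcd-client.key=/etc/kubernetes/pki/etcd/healthcheck-client.key
The secret only becomes visible at /etc/prometheus/secrets/kube-etcd-client-certs/ if it is also listed under spec.secrets in the Prometheus custom resource, which is how the operator mounts it into the pods.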
-------------------------------------------------------------------------------- /roles/ks-monitor/templates/thanos-ruler-podDisruptionBudget.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if kubernetes_version is version('v1.25.0', '>=') %} 2 | apiVersion: policy/v1 3 | {% else %} 4 | apiVersion: policy/v1beta1 5 | {% endif %} 6 | kind: PodDisruptionBudget 7 | metadata: 8 | labels: 9 | app.kubernetes.io/component: thanos-ruler 10 | app.kubernetes.io/instance: kubesphere 11 | app.kubernetes.io/name: thanos-ruler 12 | app.kubernetes.io/part-of: kube-prometheus 13 | app.kubernetes.io/version: {{ thanos_tag | replace("v", "") }} 14 | name: thanos-ruler-kubesphere 15 | namespace: kubesphere-monitoring-system 16 | spec: 17 | minAvailable: 1 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/component: thanos-ruler 21 | app.kubernetes.io/instance: kubesphere 22 | app.kubernetes.io/name: thanos-ruler 23 | app.kubernetes.io/part-of: kube-prometheus -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/federatedcrds/update-files.sh: -------------------------------------------------------------------------------- 1 | kind=(applications.app.k8s.io 2 | clusterrolebindings.rbac.authorization.k8s.io 3 | clusterroles.rbac.authorization.k8s.io 4 | configmaps deployments.apps 5 | ingresses.networking.k8s.io 6 | globalrolebindings.iam.kubesphere.io 7 | globalroles.iam.kubesphere.io 8 | groupbindings.iam.kubesphere.io 9 | groups.iam.kubesphere.io 10 | jobs.batch 11 | limitranges 12 | namespaces 13 | persistentvolumeclaims 14 | replicasets.apps 15 | secrets 16 | serviceaccounts 17 | services 18 | statefulsets.apps 19 | users.iam.kubesphere.io 20 | workspacerolebindings.iam.kubesphere.io 21 | workspaceroles.iam.kubesphere.io 22 | workspaces.tenant.kubesphere.io) 23 | for item in "${kind[@]}"; 24 | do 25 | kubefedctl enable $item --output yaml > ${item}.yaml 26 | done 27 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/.helmignore: -------------------------------------------------------------------------------- 1 | # Ignore common backup files 2 | *.swp 3 | *~ 4 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: KubeFed helm chart 3 | name: kubefed 4 | version: 0.0.3 5 | kubeVersion: ">= 1.16.0-0" 6 | dependencies: 7 | - name: controllermanager 8 | version: 0.0.3 9 | repository: "https://localhost/" # Required but unused. 10 | condition: controllermanager.enabled 11 | 12 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/Chart.yaml.backup: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: KubeFed helm chart 3 | name: kubefed 4 | version: 0.0.3 5 | dependencies: 6 | - name: controllermanager 7 | version: 0.0.3 8 | repository: "https://localhost/" # Required but unused. 
9 | condition: controllermanager.enabled -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/charts/controllermanager/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: "0.0.3" 3 | description: A Helm chart for KubeFed Controller Manager 4 | name: controllermanager 5 | version: 0.0.3 6 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/charts/controllermanager/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "controllermanager.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "controllermanager.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "controllermanager.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/charts/controllermanager/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kubefed-admission-webhook 5 | namespace: {{ .Release.Namespace }} 6 | {{- if .Values.service.labels }} 7 | labels: 8 | {{ toYaml .Values.service.labels | indent 4 }} 9 | {{- end }} 10 | spec: 11 | selector: 12 | kubefed-admission-webhook: "true" 13 | ports: 14 | - port: 443 15 | targetPort: 8443 16 | --- 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: kubefed-controller-manager-metrics-service 21 | namespace: {{ .Release.Namespace }} 22 | {{- if .Values.service.labels }} 23 | labels: 24 | {{ toYaml .Values.service.labels | indent 4 }} 25 | {{- end }} 26 | annotations: 27 | prometheus.io/port: "9090" 28 | prometheus.io/scheme: http 29 | prometheus.io/scrape: "true" 30 | spec: 31 | selector: 32 | kubefed-control-plane: "controller-manager" 33 | ports: 34 | - name: metrics 35 | port: 9090 36 | targetPort: metrics 37 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/charts/controllermanager/templates/serviceaccounts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kubefed-controller 5 | namespace: {{ .Release.Namespace }} 6 | --- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | 
namespace: {{ .Release.Namespace }} 11 | name: kubefed-admission-webhook 12 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/kubefed/kubefed/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "kubefed.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "kubefed.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "kubefed.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/notification/kubesphere-monitoring-federated.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: types.kubefed.io/v1beta1 2 | kind: FederatedNamespace 3 | metadata: 4 | name: kubesphere-monitoring-federated 5 | namespace: kubesphere-monitoring-federated 6 | spec: 7 | placement: 8 | clusterSelector: {} 9 | template: 10 | metadata: 11 | labels: 12 | kubesphere.io/workspace: system-workspace 13 | annotations: 14 | kubesphere.io/creator: admin 15 | -------------------------------------------------------------------------------- /roles/ks-multicluster/files/notification/update_federated_crds.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | caBundle=$(kubectl get secret ks-controller-manager-webhook-cert -n kubesphere-system -o jsonpath='{.data.ca\.crt}') 4 | cat > /tmp/patch.yaml < 9 | {{ bin_dir }}/kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated kubesphere-monitoring-federated -o jsonpath='{.metadata.name}' 10 | register: namespace_deployed 11 | failed_when: false 12 | 13 | - name: notification-manager | Deploying federated namespace 14 | shell: "{{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/notification/kubesphere-monitoring-federated.yaml" 15 | register: fed_result 16 | failed_when: "fed_result.stderr and 'Warning' not in fed_result.stderr and 'spec.clusterIP' not in fed_result.stderr" 17 | until: fed_result is succeeded 18 | retries: 5 19 | delay: 3 20 | when: 21 | - namespace_deployed.rc != 0 or namespace_deployed.stdout.find("kubesphere-monitoring-federated") == -1 22 | 23 | - name: notification-manager | Update federated crds 24 | shell: "bash {{ kubesphere_dir }}/notification/update_federated_crds.sh" 25 | -------------------------------------------------------------------------------- /roles/ks-network/topology/weave-scope/tasks/main.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: weave-scope | Getting weave-scope installation files 4 | copy: 5 | src: "{{ item }}" 6 | dest: "{{ kubesphere_dir }}/" 7 | loop: 8 | - weave-scope 9 | 10 | 11 | - name: weave-scope | Creating manifests 12 | template: 13 | src: "{{ item.file }}.j2" 14 | dest: "{{ kubesphere_dir }}/weave-scope/{{ item.file }}" 15 | with_items: 16 | - { type: ds, file: weave-scope.yaml } 17 | 18 | 19 | - name: weave-scope | Deploying weave-scope 20 | shell: > 21 | {{ bin_dir }}/kubectl apply -f {{ kubesphere_dir }}/weave-scope/weave-scope.yaml 22 | register: weave_result 23 | failed_when: "weave_result.stderr and 'Warning' not in weave_result.stderr and 'spec.clusterIP' not in weave_result.stderr" 24 | until: weave_result is succeeded 25 | retries: 5 26 | delay: 3 27 | 28 | - name: weave-scope | Importing weave-scope status 29 | shell: > 30 | {{ bin_dir }}/kubectl patch cc ks-installer 31 | --type merge 32 | -p '{"status": {"network": {"topology": {"status": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}}' 33 | -n kubesphere-system 34 | register: cc_result 35 | failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr" 36 | until: cc_result is succeeded 37 | retries: 5 38 | delay: 3 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /roles/kubesphere-defaults/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: download 4 | skip_sync: true 5 | images: {} 6 | tags: 7 | - facts 8 | -------------------------------------------------------------------------------- /roles/kubesphere-defaults/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: KubeSphere | Setting images' namespace override 3 | set_fact: 4 | namespace_override: "kubesphereio" 5 | when: (local_registry is defined and local_registry == "registry.cn-beijing.aliyuncs.com") or (zone is defined and zone == "cn") 6 | 7 | - name: KubeSphere | Configuring defaults 8 | debug: 9 | msg: "Check roles/kubesphere-defaults/defaults/main.yml" 10 | tags: 11 | - always 12 | -------------------------------------------------------------------------------- /roles/openpitrix/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | openpitrix_version: latest -------------------------------------------------------------------------------- /roles/openpitrix/files/openpitrix/ks-openpitrix-import.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kubesphere/ks-installer/38055b318b8f805af65aedfa5ad4508128b5eb2c/roles/openpitrix/files/openpitrix/ks-openpitrix-import.yaml -------------------------------------------------------------------------------- /roles/openpitrix/templates/builtin-repo.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: application.kubesphere.io/v1alpha1 2 | kind: HelmRepo 3 | metadata: 4 | annotations: 5 | app.kubesphere.io/sync-period: "3600s" 6 | kubesphere.io/creator: admin 7 | labels: 8 | kubesphere.io/workspace: system-workspace 9 | name: builtin-stable 10 | spec: 11 | credential: {} 12 | name: builtin-stable 13 | syncPeriod: 3600 14 | url: https://charts.kubesphere.io/stable
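Once rendered and applied, the template above registers the stable KubeSphere chart repository as a HelmRepo object that OpenPitrix re-syncs every hour (the "3600s" annotation and syncPeriod). A verification sketch, assuming the application.kubesphere.io CRDs are installed in the cluster:
kubectl get helmrepos.application.kubesphere.io builtin-stable -o jsonpath='{.spec.url}'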
--------------------------------------------------------------------------------
/roles/openpitrix/templates/ks-openpitrix-import.yaml.j2:
--------------------------------------------------------------------------------
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app: openpitrix
    component: openpitrix-import-job
    version: v3.3.2
  name: openpitrix-import-job
  namespace: kubesphere-system
spec:
  backoffLimit: 100
  template:
    metadata:
      labels:
        app: openpitrix
        component: openpitrix-import-job
        version: v3.3.2
    spec:
      serviceAccountName: kubesphere
      restartPolicy: OnFailure
      initContainers:
        - name: wait-apiserver
          image: {{ openpitrix_job_repo }}:{{ openpitrix_job_tag }}
          imagePullPolicy: IfNotPresent
          command: ['sh', '-c', 'until nc -z ks-apiserver.kubesphere-system.svc 80; do echo "waiting for apiserver"; sleep 2; done;']
      containers:
        - name: import
          command:
            - import-app
            - import
          image: {{ openpitrix_job_repo }}:{{ openpitrix_job_tag }}
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /etc/kubesphere
              name: kubesphere-config
      volumes:
        - name: kubesphere-config
          configMap:
            name: kubesphere-config
--------------------------------------------------------------------------------
/roles/preinstall/tasks/helm-migrate.yaml:
--------------------------------------------------------------------------------
---

- name: KubeSphere | Converting Helm v2 releases to v3
  shell: >
    {{ bin_dir }}/helm 2to3 convert {{ item }}
  loop:
    - "ks-openldap"
    - "ks-redis"
    - "ks-minio"
    - "ks-openpitrix"
    - "elasticsearch-logging"
    - "elasticsearch-logging-curator"
    - "istio"
    - "istio-init"
    - "jaeger-operator"
    - "ks-jenkins"
    - "ks-sonarqube"
    - "logging-fluentbit-operator"
    - "uc"
    - "metrics-server"
  failed_when: false
--------------------------------------------------------------------------------
/roles/preinstall/tasks/main.yaml:
--------------------------------------------------------------------------------
---

- import_tasks: precheck.yaml

- name: KubeSphere | Checking KubeSphere component
  shell: >
    {{ bin_dir }}/kubectl get deploy -n kubesphere-system
  register: kubesphere_component_deployment

- name: KubeSphere | Getting KubeSphere component version
  shell: >
    {{ bin_dir }}/kubectl get deploy -n kubesphere-system ks-console -o jsonpath='{.metadata.labels.version}'
  register: console_version
  when:
    - kubesphere_component_deployment.stdout.find("ks-console") != -1

- import_tasks: helm-migrate.yaml
  when:
    - kubesphere_component_deployment.stdout.find("ks-console") != -1
    - console_version.stdout is version('v3.0.0', '<')
--------------------------------------------------------------------------------
/roles/preinstall/tasks/preCheckSize.yaml:
--------------------------------------------------------------------------------
---

- name: Getting common PVC size
  shell: >
    kubectl get pvc -n {{ item.ns }} | grep "{{ item.pvc }}" | awk '{print $4}'
  register: pvSize

- name: Stop if PV size mismatches
  assert:
    that: pvSize.stdout in item.size.split()
    msg: "{{ item.pvc }}'s size doesn't match your configuration. Please modify the configuration."
  when:
    - "pvSize.stdout | length != 0"
    - pvSize.stdout.find("G") != -1
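The preCheckSize tasks distill to a one-line capacity comparison. The following standalone sketch reproduces it outside Ansible; the namespace, PVC name, and expected size are placeholder values, and like the task it reads the CAPACITY column (column 4) of kubectl get pvc:

#!/usr/bin/env bash
# Standalone version of the preCheckSize logic (illustrative values).
ns="kubesphere-system"   # assumed namespace
pvc="minio"              # assumed PVC name
expected="20Gi"          # assumed configured size

actual=$(kubectl get pvc -n "$ns" | grep "$pvc" | awk '{print $4}')

if [[ -n "$actual" && "$actual" != "$expected" ]]; then
  echo "PVC $pvc capacity $actual does not match configured $expected" >&2
  exit 1
fi
echo "PVC $pvc size check passed (${actual:-not found})"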
--------------------------------------------------------------------------------
/scripts/build/Dockerfile.registry:
--------------------------------------------------------------------------------
FROM registry:2 as base
FROM debian:buster-slim

RUN apt update -y \
    && apt install -y ca-certificates \
    && rm -rf /var/lib/apt/lists/*

COPY --from=base /bin/registry /bin/registry
COPY --from=base /etc/docker/registry/config.yml /etc/docker/registry/config.yml
COPY docker /var/lib/registry/docker

WORKDIR /root
COPY skopeo /usr/bin
COPY scripts/sync-images.sh sync.sh
COPY scripts/images-list.txt images-list.txt
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]
--------------------------------------------------------------------------------
/scripts/build/Dockerfile.skopeo:
--------------------------------------------------------------------------------
FROM golang:1.14-buster as builder
ARG SKOPEO_VERSION=v1.2.0
RUN apt-get update && \
    apt-get install -y \
    libdevmapper-dev \
    libgpgme11-dev

ENV GOPATH=/
WORKDIR /src/github.com/containers/skopeo
RUN git clone --branch ${SKOPEO_VERSION} https://github.com/containers/skopeo /src/github.com/containers/skopeo && \
    CGO_ENABLED=0 GO111MODULE=on go build -mod=vendor "-buildmode=pie" -ldflags '-extldflags "-static"' -gcflags "" -tags "exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp" -o /usr/bin/skopeo ./cmd/skopeo
--------------------------------------------------------------------------------
/scripts/build/Dockerfile.tools:
--------------------------------------------------------------------------------
FROM ubuntu:20.04

ENV VERSION_ID=20.04
RUN apt update && apt install curl gnupg -y && \
    echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list && \
    curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | apt-key add - && \
    apt update && \
    apt -y install skopeo

RUN apt install python3-pip -y && \
    pip install ansible
--------------------------------------------------------------------------------
/scripts/check_cluster_status.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

function check_installer_ok(){
  echo "waiting for ks-installer pod ready"
  kubectl -n kubesphere-system wait --timeout=180s --for=condition=Available deployment/ks-installer
  kubectl -n kubesphere-system wait --timeout=180s --for=condition=Ready $(kubectl -n kubesphere-system get pod -l app=ks-installer -oname)
  echo "waiting for KubeSphere ready"
  while IFS= read -r line; do
    echo "$line"
    if [[ $line =~ "Welcome to KubeSphere" ]]
    then
      return
    fi
  done < <(timeout 1800 kubectl logs -n kubesphere-system deploy/ks-installer -f --tail 1)
  echo "ks-installer did not output 'Welcome to KubeSphere'"
  exit 1
}

function wait_status_ok(){
  # n advances twice per clean check and resets to zero on any failure, so
  # roughly 30 consecutive clean checks are required before the loop exits.
  for ((n=0;n<60;n++))
  do
    OK=`kubectl get pod -A | grep -E 'Running|Completed' | wc | awk '{print $1}'`
    Status=`kubectl get pod -A | sed '1d' | wc | awk '{print $1}'`
    echo "Success rate: ${OK}/${Status}"
    if [[ $OK == $Status ]]
    then
      n=$((n+1))
    else
      n=0
      kubectl get pod -A | grep -vE 'Running|Completed'
    fi
    sleep 1
  done
}

export -f wait_status_ok

check_installer_ok

timeout 1800 bash -c wait_status_ok
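Dockerfile.registry bundles registry data, skopeo, and the sync scripts into a single self-serving image. A hedged build-and-run sketch, assuming the build context already provides the docker/ data directory, the skopeo binary, and both scripts the Dockerfile copies; the image and container names here are made up:

#!/usr/bin/env bash
# Build the self-contained registry image and serve it on port 5000.
docker build -f scripts/build/Dockerfile.registry -t ks-offline-registry .
docker run -d --name offline-registry -p 5000:5000 ks-offline-registry

# The registry now answers the standard v2 API locally.
curl http://localhost:5000/v2/_catalog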
--------------------------------------------------------------------------------
/scripts/create_project_harbor.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# Copyright 2018 The KubeSphere Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

url="http://192.168.6.2"
user="admin"
passwd="Harbor12345"

harbor_projects=(library
kubesphere
calico
coredns
openebs
csiplugin
minio
mirrorgooglecontainers
osixia
prom
thanosio
jimmidyson
grafana
elastic
istio
jaegertracing
jenkins
weaveworks
openpitrix
joosthofman
nginxdemos
fluent
kubeedge
)

for project in "${harbor_projects[@]}"; do
  echo "creating $project"
  curl -u "${user}:${passwd}" -X POST -H "Content-Type: application/json" "${url}/api/v2.0/projects" -d "{ \"project_name\": \"${project}\", \"public\": true}"
done
--------------------------------------------------------------------------------
/scripts/sync-images.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

GREEN_COL="\\033[32;1m"
RED_COL="\\033[1;31m"
YELLOW_COL="\\033[33;1m"
NORMAL_COL="\\033[0;39m"

IMAGES_LIST=$1
SOURCE_REGISTRY=$2
TARGET_REGISTRY=$3
: ${IMAGES_LIST:="images-list.txt"}
: ${TARGET_REGISTRY:="localhost"}
: ${SOURCE_REGISTRY:="docker.io"}

set -eo pipefail

skopeo_copy() {
  if skopeo copy --insecure-policy --src-tls-verify=false --dest-tls-verify=false -q docker://$1 docker://$2; then
    echo -e "$GREEN_COL Progress: ${CURRENT_NUM}/${TOTAL_NUMS} sync $1 successful $NORMAL_COL"
  else
    echo -e "$RED_COL Sync $1 failed $NORMAL_COL"
  fi
}

CURRENT_NUM=0
# The sed expression skips comment lines and prints only lines containing
# an image reference (a ':' tag separator).
TOTAL_NUMS=$(sed -n '/#/d;s/:/:/p' ${IMAGES_LIST} | wc -l)
for image in $(sed -n '/#/d;s/:/:/p' ${IMAGES_LIST}); do
  let CURRENT_NUM=${CURRENT_NUM}+1
  skopeo_copy ${SOURCE_REGISTRY}/${image} ${TARGET_REGISTRY}/${image}
done
--------------------------------------------------------------------------------
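To close, a usage sketch for sync-images.sh. The argument order is the one the script reads (<images-list> <source-registry> <target-registry>); the Harbor address reuses the endpoint hard-coded in create_project_harbor.sh and assumes that registry accepts unauthenticated pushes, since the script passes no credentials to skopeo:

#!/usr/bin/env bash
# Mirror every image on the list from Docker Hub into the local Harbor.
bash scripts/sync-images.sh images-list.txt docker.io 192.168.6.2

# Or rely on the script's built-in defaults (docker.io -> localhost):
bash scripts/sync-images.sh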